code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
#library
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm # appear the precess of running situation.
import time
from scipy.spatial.distance import pdist, squareform
#0. Data Load
data = pd.read_csv(sys.argv[1], delimiter='\t') # Load the tab-separated input file named on the command line.
#1. Data Preprocessing
all_elements = [index for index in data.index] # Row indices: every object starts inside one big cluster.
#Make a distance matrix to compute dissimilarity.
# pdist returns the condensed (upper-triangular) pairwise Euclidean distances;
# squareform expands it into the full symmetric n-by-n matrix used everywhere below.
distance_matrix = pdist(data, metric='euclidean')
dissimilarity_matrix = np.array(squareform(distance_matrix))
#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)
print(dissimilarity_matrix)
#2. Modeling : DIANA Clustering
#2-1. Compute dissimilarity average in ONE Cluster.
def avg_dissim_within_group_element(node, element_list):
    """Average dissimilarity between `node` and the other members of its cluster.

    node: row index into the global `dissimilarity_matrix`.
    element_list: indices of the cluster that contains `node`.
    Returns 0 for a singleton cluster (there is nothing to compare with).

    Fix: the original also tracked a `max_diameter` value that was never
    returned or used — dead work removed.
    """
    # Distance from `node` to itself is 0, so summing over the whole
    # cluster (including `node`) does not bias the total.
    sum_dissm = sum(dissimilarity_matrix[node][i] for i in element_list)
    if len(element_list) > 1:
        # Average over the number of *other* members, excluding `node`.
        return sum_dissm / (len(element_list) - 1)
    return 0
# 2-2. Compute dissimilarity average between different Group(e.g. Cluster1 and Cluster2)
# id in sperated new group = splinter_list
def avg_dissim_across_group_element(node, main_list, splinter_list):
    """Average dissimilarity from `node` to every member of the splinter group.

    Returns 0 when the splinter group is empty (no group to compare against).
    `main_list` is unused but kept for signature symmetry with the caller.
    """
    if not splinter_list:
        return 0
    total = 0
    for member in splinter_list:
        total += dissimilarity_matrix[node][member]
    return total / len(splinter_list)
# 2-3. Cluster Splinter
def splinter(main_list, splinter_group):
    """Pick the object in `main_list` most attracted to the splinter group.

    For each object, compare its average dissimilarity within its own group
    against its average dissimilarity to the splinter group. Returns
    (object_index, 1) when some object is farther (on average) from its own
    group than from the splinter group — i.e. it should move — or (-1, -1)
    when no object qualifies and the splitting phase must stop.
    """
    best_diff = -np.inf
    best_index = None
    for node in main_list:
        within = avg_dissim_within_group_element(node, main_list)
        across = avg_dissim_across_group_element(node, main_list, splinter_group)
        difference = within - across
        if difference > best_diff:
            best_diff = difference
            best_index = node
    if best_diff > 0:
        return (best_index, 1)
    return (-1, -1)
# 2-4. Split
def split(element_list):
    """Split one cluster in two (the DIANA divisive step).

    NOTE: `element_list` is mutated in place — each object that defects is
    removed from it and collected into the splinter group.
    Returns (remaining_members, splinter_group).
    """
    splinter_group = []
    # Keep moving the most dissimilar object until none qualifies (flag <= 0).
    mover, flag = splinter(element_list, splinter_group)
    while flag > 0:
        element_list.remove(mover)
        splinter_group.append(mover)
        mover, flag = splinter(element_list, splinter_group)
    return (element_list, splinter_group)
# 2-5. look for maximum distance in the current cluster.
def max_distance(cluster_list):
    """Index of the cluster with the largest internal diameter.

    The diameter of a cluster is the largest pairwise dissimilarity among
    its members. Returns -1 when no cluster has a positive diameter (every
    remaining cluster is effectively a singleton), which tells the main
    loop to stop splitting.
    """
    widest_index = None
    widest_value = -np.inf
    for position, members in enumerate(cluster_list):
        for row in members:
            for col in members:
                # Track the largest pairwise distance seen so far and the
                # cluster it belongs to.
                if dissimilarity_matrix[row][col] > widest_value:
                    widest_value = dissimilarity_matrix[row][col]
                    widest_index = position
    if widest_value <= 0:
        return -1
    return widest_index
# main
if __name__ == '__main__':
# Save arguments list
argv = sys.argv
# Set the number of cluster.
num_clusters = sys.argv[-1]
current_clusters = ([all_elements])
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
while((index!=-1) and (level!=num_clusters)): #Proceed until the index equal -1 and setting number of cluster.
(a_clstr, b_clstr) = split(current_clusters[index])
del current_clusters[index] # Delete current cluster.
current_clusters.append(a_clstr) #original cluster
current_clusters.append(b_clstr) #splinter cluster
index = max_distance(current_clusters)
level +=1
pbar.update(10)
for i in range(num_clusters): # Save the results.
pd.DataFrame(current_clusters[i], columns=['id']).to_csv("%s_cluster_%d.txt" %(sys.argv[1], i), sep='\t')
|
normal
|
{
"blob_id": "267695555e876dc2fe5820dc194490aad9e5e344",
"index": 1361,
"step-1": "<mask token>\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(dissimilarity_matrix)\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return 
max_diameter_cluster_index\n\n\nif __name__ == '__main__':\n argv = sys.argv\n num_clusters = sys.argv[-1]\n current_clusters = [all_elements]\n print(current_clusters)\n level = 1\n index = 0\n with tqdm(total=100) as pbar:\n while index != -1 and level != num_clusters:\n a_clstr, b_clstr = split(current_clusters[index])\n del current_clusters[index]\n current_clusters.append(a_clstr)\n current_clusters.append(b_clstr)\n index = max_distance(current_clusters)\n level += 1\n pbar.update(10)\n for i in range(num_clusters):\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\n '%s_cluster_%d.txt' % (sys.argv[1], i), sep='\\t')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\nfrom tqdm import tqdm\nimport time\nfrom scipy.spatial.distance import pdist, squareform\ndata = pd.read_csv(sys.argv[1], delimiter='\\t')\nall_elements = [index for index in data.index]\ndistance_matrix = pdist(data, metric='euclidean')\ndissimilarity_matrix = np.array(squareform(distance_matrix))\nprint(dissimilarity_matrix)\n\n\ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0\n for i in element_list:\n sum_dissm += dissimilarity_matrix[node][i]\n if dissimilarity_matrix[node][i] > max_diameter:\n max_diameter = dissimilarity_matrix[node][i]\n if len(element_list) > 1:\n avg = sum_dissm / (len(element_list) - 1)\n else:\n avg = 0\n return avg\n\n\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0:\n return 0\n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j]\n avg = sum_dissm / len(splinter_list)\n return avg\n\n\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list)\n y = avg_dissim_across_group_element(node, main_list, splinter_group)\n diff = x - y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node\n if most_dissm_object_value > 0:\n return most_dissm_object_index, 1\n else:\n return -1, -1\n\n\ndef split(element_list):\n main_list = element_list\n splinter_group = []\n most_dissm_object_index, flag = splinter(main_list, splinter_group)\n while flag > 0:\n main_list.remove(most_dissm_object_index)\n splinter_group.append(most_dissm_object_index)\n most_dissm_object_index, flag = splinter(element_list, splinter_group)\n return main_list, splinter_group\n\n\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 
0\n for element_list in cluster_list:\n for i in element_list:\n for j in element_list:\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value:\n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n index += 1\n if max_diameter_cluster_value <= 0:\n return -1\n return max_diameter_cluster_index\n\n\nif __name__ == '__main__':\n argv = sys.argv\n num_clusters = sys.argv[-1]\n current_clusters = [all_elements]\n print(current_clusters)\n level = 1\n index = 0\n with tqdm(total=100) as pbar:\n while index != -1 and level != num_clusters:\n a_clstr, b_clstr = split(current_clusters[index])\n del current_clusters[index]\n current_clusters.append(a_clstr)\n current_clusters.append(b_clstr)\n index = max_distance(current_clusters)\n level += 1\n pbar.update(10)\n for i in range(num_clusters):\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\n '%s_cluster_%d.txt' % (sys.argv[1], i), sep='\\t')\n",
"step-5": "#library\nimport pandas as pd\nimport numpy as np\nimport sys\n\nfrom tqdm import tqdm # appear the precess of running situation.\nimport time\n\nfrom scipy.spatial.distance import pdist, squareform\n\n#0. Data Load\ndata = pd.read_csv(sys.argv[1], delimiter='\\t') # Load train (input text file)\n\n#1. Data Preprocessing\nall_elements = [index for index in data.index] # Save index name.\n\n#Make a distance metrix to compute dissimilarity.\ndistance_matrix = pdist(data, metric='euclidean')\ndissimilarity_matrix = np.array(squareform(distance_matrix))\n#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)\nprint(dissimilarity_matrix)\n\n#2. Modeling : DIANA Clustering\n#2-1. Compute dissimilarity average in ONE Cluster. \ndef avg_dissim_within_group_element(node, element_list):\n max_diameter = -np.inf\n sum_dissm = 0 #Set Sum equal zero.\n for i in element_list: \n sum_dissm += dissimilarity_matrix[node][i] #While iterate element_list, Sum the distance matrix value singly in a node.\n if( dissimilarity_matrix[node][i] > max_diameter): #If distance matrix is bigger than max_distance,\n max_diameter = dissimilarity_matrix[node][i] # that distance matrix value become a max_diameter.\n if(len(element_list)>1):\n avg = sum_dissm/(len(element_list)-1) # Average of distance matrix.\n else: \n avg = 0\n return avg\n\n# 2-2. Compute dissimilarity average between different Group(e.g. Cluster1 and Cluster2) \n# id in sperated new group = splinter_list\ndef avg_dissim_across_group_element(node, main_list, splinter_list):\n if len(splinter_list) == 0: #there is no spliter group, return zero.\n return 0 \n sum_dissm = 0\n for j in splinter_list:\n sum_dissm = sum_dissm + dissimilarity_matrix[node][j] #Compute average between Object in splinter group \n avg = sum_dissm/(len(splinter_list)) #and all object dissimilarity matrix.\n return avg\n\n# 2-3. 
Cluster Splinter\ndef splinter(main_list, splinter_group):\n most_dissm_object_value = -np.inf #initate minus.\n most_dissm_object_index = None\n for node in main_list:\n x = avg_dissim_within_group_element(node, main_list) # Previously, a point in main group as a standard.\n y = avg_dissim_across_group_element(node, main_list, splinter_group) # a point in the seperated group.\n diff = x - y # difference between X and Y\n if diff > most_dissm_object_value:\n most_dissm_object_value = diff\n most_dissm_object_index = node # save index and value which has largest value between two groups.\n if(most_dissm_object_value>0): # differnce is Plus, Create new splinter group. flag = 1\n return (most_dissm_object_index, 1)\n else: # difference is minus, flag = -1\n return (-1, -1)\n\n# 2-4. Split\ndef split(element_list):\n main_list = element_list\n splinter_group = [] \n (most_dissm_object_index, flag) = splinter(main_list, splinter_group)\n while(flag > 0): # Iterate splinter function until a flag become minus.\n main_list.remove(most_dissm_object_index) #Delete the most largest dissimilarity average object index in the main list.\n splinter_group.append(most_dissm_object_index) # Then, append in the new splinter group.\n (most_dissm_object_index, flag) = splinter(element_list, splinter_group)\n \n return (main_list, splinter_group)\n\n# 2-5. look for maximum distance in the current cluster.\ndef max_distance(cluster_list):\n max_diameter_cluster_index = None\n max_diameter_cluster_value = -np.inf\n index = 0\n for element_list in cluster_list:\n for i in element_list: #columns\n for j in element_list: #rows\n #Switch the largest dissimilarity average object(index), value. 
\n if dissimilarity_matrix[i][j] > max_diameter_cluster_value: \n max_diameter_cluster_value = dissimilarity_matrix[i][j]\n max_diameter_cluster_index = index\n \n index +=1\n \n if(max_diameter_cluster_value <= 0):\n return -1\n \n return max_diameter_cluster_index\n\n# main\nif __name__ == '__main__':\n\n # Save arguments list\n argv = sys.argv \n\n # Set the number of cluster.\n num_clusters = sys.argv[-1]\n current_clusters = ([all_elements])\n print(current_clusters)\n level = 1\n index = 0\n\n with tqdm(total=100) as pbar:\n while((index!=-1) and (level!=num_clusters)): #Proceed until the index equal -1 and setting number of cluster.\n (a_clstr, b_clstr) = split(current_clusters[index])\n del current_clusters[index] # Delete current cluster.\n current_clusters.append(a_clstr) #original cluster\n current_clusters.append(b_clstr) #splinter cluster\n index = max_distance(current_clusters)\n level +=1\n pbar.update(10)\n\n for i in range(num_clusters): # Save the results.\n pd.DataFrame(current_clusters[i], columns=['id']).to_csv(\"%s_cluster_%d.txt\" %(sys.argv[1], i), sep='\\t') \n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
from typing import Callable, List, Optional
import numpy as np
import lab1.src.grad.grad_step_strategy as st
import lab1.src.grad.stop_criteria as sc
# Default convergence tolerance shared by step strategies and stop criteria.
DEFAULT_EPSILON = 1e-9
# Default iteration cap (also forwarded to strategy/criteria factories).
DEFAULT_MAX_ITERATIONS = 1e5
def gradient_descent(f: Callable[[np.ndarray], float],
                     f_grad: Callable[[np.ndarray], np.ndarray],
                     start: np.ndarray,
                     step_strategy: st.StepStrategy,
                     stop_criteria: sc.StopCriteria,
                     eps_strategy: float = DEFAULT_EPSILON,
                     eps_stop_criteria: float = DEFAULT_EPSILON,
                     max_iterations_strategy=DEFAULT_MAX_ITERATIONS,
                     max_iterations_criteria=DEFAULT_MAX_ITERATIONS,
                     trajectory: Optional[List] = None):
    """Minimize `f` by gradient descent from `start`.

    The step size comes from `step_strategy` and termination from
    `stop_criteria` (both resolved through the project factories).
    Returns a tuple (approximate minimizer, iteration count). When
    `trajectory` is a list, every visited point is appended to it.
    """
    step_picker = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)
    stopper = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)

    point = start
    iteration = 0
    if trajectory is not None:
        trajectory.append(point)

    while True:
        iteration += 1
        # Evaluate gradient first, then the step size (same order as callers expect).
        gradient = f_grad(point)
        step_size = step_picker.next_step(point)
        candidate = point - step_size * gradient

        if stopper.should_stop(point, candidate):
            return point, iteration

        point = candidate
        if trajectory is not None:
            trajectory.append(point)

        # Hard cap as a safety net in case the criteria never fire.
        if iteration == max_iterations_criteria:
            return point, iteration
if __name__ == '__main__':
    # Smoke test: minimize f(x, y) = x^2 + y^2, whose minimum is the origin.
    def foo(p):
        return p[0] ** 2 + p[1] ** 2

    def foo_grad(p):
        # Analytic gradient of x^2 + y^2.
        return np.array([2 * p[0], 2 * p[1]])

    res, _ = gradient_descent(foo,
                              foo_grad,
                              start=np.array([3, 4]),
                              step_strategy=st.StepStrategy.DIVIDE_STEP,
                              stop_criteria=sc.StopCriteria.BY_GRAD)
    print(res)
|
normal
|
{
"blob_id": "919e1f8a4b021d75496f3bcff369261a09362a65",
"index": 3645,
"step-1": "<mask token>\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-3": "<mask token>\nDEFAULT_EPSILON = 1e-09\nDEFAULT_MAX_ITERATIONS = 100000.0\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-4": "from typing import Callable, List, Optional\nimport numpy as np\nimport lab1.src.grad.grad_step_strategy as st\nimport lab1.src.grad.stop_criteria as sc\nDEFAULT_EPSILON = 1e-09\nDEFAULT_MAX_ITERATIONS = 100000.0\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-5": "from typing import Callable, List, Optional\nimport numpy as np\n\nimport lab1.src.grad.grad_step_strategy as st\nimport lab1.src.grad.stop_criteria as sc\n\n\nDEFAULT_EPSILON = 1e-9\nDEFAULT_MAX_ITERATIONS = 1e5\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float],\n f_grad: Callable[[np.ndarray], np.ndarray],\n start: np.ndarray,\n step_strategy: st.StepStrategy,\n stop_criteria: sc.StopCriteria,\n eps_strategy: float = DEFAULT_EPSILON,\n eps_stop_criteria: float = DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS,\n max_iterations_criteria=DEFAULT_MAX_ITERATIONS,\n trajectory: Optional[List] = None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n\n if trajectory is not None:\n trajectory.append(cur_x)\n\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n\n\n res, _ = gradient_descent(foo,\n foo_grad,\n start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP,\n stop_criteria=sc.StopCriteria.BY_GRAD)\n print(res)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
f.close()
<|reserved_special_token_0|>
for c, cl in jsonObject.items():
for d in cl:
d.update({'player': c})
l.append(d)
<|reserved_special_token_0|>
ax.set_xticks(labels)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(range(0, 45))
ax.bar(labels, df['club_goals'], width, label='Club')
ax.bar(labels, df['country_goals'], width, label='Country')
ax.set_ylabel('Goals')
ax.set_xlabel('Years')
ax.set_title('Goals by year')
ax.legend()
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Load the per-season goal records from the JSON file.
f = open('Maradona-goals.json')
jsonObject = json.load(f)
f.close()
# Flatten {player: [yearly records...]} into one list of dicts, tagging
# each record with the player's name.
l = []
for c, cl in jsonObject.items():
    for d in cl:
        d.update({'player': c})
        l.append(d)
df = pd.DataFrame(l)
labels = df['year']  # x-axis: one tick per year
width = 0.75  # bar width
fig = plt.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xticks(labels)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(range(0, 45))
# Club and country bars share the same x positions (overlaid, not stacked).
ax.bar(labels, df['club_goals'], width, label='Club')
ax.bar(labels, df['country_goals'], width, label='Country')
ax.set_ylabel('Goals')
ax.set_xlabel('Years')
ax.set_title('Goals by year')
ax.legend()
plt.show()
<|reserved_special_token_1|>
import json
import pandas as pd
import matplotlib.pyplot as plt
# Load the per-season goal records from the JSON file.
f = open('Maradona-goals.json')
jsonObject = json.load(f)
f.close()
# Flatten {player: [yearly records...]} into one list of dicts, tagging
# each record with the player's name.
l = []
for c, cl in jsonObject.items():
    for d in cl:
        d.update({'player': c})
        l.append(d)
df = pd.DataFrame(l)
labels = df['year']  # x-axis: one tick per year
width = 0.75  # bar width
fig = plt.figure(figsize=(16, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xticks(labels)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(range(0, 45))
# Club and country bars share the same x positions (overlaid, not stacked).
ax.bar(labels, df['club_goals'], width, label='Club')
ax.bar(labels, df['country_goals'], width, label='Country')
ax.set_ylabel('Goals')
ax.set_xlabel('Years')
ax.set_title('Goals by year')
ax.legend()
plt.show()
<|reserved_special_token_1|>
import json
import pandas as pd
import matplotlib.pyplot as plt
# Load the per-season goal records. FIX: the original used a manual
# open()/close() pair, which leaks the file handle if json.load raises;
# the context manager guarantees closure on every path.
with open('Maradona-goals.json') as f:
    jsonObject = json.load(f)

# Flatten {player: [yearly records...]} into one list of dicts, tagging
# each record with the player's name.
l = []
for c, cl in jsonObject.items():
    for d in cl:
        d.update({'player' : c})
        l.append(d)

df = pd.DataFrame(l)

labels = df["year"]  # x-axis: one tick per year
width = 0.75         # bar width

fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(1,1,1)
ax.set_xticks(labels)
ax.set_xticklabels(labels, rotation=45)
ax.set_yticks(range(0,45))
# Club and country bars share the same x positions (overlaid, not stacked).
ax.bar(labels, df["club_goals"], width, label='Club')
ax.bar(labels, df["country_goals"], width, label='Country')
#ax.grid(color='LIGHTGRAY')
ax.set_ylabel('Goals')
ax.set_xlabel('Years')
ax.set_title('Goals by year')
ax.legend()
plt.show()
|
flexible
|
{
"blob_id": "33e9e45fbe0e3143d75d34c1db283c01e2693f68",
"index": 4967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nf.close()\n<mask token>\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player': c})\n l.append(d)\n<mask token>\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0, 45))\nax.bar(labels, df['club_goals'], width, label='Club')\nax.bar(labels, df['country_goals'], width, label='Country')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\nplt.show()\n",
"step-3": "<mask token>\nf = open('Maradona-goals.json')\njsonObject = json.load(f)\nf.close()\nl = []\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player': c})\n l.append(d)\ndf = pd.DataFrame(l)\nlabels = df['year']\nwidth = 0.75\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(1, 1, 1)\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0, 45))\nax.bar(labels, df['club_goals'], width, label='Club')\nax.bar(labels, df['country_goals'], width, label='Country')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\nplt.show()\n",
"step-4": "import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nf = open('Maradona-goals.json')\njsonObject = json.load(f)\nf.close()\nl = []\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player': c})\n l.append(d)\ndf = pd.DataFrame(l)\nlabels = df['year']\nwidth = 0.75\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(1, 1, 1)\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0, 45))\nax.bar(labels, df['club_goals'], width, label='Club')\nax.bar(labels, df['country_goals'], width, label='Country')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\nplt.show()\n",
"step-5": "import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nf = open('Maradona-goals.json')\njsonObject = json.load(f)\n\nf.close()\n\nl = []\nfor c, cl in jsonObject.items():\n for d in cl:\n d.update({'player' : c})\n l.append(d)\ndf = pd.DataFrame(l)\n\nlabels = df[\"year\"]\n\nwidth = 0.75\n\nfig = plt.figure(figsize=(16,8))\nax = fig.add_subplot(1,1,1)\n\nax.set_xticks(labels)\nax.set_xticklabels(labels, rotation=45)\nax.set_yticks(range(0,45))\n\nax.bar(labels, df[\"club_goals\"], width, label='Club')\nax.bar(labels, df[\"country_goals\"], width, label='Country')\n\n#ax.grid(color='LIGHTGRAY')\nax.set_ylabel('Goals')\nax.set_xlabel('Years')\nax.set_title('Goals by year')\nax.legend()\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.2.13 on 2021-08-11 15:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("notifications", "0011_auto_20171229_1747"),
]
operations = [
migrations.AlterField(
model_name="notification",
name="date",
field=models.DateTimeField(auto_now=True, verbose_name="Dato"),
),
migrations.AlterField(
model_name="notification",
name="priority",
field=models.PositiveIntegerField(
choices=[(0, "Low"), (1, "Medium"), (2, "High")],
default=1,
verbose_name="priority",
),
),
migrations.AlterField(
model_name="notification",
name="sent_mail",
field=models.BooleanField(default=False, verbose_name="sent mail"),
),
]
|
normal
|
{
"blob_id": "fa045ccd4e54332f6c05bf64e3318e05b8123a10",
"index": 3317,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('notifications', '0011_auto_20171229_1747')]\n operations = [migrations.AlterField(model_name='notification', name=\n 'date', field=models.DateTimeField(auto_now=True, verbose_name=\n 'Dato')), migrations.AlterField(model_name='notification', name=\n 'priority', field=models.PositiveIntegerField(choices=[(0, 'Low'),\n (1, 'Medium'), (2, 'High')], default=1, verbose_name='priority')),\n migrations.AlterField(model_name='notification', name='sent_mail',\n field=models.BooleanField(default=False, verbose_name='sent mail'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('notifications', '0011_auto_20171229_1747')]\n operations = [migrations.AlterField(model_name='notification', name=\n 'date', field=models.DateTimeField(auto_now=True, verbose_name=\n 'Dato')), migrations.AlterField(model_name='notification', name=\n 'priority', field=models.PositiveIntegerField(choices=[(0, 'Low'),\n (1, 'Medium'), (2, 'High')], default=1, verbose_name='priority')),\n migrations.AlterField(model_name='notification', name='sent_mail',\n field=models.BooleanField(default=False, verbose_name='sent mail'))]\n",
"step-5": "# Generated by Django 2.2.13 on 2021-08-11 15:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"notifications\", \"0011_auto_20171229_1747\"),\n ]\n\n operations = [\n migrations.AlterField(\n model_name=\"notification\",\n name=\"date\",\n field=models.DateTimeField(auto_now=True, verbose_name=\"Dato\"),\n ),\n migrations.AlterField(\n model_name=\"notification\",\n name=\"priority\",\n field=models.PositiveIntegerField(\n choices=[(0, \"Low\"), (1, \"Medium\"), (2, \"High\")],\n default=1,\n verbose_name=\"priority\",\n ),\n ),\n migrations.AlterField(\n model_name=\"notification\",\n name=\"sent_mail\",\n field=models.BooleanField(default=False, verbose_name=\"sent mail\"),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# encoding: utf-8
"""
File: demo.py
Author: Rock Johnson
Description: 此文件为案例文件
"""
import sys
sys.path.append('../')
try:
from panicbuying.panic import Panic
except:
from panicbuying.panicbuying.panic import Panic
def main():
'''
公共参数:
store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),
version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),
hidden: 是否启用界面(默认启用),
商城抢购:
url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),
书店扒书(quit默认退出, hidden默认不启用):
books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),
account: 账号, password: 密码,
'''
books = {
'书名': '电子书链接地址',
}
xm = Panic(browser='Chrome', version='78.0.0', store='文泉',
books=books, path='路径', account='账号', password='密码',
)
xm.start()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "2f8dff78f5bc5ed18df97e2574b47f0a7711d372",
"index": 547,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n \"\"\"\n books = {'书名': '电子书链接地址'}\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉', books=books,\n path='路径', account='账号', password='密码')\n xm.start()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../')\ntry:\n from panicbuying.panic import Panic\nexcept:\n from panicbuying.panicbuying.panic import Panic\n\n\ndef main():\n \"\"\"\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n \"\"\"\n books = {'书名': '电子书链接地址'}\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉', books=books,\n path='路径', account='账号', password='密码')\n xm.start()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport sys\nsys.path.append('../')\ntry:\n from panicbuying.panic import Panic\nexcept:\n from panicbuying.panicbuying.panic import Panic\n\n\ndef main():\n \"\"\"\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n \"\"\"\n books = {'书名': '电子书链接地址'}\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉', books=books,\n path='路径', account='账号', password='密码')\n xm.start()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# encoding: utf-8\n\n\"\"\"\nFile: demo.py\nAuthor: Rock Johnson\nDescription: 此文件为案例文件\n\"\"\"\nimport sys\n\nsys.path.append('../')\ntry:\n from panicbuying.panic import Panic\nexcept:\n from panicbuying.panicbuying.panic import Panic\n\n\ndef main():\n '''\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n '''\n books = {\n '书名': '电子书链接地址',\n }\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉',\n books=books, path='路径', account='账号', password='密码',\n )\n xm.start()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(parsed['var1'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data = '{"var1": "harry", "var2":56}'
parsed = json.loads(data)
print(parsed['var1'])
<|reserved_special_token_1|>
import json
data = '{"var1": "harry", "var2":56}'
parsed = json.loads(data)
print(parsed['var1'])
<|reserved_special_token_1|>
import json
data = '{"var1": "harry", "var2":56}'
parsed = json.loads(data)
print(parsed['var1'])
# data2 = {"channel_name": "Chill_Out",
# "Cars": ["BMW", "Audi a8", "ferrari"],
# "fridge": ("loki", "Aalu", "pasta"),
# "isbad": False
# }
# jscomp = json.dumps(data2)
# print(jscomp)
|
flexible
|
{
"blob_id": "f0f9541eba29b4488c429c889f3b346d53d0239d",
"index": 7193,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(parsed['var1'])\n",
"step-3": "<mask token>\ndata = '{\"var1\": \"harry\", \"var2\":56}'\nparsed = json.loads(data)\nprint(parsed['var1'])\n",
"step-4": "import json\ndata = '{\"var1\": \"harry\", \"var2\":56}'\nparsed = json.loads(data)\nprint(parsed['var1'])\n",
"step-5": "import json\n\ndata = '{\"var1\": \"harry\", \"var2\":56}'\n\nparsed = json.loads(data)\nprint(parsed['var1'])\n\n# data2 = {\"channel_name\": \"Chill_Out\",\n# \"Cars\": [\"BMW\", \"Audi a8\", \"ferrari\"],\n# \"fridge\": (\"loki\", \"Aalu\", \"pasta\"),\n# \"isbad\": False\n# }\n# jscomp = json.dumps(data2)\n# print(jscomp)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_point3d_wkt():
p = GeometryPoint3D(10, 20, 30)
assert p.wkt == 'POINT Z (10 20 30)'
def test_point2d_to_shapely():
p = GeometryPoint2D(10, 20)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.wkt == p.wkt
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_point2d_wkt():
p = GeometryPoint2D(10, 20)
assert p.wkt == 'POINT (10 20)'
p = GeometryPoint2D(x=-5642.5, y=120.1)
assert p.wkt == 'POINT (-5642.5 120.1)'
def test_point3d_wkt():
p = GeometryPoint3D(10, 20, 30)
assert p.wkt == 'POINT Z (10 20 30)'
def test_point2d_to_shapely():
p = GeometryPoint2D(10, 20)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.wkt == p.wkt
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_point2d_wkt():
p = GeometryPoint2D(10, 20)
assert p.wkt == 'POINT (10 20)'
p = GeometryPoint2D(x=-5642.5, y=120.1)
assert p.wkt == 'POINT (-5642.5 120.1)'
def test_point3d_wkt():
p = GeometryPoint3D(10, 20, 30)
assert p.wkt == 'POINT Z (10 20 30)'
def test_point2d_to_shapely():
p = GeometryPoint2D(10, 20)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.wkt == p.wkt
def test_point3d_to_shapely():
p = GeometryPoint3D(10, 20, -1)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.z == -1
assert sp.wkt == p.wkt
<|reserved_special_token_1|>
from mikeio.spatial import GeometryPoint2D, GeometryPoint3D
def test_point2d_wkt():
p = GeometryPoint2D(10, 20)
assert p.wkt == 'POINT (10 20)'
p = GeometryPoint2D(x=-5642.5, y=120.1)
assert p.wkt == 'POINT (-5642.5 120.1)'
def test_point3d_wkt():
p = GeometryPoint3D(10, 20, 30)
assert p.wkt == 'POINT Z (10 20 30)'
def test_point2d_to_shapely():
p = GeometryPoint2D(10, 20)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.wkt == p.wkt
def test_point3d_to_shapely():
p = GeometryPoint3D(10, 20, -1)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.z == -1
assert sp.wkt == p.wkt
<|reserved_special_token_1|>
from mikeio.spatial import GeometryPoint2D, GeometryPoint3D
# https://www.ogc.org/standard/sfa/
def test_point2d_wkt():
p = GeometryPoint2D(10, 20)
assert p.wkt == "POINT (10 20)"
p = GeometryPoint2D(x=-5642.5, y=120.1)
assert p.wkt == "POINT (-5642.5 120.1)"
def test_point3d_wkt():
p = GeometryPoint3D(10, 20, 30)
assert p.wkt == "POINT Z (10 20 30)"
def test_point2d_to_shapely():
p = GeometryPoint2D(10, 20)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.wkt == p.wkt
def test_point3d_to_shapely():
p = GeometryPoint3D(10, 20, -1)
sp = p.to_shapely()
assert sp.x == 10
assert sp.y == 20
assert sp.z == -1
assert sp.wkt == p.wkt
|
flexible
|
{
"blob_id": "ae45a4967a8ee63c27124d345ad4dc0c01033c0e",
"index": 6749,
"step-1": "<mask token>\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n",
"step-4": "from mikeio.spatial import GeometryPoint2D, GeometryPoint3D\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n",
"step-5": "from mikeio.spatial import GeometryPoint2D, GeometryPoint3D\n\n# https://www.ogc.org/standard/sfa/\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == \"POINT (10 20)\"\n\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == \"POINT (-5642.5 120.1)\"\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == \"POINT Z (10 20 30)\"\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.contrib import admin
# Register your models here.
from .models import HuyenQuan
admin.site.register(HuyenQuan)
|
normal
|
{
"blob_id": "16e5a44cb4fbe71eaa9c1f5b00505578de0d2cea",
"index": 6403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(HuyenQuan)\n",
"step-3": "from django.contrib import admin\nfrom .models import HuyenQuan\nadmin.site.register(HuyenQuan)\n",
"step-4": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import HuyenQuan\n\nadmin.site.register(HuyenQuan)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from convert_data2 import array_rule
from convert_data2 import array_packet
import tensorflow as tf
import numpy as np
train_x, train_y = array_packet()
x_input, input_ip = array_rule()
n_nodes_hl1 = 210
n_nodes_hl2 = 210
n_nodes_hl3 = 210
n_classes = 2
batch_size = 500
hm_epochs = 20
x = tf.placeholder('float')
y = tf.placeholder('float')
z = tf.placeholder('float')
hidden_1_layer = {'f_fum': n_nodes_hl1,
'weight': tf.Variable(tf.random_normal([train_x.shape[1], n_nodes_hl1])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum': n_nodes_hl2,
'weight': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'f_fum': n_nodes_hl3,
'weight': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'f_fum': None,
'weight': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'bias': tf.Variable(tf.random_normal([n_classes])), }
def neural_network_model(data):
l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer['bias'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias'])
l3 = tf.nn.relu(l3)
output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']
return output
def train_neural_network(x):
prediction = neural_network_model(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(hm_epochs):
epoch_loss = 0
i = 0
while i < len(train_x):
start = i
end = i + batch_size
batch_x = np.array(train_x[start:end])
batch_y = np.array(train_y[start:end])
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
y: batch_y})
epoch_loss += c
i += batch_size
print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
result_array = np.array([])
batch_x = np.array(x_input)
print(batch_x)
result = (sess.run(tf.argmax(prediction.eval(feed_dict={z: batch_x}), 1)))
result_array = np.append(result_array, result)
return result_array
train_neural_network(x)
|
normal
|
{
"blob_id": "1446268583bf9fa3375319eae3c21cf47f47faca",
"index": 7279,
"step-1": "<mask token>\n\n\ndef neural_network_model(data):\n l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer[\n 'bias'])\n l1 = tf.nn.relu(l1)\n l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias']\n )\n l2 = tf.nn.relu(l2)\n l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias']\n )\n l3 = tf.nn.relu(l3)\n output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=\n prediction, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for epoch in range(hm_epochs):\n epoch_loss = 0\n i = 0\n while i < len(train_x):\n start = i\n end = i + batch_size\n batch_x = np.array(train_x[start:end])\n batch_y = np.array(train_y[start:end])\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y:\n batch_y})\n epoch_loss += c\n i += batch_size\n print('Epoch', epoch + 1, 'completed out of', hm_epochs,\n 'loss:', epoch_loss)\n result_array = np.array([])\n batch_x = np.array(x_input)\n print(batch_x)\n result = sess.run(tf.argmax(prediction.eval(feed_dict={z: batch_x}), 1)\n )\n result_array = np.append(result_array, result)\n return result_array\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef neural_network_model(data):\n l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer[\n 'bias'])\n l1 = tf.nn.relu(l1)\n l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias']\n )\n l2 = tf.nn.relu(l2)\n l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias']\n )\n l3 = tf.nn.relu(l3)\n output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=\n prediction, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for epoch in range(hm_epochs):\n epoch_loss = 0\n i = 0\n while i < len(train_x):\n start = i\n end = i + batch_size\n batch_x = np.array(train_x[start:end])\n batch_y = np.array(train_y[start:end])\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y:\n batch_y})\n epoch_loss += c\n i += batch_size\n print('Epoch', epoch + 1, 'completed out of', hm_epochs,\n 'loss:', epoch_loss)\n result_array = np.array([])\n batch_x = np.array(x_input)\n print(batch_x)\n result = sess.run(tf.argmax(prediction.eval(feed_dict={z: batch_x}), 1)\n )\n result_array = np.append(result_array, result)\n return result_array\n\n\ntrain_neural_network(x)\n",
"step-3": "<mask token>\ntrain_x, train_y = array_packet()\nx_input, input_ip = array_rule()\nn_nodes_hl1 = 210\nn_nodes_hl2 = 210\nn_nodes_hl3 = 210\nn_classes = 2\nbatch_size = 500\nhm_epochs = 20\nx = tf.placeholder('float')\ny = tf.placeholder('float')\nz = tf.placeholder('float')\nhidden_1_layer = {'f_fum': n_nodes_hl1, 'weight': tf.Variable(tf.\n random_normal([train_x.shape[1], n_nodes_hl1])), 'bias': tf.Variable(tf\n .random_normal([n_nodes_hl1]))}\nhidden_2_layer = {'f_fum': n_nodes_hl2, 'weight': tf.Variable(tf.\n random_normal([n_nodes_hl1, n_nodes_hl2])), 'bias': tf.Variable(tf.\n random_normal([n_nodes_hl2]))}\nhidden_3_layer = {'f_fum': n_nodes_hl3, 'weight': tf.Variable(tf.\n random_normal([n_nodes_hl2, n_nodes_hl3])), 'bias': tf.Variable(tf.\n random_normal([n_nodes_hl3]))}\noutput_layer = {'f_fum': None, 'weight': tf.Variable(tf.random_normal([\n n_nodes_hl3, n_classes])), 'bias': tf.Variable(tf.random_normal([\n n_classes]))}\n\n\ndef neural_network_model(data):\n l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer[\n 'bias'])\n l1 = tf.nn.relu(l1)\n l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias']\n )\n l2 = tf.nn.relu(l2)\n l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias']\n )\n l3 = tf.nn.relu(l3)\n output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=\n prediction, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for epoch in range(hm_epochs):\n epoch_loss = 0\n i = 0\n while i < len(train_x):\n start = i\n end = i + batch_size\n batch_x = np.array(train_x[start:end])\n batch_y = np.array(train_y[start:end])\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y:\n batch_y})\n epoch_loss 
+= c\n i += batch_size\n print('Epoch', epoch + 1, 'completed out of', hm_epochs,\n 'loss:', epoch_loss)\n result_array = np.array([])\n batch_x = np.array(x_input)\n print(batch_x)\n result = sess.run(tf.argmax(prediction.eval(feed_dict={z: batch_x}), 1)\n )\n result_array = np.append(result_array, result)\n return result_array\n\n\ntrain_neural_network(x)\n",
"step-4": "from convert_data2 import array_rule\nfrom convert_data2 import array_packet\nimport tensorflow as tf\nimport numpy as np\ntrain_x, train_y = array_packet()\nx_input, input_ip = array_rule()\nn_nodes_hl1 = 210\nn_nodes_hl2 = 210\nn_nodes_hl3 = 210\nn_classes = 2\nbatch_size = 500\nhm_epochs = 20\nx = tf.placeholder('float')\ny = tf.placeholder('float')\nz = tf.placeholder('float')\nhidden_1_layer = {'f_fum': n_nodes_hl1, 'weight': tf.Variable(tf.\n random_normal([train_x.shape[1], n_nodes_hl1])), 'bias': tf.Variable(tf\n .random_normal([n_nodes_hl1]))}\nhidden_2_layer = {'f_fum': n_nodes_hl2, 'weight': tf.Variable(tf.\n random_normal([n_nodes_hl1, n_nodes_hl2])), 'bias': tf.Variable(tf.\n random_normal([n_nodes_hl2]))}\nhidden_3_layer = {'f_fum': n_nodes_hl3, 'weight': tf.Variable(tf.\n random_normal([n_nodes_hl2, n_nodes_hl3])), 'bias': tf.Variable(tf.\n random_normal([n_nodes_hl3]))}\noutput_layer = {'f_fum': None, 'weight': tf.Variable(tf.random_normal([\n n_nodes_hl3, n_classes])), 'bias': tf.Variable(tf.random_normal([\n n_classes]))}\n\n\ndef neural_network_model(data):\n l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer[\n 'bias'])\n l1 = tf.nn.relu(l1)\n l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias']\n )\n l2 = tf.nn.relu(l2)\n l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias']\n )\n l3 = tf.nn.relu(l3)\n output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=\n prediction, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for epoch in range(hm_epochs):\n epoch_loss = 0\n i = 0\n while i < len(train_x):\n start = i\n end = i + batch_size\n batch_x = np.array(train_x[start:end])\n batch_y = 
np.array(train_y[start:end])\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y:\n batch_y})\n epoch_loss += c\n i += batch_size\n print('Epoch', epoch + 1, 'completed out of', hm_epochs,\n 'loss:', epoch_loss)\n result_array = np.array([])\n batch_x = np.array(x_input)\n print(batch_x)\n result = sess.run(tf.argmax(prediction.eval(feed_dict={z: batch_x}), 1)\n )\n result_array = np.append(result_array, result)\n return result_array\n\n\ntrain_neural_network(x)\n",
"step-5": "from convert_data2 import array_rule\nfrom convert_data2 import array_packet\nimport tensorflow as tf\nimport numpy as np\n\ntrain_x, train_y = array_packet()\nx_input, input_ip = array_rule()\n\nn_nodes_hl1 = 210\nn_nodes_hl2 = 210\nn_nodes_hl3 = 210\n\nn_classes = 2\nbatch_size = 500\nhm_epochs = 20\n\nx = tf.placeholder('float')\ny = tf.placeholder('float')\nz = tf.placeholder('float')\n\nhidden_1_layer = {'f_fum': n_nodes_hl1,\n 'weight': tf.Variable(tf.random_normal([train_x.shape[1], n_nodes_hl1])),\n 'bias': tf.Variable(tf.random_normal([n_nodes_hl1]))}\n\nhidden_2_layer = {'f_fum': n_nodes_hl2,\n 'weight': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),\n 'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}\n\nhidden_3_layer = {'f_fum': n_nodes_hl3,\n 'weight': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),\n 'bias': tf.Variable(tf.random_normal([n_nodes_hl3]))}\n\noutput_layer = {'f_fum': None,\n 'weight': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),\n 'bias': tf.Variable(tf.random_normal([n_classes])), }\n\n\ndef neural_network_model(data):\n l1 = tf.add(tf.matmul(data, hidden_1_layer['weight']), hidden_1_layer['bias'])\n l1 = tf.nn.relu(l1)\n\n l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias'])\n l2 = tf.nn.relu(l2)\n\n l3 = tf.add(tf.matmul(l2, hidden_3_layer['weight']), hidden_3_layer['bias'])\n l3 = tf.nn.relu(l3)\n\n output = tf.matmul(l3, output_layer['weight']) + output_layer['bias']\n\n return output\n\n\ndef train_neural_network(x):\n prediction = neural_network_model(x)\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))\n optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for epoch in range(hm_epochs):\n epoch_loss = 0\n i = 0\n\n while i < len(train_x):\n start = i\n end = i + batch_size\n batch_x = np.array(train_x[start:end])\n batch_y 
= np.array(train_y[start:end])\n\n _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n y: batch_y})\n epoch_loss += c\n i += batch_size\n\n print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)\n\n result_array = np.array([])\n batch_x = np.array(x_input)\n print(batch_x)\n result = (sess.run(tf.argmax(prediction.eval(feed_dict={z: batch_x}), 1)))\n result_array = np.append(result_array, result)\n\n return result_array\n\ntrain_neural_network(x)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import sys
import sucessor
import expande
from collections import deque
def busca_caminho(nodo_final, nodo_inicial):
pilha_acoes = deque() # iremos empilhar as acoes já que a estaremos com a ordem reversa a priori
v = nodo_final
while v != nodo_inicial:
pilha_acoes.append(v.acao)
v = v.pai
return pilha_acoes
def busca_dfs(nodo_inicial, custo_maximo_atual):
objetivo = "12345678_"
custo_maximo_absoluto = 100 #profundedade maxima tolerada
explorados = set()
fronteira = deque()
fronteira.append(nodo_inicial)
if custo_maximo_atual > custo_maximo_absoluto: #se a profundedade maxima atual é maior do que a profundedade maxima tolerada retorna -1 pois provavelmente não existe uma solução
return -1
while True:
if not fronteira: # Se a fronteira esta vazia
explorados = None
return busca_dfs(nodo_inicial, custo_maximo_atual + 1) #executa a função novamente mas dessa vez com uma profundedade maxima maior
v = fronteira.pop() #pop em vez de popleft para tratar a fronteira como pilha
if v.estado == objetivo:
return busca_caminho(v, nodo_inicial)
if v not in explorados:
explorados.add(v)
estados_sucessores = sucessor.sucessor(v.estado)
# Cada estado atingível a partir de v é acrescentado à fronteira caso a profundidade dos novos estados não exceda a profundidade máxima
if (v.custo + 1) < custo_maximo_atual:
for e in estados_sucessores:
filho = expande.Nodo(e[1], v, e[0], v.custo + 1)
fronteira.append(filho)
def main():
#como eu não queria ter que modificar as classes que já existiam, usei o custo de cada estado como um sinônimo de profundidade, já que os novos estados sempre tem custo = custo do pai + 1
estado_inicial = sys.argv[1]
custo_inicial = 0
pai = expande.Nodo(estado_inicial, 0, "", custo_inicial)
caminho = busca_dfs(pai, 1)
while caminho:
print(caminho.pop(), end = " ")
print()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "a85a7ad6ffb2b9aa5f5326d11c75ddbee680fac4",
"index": 673,
"step-1": "<mask token>\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef busca_caminho(nodo_final, nodo_inicial):\n pilha_acoes = deque()\n v = nodo_final\n while v != nodo_inicial:\n pilha_acoes.append(v.acao)\n v = v.pai\n return pilha_acoes\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef busca_caminho(nodo_final, nodo_inicial):\n pilha_acoes = deque()\n v = nodo_final\n while v != nodo_inicial:\n pilha_acoes.append(v.acao)\n v = v.pai\n return pilha_acoes\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport sucessor\nimport expande\nfrom collections import deque\n\n\ndef busca_caminho(nodo_final, nodo_inicial):\n pilha_acoes = deque()\n v = nodo_final\n while v != nodo_inicial:\n pilha_acoes.append(v.acao)\n v = v.pai\n return pilha_acoes\n\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n objetivo = '12345678_'\n custo_maximo_absoluto = 100\n explorados = set()\n fronteira = deque()\n fronteira.append(nodo_inicial)\n if custo_maximo_atual > custo_maximo_absoluto:\n return -1\n while True:\n if not fronteira:\n explorados = None\n return busca_dfs(nodo_inicial, custo_maximo_atual + 1)\n v = fronteira.pop()\n if v.estado == objetivo:\n return busca_caminho(v, nodo_inicial)\n if v not in explorados:\n explorados.add(v)\n estados_sucessores = sucessor.sucessor(v.estado)\n if v.custo + 1 < custo_maximo_atual:\n for e in estados_sucessores:\n filho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n fronteira.append(filho)\n\n\ndef main():\n estado_inicial = sys.argv[1]\n custo_inicial = 0\n pai = expande.Nodo(estado_inicial, 0, '', custo_inicial)\n caminho = busca_dfs(pai, 1)\n while caminho:\n print(caminho.pop(), end=' ')\n print()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\nimport sucessor\nimport expande\nfrom collections import deque\n\ndef busca_caminho(nodo_final, nodo_inicial):\n\tpilha_acoes = deque() # iremos empilhar as acoes já que a estaremos com a ordem reversa a priori\n\tv = nodo_final\n\twhile v != nodo_inicial:\n\t\tpilha_acoes.append(v.acao)\n\t\tv = v.pai\n\treturn pilha_acoes\n\ndef busca_dfs(nodo_inicial, custo_maximo_atual):\n\tobjetivo = \"12345678_\"\n\tcusto_maximo_absoluto = 100 #profundedade maxima tolerada\n\texplorados = set()\n\tfronteira = deque()\n\tfronteira.append(nodo_inicial)\n\tif custo_maximo_atual > custo_maximo_absoluto: #se a profundedade maxima atual é maior do que a profundedade maxima tolerada retorna -1 pois provavelmente não existe uma solução\n\t\treturn -1\n\twhile True:\n\t\tif not fronteira: # Se a fronteira esta vazia\n\t\t\texplorados = None\n\t\t\treturn busca_dfs(nodo_inicial, custo_maximo_atual + 1) #executa a função novamente mas dessa vez com uma profundedade maxima maior\n\t\tv = fronteira.pop() #pop em vez de popleft para tratar a fronteira como pilha\n\t\tif v.estado == objetivo:\n\t\t\treturn busca_caminho(v, nodo_inicial)\n\t\tif v not in explorados:\n\t\t\texplorados.add(v)\n\t\t\testados_sucessores = sucessor.sucessor(v.estado)\n\t\t\t# Cada estado atingível a partir de v é acrescentado à fronteira caso a profundidade dos novos estados não exceda a profundidade máxima\n\t\t\tif (v.custo + 1) < custo_maximo_atual:\n\t\t\t\tfor e in estados_sucessores:\n\t\t\t\t\tfilho = expande.Nodo(e[1], v, e[0], v.custo + 1)\n\t\t\t\t\tfronteira.append(filho)\n\ndef main():\n\t#como eu não queria ter que modificar as classes que já existiam, usei o custo de cada estado como um sinônimo de profundidade, já que os novos estados sempre tem custo = custo do pai + 1\n\testado_inicial = sys.argv[1]\n\tcusto_inicial = 0\n\tpai = expande.Nodo(estado_inicial, 0, \"\", custo_inicial)\n\tcaminho = busca_dfs(pai, 1)\n\n\twhile caminho:\n\t\tprint(caminho.pop(), end = \" 
\")\n\tprint()\n\nif __name__ == '__main__':\n\tmain()\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def visualize_data(positive_images, negative_images):
figure = plt.figure()
count = 0
for i in range(positive_images.shape[0]):
count += 1
figure.add_subplot(2, positive_images.shape[0], count)
plt.imshow(positive_images[i, :, :])
plt.axis('off')
plt.title('1')
figure.add_subplot(1, negative_images.shape[0], count)
plt.imshow(negative_images[i, :, :])
plt.axis('off')
plt.title('0')
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
def visualize_data(positive_images, negative_images):
figure = plt.figure()
count = 0
for i in range(positive_images.shape[0]):
count += 1
figure.add_subplot(2, positive_images.shape[0], count)
plt.imshow(positive_images[i, :, :])
plt.axis('off')
plt.title('1')
figure.add_subplot(1, negative_images.shape[0], count)
plt.imshow(negative_images[i, :, :])
plt.axis('off')
plt.title('0')
plt.show()
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
def visualize_data(positive_images, negative_images):
# INPUTS
# positive_images - Images where the label = 1 (True)
# negative_images - Images where the label = 0 (False)
figure = plt.figure()
count = 0
for i in range(positive_images.shape[0]):
count += 1
figure.add_subplot(2, positive_images.shape[0], count)
plt.imshow(positive_images[i, :, :])
plt.axis('off')
plt.title("1")
figure.add_subplot(1, negative_images.shape[0], count)
plt.imshow(negative_images[i, :, :])
plt.axis('off')
plt.title("0")
plt.show()
|
flexible
|
{
"blob_id": "ebe79cf1b54870055ce8502430f5fae833f3d96d",
"index": 3121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef visualize_data(positive_images, negative_images):\n figure = plt.figure()\n count = 0\n for i in range(positive_images.shape[0]):\n count += 1\n figure.add_subplot(2, positive_images.shape[0], count)\n plt.imshow(positive_images[i, :, :])\n plt.axis('off')\n plt.title('1')\n figure.add_subplot(1, negative_images.shape[0], count)\n plt.imshow(negative_images[i, :, :])\n plt.axis('off')\n plt.title('0')\n plt.show()\n",
"step-3": "import matplotlib.pyplot as plt\n\n\ndef visualize_data(positive_images, negative_images):\n figure = plt.figure()\n count = 0\n for i in range(positive_images.shape[0]):\n count += 1\n figure.add_subplot(2, positive_images.shape[0], count)\n plt.imshow(positive_images[i, :, :])\n plt.axis('off')\n plt.title('1')\n figure.add_subplot(1, negative_images.shape[0], count)\n plt.imshow(negative_images[i, :, :])\n plt.axis('off')\n plt.title('0')\n plt.show()\n",
"step-4": "import matplotlib.pyplot as plt\r\n\r\ndef visualize_data(positive_images, negative_images):\r\n # INPUTS\r\n # positive_images - Images where the label = 1 (True)\r\n # negative_images - Images where the label = 0 (False)\r\n\r\n figure = plt.figure()\r\n count = 0\r\n for i in range(positive_images.shape[0]):\r\n count += 1\r\n figure.add_subplot(2, positive_images.shape[0], count)\r\n plt.imshow(positive_images[i, :, :])\r\n plt.axis('off')\r\n plt.title(\"1\")\r\n\r\n figure.add_subplot(1, negative_images.shape[0], count)\r\n plt.imshow(negative_images[i, :, :])\r\n plt.axis('off')\r\n plt.title(\"0\")\r\n plt.show()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,
parameter_std=1, error_mean=0, error_std=1):
"""
Generate a column that is a random linear combination of
X1, X2 and X3 plus some random error
"""
length = X.shape[0]
param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=
(num_dependent_cols,))
error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))
result = np.zeros(length)
for i in range(num_dependent_cols):
result += param[i] * X[:, i]
return result + error
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,
parameter_std=1, error_mean=0, error_std=1):
"""
Generate a column that is a random linear combination of
X1, X2 and X3 plus some random error
"""
length = X.shape[0]
param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=
(num_dependent_cols,))
error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))
result = np.zeros(length)
for i in range(num_dependent_cols):
result += param[i] * X[:, i]
return result + error
np.random.seed(472)
<|reserved_special_token_0|>
for i in range(num_independent_cols):
X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(
1, 5), size=(num_data,))
for i in range(3, 1000):
X[:, i] = linear_combination_plus_error(X, num_dependent_cols=
num_independent_cols, parameter_std=2, error_std=1)
<|reserved_special_token_0|>
np.random.shuffle(col_nums)
<|reserved_special_token_0|>
X[:, 1000] += abs(min(X[:, 1000])) + 5
<|reserved_special_token_0|>
X1_df.to_csv('./sensors1.csv', header=None, index=None)
<|reserved_special_token_0|>
X2_df.to_csv('./sensors2.csv', header=None, index=None)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,
parameter_std=1, error_mean=0, error_std=1):
"""
Generate a column that is a random linear combination of
X1, X2 and X3 plus some random error
"""
length = X.shape[0]
param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=
(num_dependent_cols,))
error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))
result = np.zeros(length)
for i in range(num_dependent_cols):
result += param[i] * X[:, i]
return result + error
np.random.seed(472)
num_data = 10100
num_independent_cols = 3
X = np.zeros((num_data, 1001))
for i in range(num_independent_cols):
X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(
1, 5), size=(num_data,))
for i in range(3, 1000):
X[:, i] = linear_combination_plus_error(X, num_dependent_cols=
num_independent_cols, parameter_std=2, error_std=1)
col_nums = list(range(1000))
np.random.shuffle(col_nums)
X[:, list(range(1000))] = X[:, col_nums]
X[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=
num_independent_cols, parameter_mean=5, parameter_std=2)
X[:, 1000] += abs(min(X[:, 1000])) + 5
X = np.floor(X * 1000) / 1000
X1 = X[:10000, :]
X2 = X[10000:, :]
X1_df = pd.DataFrame(X1)
X1_df.to_csv('./sensors1.csv', header=None, index=None)
X2_df = pd.DataFrame(X2)
X2_df.to_csv('./sensors2.csv', header=None, index=None)
<|reserved_special_token_1|>
import random
import numpy as np
import pandas as pd
def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,
parameter_std=1, error_mean=0, error_std=1):
"""
Generate a column that is a random linear combination of
X1, X2 and X3 plus some random error
"""
length = X.shape[0]
param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=
(num_dependent_cols,))
error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))
result = np.zeros(length)
for i in range(num_dependent_cols):
result += param[i] * X[:, i]
return result + error
np.random.seed(472)
num_data = 10100
num_independent_cols = 3
X = np.zeros((num_data, 1001))
for i in range(num_independent_cols):
X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(
1, 5), size=(num_data,))
for i in range(3, 1000):
X[:, i] = linear_combination_plus_error(X, num_dependent_cols=
num_independent_cols, parameter_std=2, error_std=1)
col_nums = list(range(1000))
np.random.shuffle(col_nums)
X[:, list(range(1000))] = X[:, col_nums]
X[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=
num_independent_cols, parameter_mean=5, parameter_std=2)
X[:, 1000] += abs(min(X[:, 1000])) + 5
X = np.floor(X * 1000) / 1000
X1 = X[:10000, :]
X2 = X[10000:, :]
X1_df = pd.DataFrame(X1)
X1_df.to_csv('./sensors1.csv', header=None, index=None)
X2_df = pd.DataFrame(X2)
X2_df.to_csv('./sensors2.csv', header=None, index=None)
<|reserved_special_token_1|>
import random
import numpy as np
import pandas as pd
def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0, parameter_std=1, error_mean=0, error_std=1):
"""
Generate a column that is a random linear combination of
X1, X2 and X3 plus some random error
"""
length = X.shape[0]
param = np.random.normal(loc=parameter_mean,
scale=parameter_std,
size=(num_dependent_cols,))
error = np.random.normal(loc=error_mean,
scale=error_std,
size=(length,))
result = np.zeros(length,)
for i in range(num_dependent_cols):
result += param[i] * X[:, i]
return result + error
np.random.seed(472)
num_data = 10100
num_independent_cols = 3
X = np.zeros((num_data, 1001))
# Generate 3 principal components
for i in range(num_independent_cols):
X[:, i] = np.random.normal(np.random.uniform(-5, 5),
np.random.uniform(1, 5), size=(num_data,))
# Generate other columns
for i in range(3, 1000):
X[:, i] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_std=2, error_std=1)
# Randomly suffle the 1000 feature columns
col_nums = list(range(1000))
np.random.shuffle(col_nums)
X[:, list(range(1000))] = X[:, col_nums]
# Randomly generate Y
X[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_mean=5, parameter_std=2)
X[:, 1000] += abs(min(X[:, 1000])) + 5
# Take only three digits after decimal point
X = np.floor(X * 1000) / 1000
# Split the data into 2 files
X1 = X[:10000, :]
X2 = X[10000:, :]
X1_df = pd.DataFrame(X1)
X1_df.to_csv("./sensors1.csv", header=None, index=None)
X2_df = pd.DataFrame(X2)
X2_df.to_csv("./sensors2.csv", header=None, index=None)
|
flexible
|
{
"blob_id": "48f2cc5b6d53c7317ad882947cabbc367cda0fb7",
"index": 905,
"step-1": "<mask token>\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\nnp.random.seed(472)\n<mask token>\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(\n 1, 5), size=(num_data,))\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_std=2, error_std=1)\n<mask token>\nnp.random.shuffle(col_nums)\n<mask token>\nX[:, 1000] += abs(min(X[:, 1000])) + 5\n<mask token>\nX1_df.to_csv('./sensors1.csv', header=None, index=None)\n<mask token>\nX2_df.to_csv('./sensors2.csv', header=None, index=None)\n",
"step-3": "<mask token>\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\nnp.random.seed(472)\nnum_data = 10100\nnum_independent_cols = 3\nX = np.zeros((num_data, 1001))\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(\n 1, 5), size=(num_data,))\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_std=2, error_std=1)\ncol_nums = list(range(1000))\nnp.random.shuffle(col_nums)\nX[:, list(range(1000))] = X[:, col_nums]\nX[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_mean=5, parameter_std=2)\nX[:, 1000] += abs(min(X[:, 1000])) + 5\nX = np.floor(X * 1000) / 1000\nX1 = X[:10000, :]\nX2 = X[10000:, :]\nX1_df = pd.DataFrame(X1)\nX1_df.to_csv('./sensors1.csv', header=None, index=None)\nX2_df = pd.DataFrame(X2)\nX2_df.to_csv('./sensors2.csv', header=None, index=None)\n",
"step-4": "import random\nimport numpy as np\nimport pandas as pd\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\nnp.random.seed(472)\nnum_data = 10100\nnum_independent_cols = 3\nX = np.zeros((num_data, 1001))\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(\n 1, 5), size=(num_data,))\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_std=2, error_std=1)\ncol_nums = list(range(1000))\nnp.random.shuffle(col_nums)\nX[:, list(range(1000))] = X[:, col_nums]\nX[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_mean=5, parameter_std=2)\nX[:, 1000] += abs(min(X[:, 1000])) + 5\nX = np.floor(X * 1000) / 1000\nX1 = X[:10000, :]\nX2 = X[10000:, :]\nX1_df = pd.DataFrame(X1)\nX1_df.to_csv('./sensors1.csv', header=None, index=None)\nX2_df = pd.DataFrame(X2)\nX2_df.to_csv('./sensors2.csv', header=None, index=None)\n",
"step-5": "import random\nimport numpy as np\nimport pandas as pd\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0, parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean,\n scale=parameter_std,\n size=(num_dependent_cols,))\n error = np.random.normal(loc=error_mean,\n scale=error_std,\n size=(length,))\n result = np.zeros(length,)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n \n\nnp.random.seed(472)\nnum_data = 10100\nnum_independent_cols = 3\n\nX = np.zeros((num_data, 1001))\n\n# Generate 3 principal components\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), \n np.random.uniform(1, 5), size=(num_data,))\n\n\n# Generate other columns\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_std=2, error_std=1)\n\n# Randomly suffle the 1000 feature columns\ncol_nums = list(range(1000))\nnp.random.shuffle(col_nums)\nX[:, list(range(1000))] = X[:, col_nums]\n\n# Randomly generate Y\nX[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_mean=5, parameter_std=2)\nX[:, 1000] += abs(min(X[:, 1000])) + 5\n\n\n# Take only three digits after decimal point\nX = np.floor(X * 1000) / 1000\n\n\n# Split the data into 2 files\nX1 = X[:10000, :]\nX2 = X[10000:, :]\nX1_df = pd.DataFrame(X1)\nX1_df.to_csv(\"./sensors1.csv\", header=None, index=None)\n\nX2_df = pd.DataFrame(X2)\nX2_df.to_csv(\"./sensors2.csv\", header=None, index=None)\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def descending_order(num):
return int(''.join(sorted(str(num), reverse=True)))
import unittest
class TestIsBalanced(unittest.TestCase):
def test_is_balanced(self):
self.assertEquals(descending_order(0), 0)
self.assertEquals(descending_order(15), 51)
self.assertEquals(descending_order(123456789), 987654321)
self.assertEquals(descending_order(1201), 2110)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "fc5d0dd16b87ab073bf4b054bd2641bdec88e019",
"index": 6594,
"step-1": "<mask token>\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\n<mask token>\n",
"step-2": "def descending_order(num):\n return int(''.join(sorted(str(num), reverse=True)))\n\n\n<mask token>\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\n<mask token>\n",
"step-3": "def descending_order(num):\n return int(''.join(sorted(str(num), reverse=True)))\n\n\n<mask token>\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "def descending_order(num):\n return int(''.join(sorted(str(num), reverse=True)))\n\n\nimport unittest\n\n\nclass TestIsBalanced(unittest.TestCase):\n\n def test_is_balanced(self):\n self.assertEquals(descending_order(0), 0)\n self.assertEquals(descending_order(15), 51)\n self.assertEquals(descending_order(123456789), 987654321)\n self.assertEquals(descending_order(1201), 2110)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
<|reserved_special_token_0|>
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
<|reserved_special_token_0|>
def set_axes_radius(ax, origin, radius):
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
def plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,
box_color='k'):
xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,
vap_cutoff, liq_in, vap_in, int_in)
fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(111, projection='3d')
ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.grid(False)
mesh2 = Poly3DCollection(interface1)
mesh2.set_edgecolor('none')
mesh2.set_alpha(0.3)
ax1.add_collection3d(mesh2)
mesh3 = Poly3DCollection(interface2)
mesh3.set_edgecolor('none')
mesh3.set_alpha(0.3)
ax1.add_collection3d(mesh3)
pos = inter.universe.atoms.positions
pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T
pos_liq = pos[liq_in]
xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])
ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')
pos_vap = pos[vap_in]
xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])
ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')
pos_int = pos[int_in]
xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])
ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')
pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))
for s, e in combinations(pts, 2):
if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):
ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)
ax1.set_xlabel('x')
ax1.set_ylabel('z')
ax1.set_zlabel('y')
plt.xlim([0, xlim])
plt.ylim([0, ylim])
ax1.set_xlim([0, xlim])
ax1.set_ylim([0, ylim])
ax1.set_zlim([0, zlim])
set_axes_equal(ax1)
ax1.view_init(0, 10)
plt.tight_layout()
plt.show()
def set_axes_radius(ax, origin, radius):
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from itertools import product, combinations
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
    """Swap the y and z coordinates of the triangulated surface and return
    the box limits plus two cutoff-shifted copies of the interface mesh.

    Parameters
    ----------
    inter : interface object exposing ``triangulated_surface`` as
        (vertices, faces) and ``universe.dimensions``.
    liq_cutoff, vap_cutoff : float
        Offsets applied along the swapped second axis for the two meshes.
    liq_in, vap_in, int_in : unused here; kept so the signature mirrors
        ``plot_interfaces``.

    Returns
    -------
    tuple
        ``(xlim, zlim, ylim, interface1, interface2)``.
    """
    surface = inter.triangulated_surface
    # Fancy indexing verts[faces] -> one (3, 3) coordinate array per triangle.
    triangles = surface[0][surface[1]]
    interface1 = np.zeros_like(triangles)
    interface2 = np.zeros_like(triangles)
    dims = inter.universe.dimensions
    # dims[1] is named zlim because the 2nd and 3rd axes are swapped below.
    xlim, zlim, ylim = dims[0], dims[1], dims[2]
    liq_shift = np.array([0, liq_cutoff, 0])
    vap_shift = np.array([0, vap_cutoff, 0])
    for idx, tri in enumerate(triangles):
        # Reorder columns so the original z becomes the plotted 2nd axis.
        swapped = np.array([tri[:, 0], tri[:, 2], tri[:, 1]]).T
        if tri[:, -1][0] < zlim:
            interface1[idx] = swapped + liq_shift
            interface2[idx] = swapped + vap_shift
        else:
            interface1[idx] = swapped - liq_shift
            interface2[idx] = swapped - vap_shift
    return xlim, zlim, ylim, interface1, interface2
def plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,
    box_color='k'):
    """Render the two interface meshes and the atom groups in one 3D figure.

    Draws the liquid- and vapour-cutoff meshes from ``switch_y_z`` as
    translucent surfaces, scatters the liquid (red), vapour (cyan) and
    interfacial (black) atoms with y/z swapped to match the meshes, outlines
    the simulation box, and shows the figure.
    """
    xlim, zlim, ylim, interface1, interface2 = switch_y_z(
        inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in)
    fig = plt.figure(figsize=(12, 12))
    ax1 = fig.add_subplot(111, projection='3d')
    # Transparent panes and no grid for an uncluttered background.
    for pane_axis in (ax1.xaxis, ax1.yaxis, ax1.zaxis):
        pane_axis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.grid(False)
    # One translucent Poly3DCollection per interface mesh.
    for triangle_set in (interface1, interface2):
        mesh = Poly3DCollection(triangle_set)
        mesh.set_edgecolor('none')
        mesh.set_alpha(0.3)
        ax1.add_collection3d(mesh)
    # Swap y and z of the positions so they line up with the swapped meshes.
    pos = inter.universe.atoms.positions
    pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T
    for selection, colour in ((liq_in, 'r'), (vap_in, 'c'), (int_in, 'k')):
        group = pos[selection]
        xyz = np.vstack([group[:, 0], group[:, 1], group[:, 2]])
        ax1.scatter(xyz[0], xyz[1], xyz[2], color=colour)
    # Box edges: corner pairs whose L1 distance equals a single box length.
    corners = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))
    for start, end in combinations(corners, 2):
        if np.sum(np.abs(start - end)) in (xlim, ylim, zlim):
            ax1.plot3D(*zip(start, end), 'k-', color=box_color, linewidth=1)
    ax1.set_xlabel('x')
    ax1.set_ylabel('z')
    ax1.set_zlabel('y')
    plt.xlim([0, xlim])
    plt.ylim([0, ylim])
    ax1.set_xlim([0, xlim])
    ax1.set_ylim([0, ylim])
    ax1.set_zlim([0, zlim])
    # ax1.set_aspect('equal') is unsupported in 3D; emulate it instead.
    set_axes_equal(ax1)
    ax1.view_init(0, 10)
    plt.tight_layout()
    plt.show()
def set_axes_radius(ax, origin, radius):
    """Set each 3D axis of *ax* to span ``origin[i] - radius`` to
    ``origin[i] + radius``."""
    cx, cy, cz = origin[0], origin[1], origin[2]
    ax.set_xlim3d([cx - radius, cx + radius])
    ax.set_ylim3d([cy - radius, cy + radius])
    ax.set_zlim3d([cz - radius, cz + radius])
def set_axes_equal(ax):
    """Give all three axes of a 3D plot the same scale.

    Matplotlib does not honour ``ax.set_aspect('equal')`` / ``ax.axis('equal')``
    for 3D axes; this helper emulates it by recentering every axis on its
    midpoint with a shared half-width of half the largest axis span.

    Input
        ax: a matplotlib axis, e.g., as output from plt.gca().
    """
    limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
    centers = limits.mean(axis=1)
    half_span = 0.5 * np.abs(np.diff(limits, axis=1)).max()
    set_axes_radius(ax, centers, half_span)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 14:34:56 2019
ref :
https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib
@author: jiedeng
"""
import numpy as np
from itertools import product, combinations
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
def switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in):
    """Swap the y and z coordinates of the triangulated surface and return
    the box limits plus two cutoff-shifted copies of the interface mesh.

    Parameters:
        inter: interface object exposing ``triangulated_surface`` as
            (vertices, faces) and ``universe.dimensions``.
        liq_cutoff, vap_cutoff: offsets applied along the swapped second
            axis for the liquid- and vapour-side meshes.
        liq_in, vap_in, int_in: unused here; kept so the signature mirrors
            ``plot_interfaces``.

    Returns:
        (xlim, zlim, ylim, interface1, interface2)
    """
    # Fancy indexing verts[faces] -> one (3, 3) coordinate array per triangle.
    triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
    interface1 = np.zeros_like(triangles)
    interface2 = np.zeros_like(triangles)
    # NOTE(review): dimensions[1] is bound to the name ``zlim`` because the
    # 2nd and 3rd axes are swapped below -- verify against the caller.
    xlim, zlim, ylim = inter.universe.dimensions[0],inter.universe.dimensions[1],inter.universe.dimensions[2]
    for i in range(len(triangles)):
        ## swap y and z columns of this triangle's vertex coordinates
        tmp = np.array([triangles[i][:,0],triangles[i][:,2],triangles[i][:,1]]).T
        # Shift by +cutoff below zlim and -cutoff above it.
        if triangles[i][:,-1][0] < zlim:
            interface1[i] = tmp + np.array([0,liq_cutoff,0])
            interface2[i] = tmp + np.array([0,vap_cutoff,0])
        else:
            interface1[i] = tmp - np.array([0,liq_cutoff,0])
            interface2[i] = tmp - np.array([0,vap_cutoff,0])
    return xlim,zlim,ylim,interface1,interface2
def plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in,box_color='k'):
    """Render both interface meshes and the atom groups in one 3D figure.

    Plots the liquid- and vapour-cutoff meshes from ``switch_y_z`` as
    translucent surfaces, scatters the liquid (red), vapour (cyan) and
    interfacial (black) atoms with y/z swapped to match the meshes, draws
    the simulation-box edges, and shows the figure.

    Parameters:
        inter: interface object with ``universe.atoms.positions`` and a
            ``triangulated_surface`` (consumed by ``switch_y_z``).
        liq_cutoff, vap_cutoff: mesh offsets, forwarded to ``switch_y_z``.
        liq_in, vap_in, int_in: index arrays selecting the liquid, vapour
            and interfacial atoms out of ``positions``.
        box_color: colour of the simulation-box edge lines.
    """
    xlim,zlim,ylim,interface1,interface2 = switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)
    fig = plt.figure(figsize=(12, 12))
    ax1 = fig.add_subplot(111, projection='3d')
    # Transparent axis panes and no grid for an uncluttered background.
    ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
    ax1.grid(False)
    # Fancy indexing: `verts[faces]` to generate a collection of triangles
#    mesh1 = Poly3DCollection(triangles)
#    mesh1.set_edgecolor('none')
#    mesh1.set_alpha(0.3)
#    ax1.add_collection3d(mesh1)
    # Liquid-cutoff mesh, translucent.
    mesh2 = Poly3DCollection(interface1)
    mesh2.set_edgecolor('none')
    mesh2.set_alpha(0.3)
    ax1.add_collection3d(mesh2)
    # Vapour-cutoff mesh.
    mesh3 = Poly3DCollection(interface2)
    mesh3.set_edgecolor('none')
    mesh3.set_alpha(0.3);
#    mesh3.set_facecolor('b')
    ax1.add_collection3d(mesh3)
    # Swap y and z of the positions so they line up with the swapped meshes.
    pos = inter.universe.atoms.positions
    pos = np.array([pos[:,0],pos[:,2],pos[:,1]]).T
    pos_liq = pos[liq_in]
    xyz_liq = np.vstack([pos_liq[::, 0], pos_liq[::, 1], pos_liq[::, 2]])
    ax1.scatter(xyz_liq[0],xyz_liq[1],xyz_liq[2],color='r')
    pos_vap = pos[vap_in]
    xyz_vap = np.vstack([pos_vap[::, 0], pos_vap[::, 1], pos_vap[::, 2]])
    ax1.scatter(xyz_vap[0],xyz_vap[1],xyz_vap[2],color='c')
    pos_int = pos[int_in]
    xyz_int = np.vstack([pos_int[::, 0], pos_int[::, 1], pos_int[::, 2]])
    ax1.scatter(xyz_int[0],xyz_int[1],xyz_int[2],color='k')
    # Box edges: corner pairs whose L1 distance equals a single box length.
    pts = np.array(list(product([0,xlim], [0,ylim], [0,zlim])))
    for s, e in combinations(pts, 2):
        if np.sum(np.abs(s-e)) in (xlim,ylim,zlim):
            ax1.plot3D(*zip(s, e), 'k-',color=box_color,linewidth=1)
    ax1.set_xlabel("x")
    ax1.set_ylabel("z")
    ax1.set_zlabel("y")
    plt.xlim([0,xlim])
    plt.ylim([0,ylim])
#    plt.ylim([0,ylim])
    ax1.set_xlim([0,xlim])
    ax1.set_ylim([0,ylim])
    ax1.set_zlim([0,zlim])
    # ax1.set_aspect('equal') is unsupported for 3D; emulate it instead.
    set_axes_equal(ax1)
    ax1.view_init(0, 10)
    plt.tight_layout()
    plt.show()
def set_axes_radius(ax, origin, radius):
    """Center each 3D axis of ``ax`` on the matching ``origin`` component
    with half-width ``radius``."""
    ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
    ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
    ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
    '''Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc.. This is one possible solution to Matplotlib's
    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.

    Input
      ax: a matplotlib axis, e.g., as output from plt.gca().
    '''
    # Current (min, max) pairs of the three axes, shape (3, 2).
    limits = np.array([
        ax.get_xlim3d(),
        ax.get_ylim3d(),
        ax.get_zlim3d(),])
    # Recenter every axis on its midpoint with a shared half-width equal to
    # half the largest span.
    origin = np.mean(limits, axis=1)
    radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
    set_axes_radius(ax, origin, radius)
#plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)
|
flexible
|
{
"blob_id": "086c74669b6762a6b35e8a46f816db2f4f172caa",
"index": 1437,
"step-1": "<mask token>\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\n<mask token>\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-2": "<mask token>\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\n<mask token>\n\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-3": "<mask token>\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\ndef plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,\n box_color='k'):\n xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,\n vap_cutoff, liq_in, vap_in, int_in)\n fig = plt.figure(figsize=(12, 12))\n ax1 = fig.add_subplot(111, projection='3d')\n ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.grid(False)\n mesh2 = Poly3DCollection(interface1)\n mesh2.set_edgecolor('none')\n mesh2.set_alpha(0.3)\n ax1.add_collection3d(mesh2)\n mesh3 = Poly3DCollection(interface2)\n mesh3.set_edgecolor('none')\n mesh3.set_alpha(0.3)\n ax1.add_collection3d(mesh3)\n pos = inter.universe.atoms.positions\n pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T\n pos_liq = pos[liq_in]\n xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])\n ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')\n pos_vap = pos[vap_in]\n xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])\n ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')\n pos_int = pos[int_in]\n xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])\n 
ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')\n pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))\n for s, e in combinations(pts, 2):\n if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):\n ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)\n ax1.set_xlabel('x')\n ax1.set_ylabel('z')\n ax1.set_zlabel('y')\n plt.xlim([0, xlim])\n plt.ylim([0, ylim])\n ax1.set_xlim([0, xlim])\n ax1.set_ylim([0, ylim])\n ax1.set_zlim([0, zlim])\n set_axes_equal(ax1)\n ax1.view_init(0, 10)\n plt.tight_layout()\n plt.show()\n\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom itertools import product, combinations\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport matplotlib.pyplot as plt\n\n\ndef switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[\n 1], inter.universe.dimensions[2]\n for i in range(len(triangles)):\n tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i\n ][:, 1]]).T\n if triangles[i][:, -1][0] < zlim:\n interface1[i] = tmp + np.array([0, liq_cutoff, 0])\n interface2[i] = tmp + np.array([0, vap_cutoff, 0])\n else:\n interface1[i] = tmp - np.array([0, liq_cutoff, 0])\n interface2[i] = tmp - np.array([0, vap_cutoff, 0])\n return xlim, zlim, ylim, interface1, interface2\n\n\ndef plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,\n box_color='k'):\n xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,\n vap_cutoff, liq_in, vap_in, int_in)\n fig = plt.figure(figsize=(12, 12))\n ax1 = fig.add_subplot(111, projection='3d')\n ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.grid(False)\n mesh2 = Poly3DCollection(interface1)\n mesh2.set_edgecolor('none')\n mesh2.set_alpha(0.3)\n ax1.add_collection3d(mesh2)\n mesh3 = Poly3DCollection(interface2)\n mesh3.set_edgecolor('none')\n mesh3.set_alpha(0.3)\n ax1.add_collection3d(mesh3)\n pos = inter.universe.atoms.positions\n pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T\n pos_liq = pos[liq_in]\n xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])\n ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')\n pos_vap = pos[vap_in]\n xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])\n 
ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')\n pos_int = pos[int_in]\n xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])\n ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')\n pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))\n for s, e in combinations(pts, 2):\n if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):\n ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)\n ax1.set_xlabel('x')\n ax1.set_ylabel('z')\n ax1.set_zlabel('y')\n plt.xlim([0, xlim])\n plt.ylim([0, ylim])\n ax1.set_xlim([0, xlim])\n ax1.set_ylim([0, ylim])\n ax1.set_zlim([0, zlim])\n set_axes_equal(ax1)\n ax1.view_init(0, 10)\n plt.tight_layout()\n plt.show()\n\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\n\ndef set_axes_equal(ax):\n \"\"\"Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n \"\"\"\n limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 30 14:34:56 2019\nref : \n https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib\n\n@author: jiedeng\n\"\"\"\nimport numpy as np\nfrom itertools import product, combinations\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport matplotlib.pyplot as plt\n\ndef switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in):\n triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]\n \n\n interface1 = np.zeros_like(triangles)\n interface2 = np.zeros_like(triangles)\n \n xlim, zlim, ylim = inter.universe.dimensions[0],inter.universe.dimensions[1],inter.universe.dimensions[2]\n \n for i in range(len(triangles)):\n ## swap y and z\n tmp = np.array([triangles[i][:,0],triangles[i][:,2],triangles[i][:,1]]).T\n if triangles[i][:,-1][0] < zlim:\n interface1[i] = tmp + np.array([0,liq_cutoff,0])\n interface2[i] = tmp + np.array([0,vap_cutoff,0])\n else:\n interface1[i] = tmp - np.array([0,liq_cutoff,0])\n interface2[i] = tmp - np.array([0,vap_cutoff,0]) \n return xlim,zlim,ylim,interface1,interface2\n \ndef plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in,box_color='k'):\n \n xlim,zlim,ylim,interface1,interface2 = switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)\n fig = plt.figure(figsize=(12, 12))\n \n ax1 = fig.add_subplot(111, projection='3d')\n ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax1.grid(False)\n\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\n# mesh1 = Poly3DCollection(triangles)\n# mesh1.set_edgecolor('none')\n# mesh1.set_alpha(0.3)\n# ax1.add_collection3d(mesh1)\n \n mesh2 = Poly3DCollection(interface1)\n mesh2.set_edgecolor('none')\n mesh2.set_alpha(0.3)\n ax1.add_collection3d(mesh2)\n \n mesh3 = Poly3DCollection(interface2)\n 
mesh3.set_edgecolor('none')\n mesh3.set_alpha(0.3);\n# mesh3.set_facecolor('b')\n ax1.add_collection3d(mesh3)\n \n pos = inter.universe.atoms.positions\n pos = np.array([pos[:,0],pos[:,2],pos[:,1]]).T\n pos_liq = pos[liq_in]\n xyz_liq = np.vstack([pos_liq[::, 0], pos_liq[::, 1], pos_liq[::, 2]])\n \n ax1.scatter(xyz_liq[0],xyz_liq[1],xyz_liq[2],color='r')\n \n pos_vap = pos[vap_in]\n xyz_vap = np.vstack([pos_vap[::, 0], pos_vap[::, 1], pos_vap[::, 2]])\n ax1.scatter(xyz_vap[0],xyz_vap[1],xyz_vap[2],color='c')\n \n pos_int = pos[int_in]\n xyz_int = np.vstack([pos_int[::, 0], pos_int[::, 1], pos_int[::, 2]])\n ax1.scatter(xyz_int[0],xyz_int[1],xyz_int[2],color='k')\n\n \n \n pts = np.array(list(product([0,xlim], [0,ylim], [0,zlim]))) \n for s, e in combinations(pts, 2):\n if np.sum(np.abs(s-e)) in (xlim,ylim,zlim):\n ax1.plot3D(*zip(s, e), 'k-',color=box_color,linewidth=1) \n \n \n ax1.set_xlabel(\"x\")\n ax1.set_ylabel(\"z\")\n ax1.set_zlabel(\"y\")\n\n plt.xlim([0,xlim])\n plt.ylim([0,ylim])\n# plt.ylim([0,ylim])\n\n ax1.set_xlim([0,xlim])\n ax1.set_ylim([0,ylim])\n ax1.set_zlim([0,zlim])\n \n # ax1.set_aspect('equal') \n set_axes_equal(ax1)\n ax1.view_init(0, 10)\n plt.tight_layout()\n plt.show()\n\ndef set_axes_radius(ax, origin, radius):\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\n\ndef set_axes_equal(ax):\n '''Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. 
This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n '''\n\n limits = np.array([\n ax.get_xlim3d(),\n ax.get_ylim3d(),\n ax.get_zlim3d(),])\n# print(limits)\n origin = np.mean(limits, axis=1)\n radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\n set_axes_radius(ax, origin, radius)\n\n\n#plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from enum import IntEnum
class DaqListType(IntEnum):
    """Type of a DAQ list.

    ``DAQ_STIM`` is numerically the combination of ``DAQ`` and ``STIM``
    (1 | 2 == 3).
    """

    DAQ = 1
    STIM = 2
    DAQ_STIM = 3
|
normal
|
{
"blob_id": "71e0137fc02b4f56bdf87cc15c275f5cca1588c4",
"index": 8925,
"step-1": "<mask token>\n\n\nclass DaqListType(IntEnum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DaqListType(IntEnum):\n <mask token>\n DAQ = 1\n STIM = 2\n DAQ_STIM = 3\n",
"step-3": "<mask token>\n\n\nclass DaqListType(IntEnum):\n \"\"\"\n This class describes a daq list type.\n \"\"\"\n DAQ = 1\n STIM = 2\n DAQ_STIM = 3\n",
"step-4": "from enum import IntEnum\n\n\nclass DaqListType(IntEnum):\n \"\"\"\n This class describes a daq list type.\n \"\"\"\n DAQ = 1\n STIM = 2\n DAQ_STIM = 3\n",
"step-5": "from enum import IntEnum\n\nclass DaqListType(IntEnum):\n \"\"\"\n This class describes a daq list type.\n \"\"\"\n DAQ = 0x01\n STIM = 0x02\n DAQ_STIM = 0x03",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 2.2.2 on 2019-07-17 10:02
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Create the ``Subscription`` model (auto-generated by Django 2.2.2).

    A subscription records an e-mail address, which stream it subscribes to
    (jobs / posts / newsletter), and an optional parental link to a
    ``users.Department``. Keep edits to this file comment-only so the
    recorded schema history stays intact.
    """

    # Depends on users migration 0003 (delete_userprofile) having run first.
    dependencies = [
        ('users', '0003_delete_userprofile'),
    ]

    operations = [
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=255)),
                ('subscribe_to', models.CharField(choices=[('jobs', 'Jobs'), ('posts', 'Posts'), ('newsletter', 'Newsletter')], max_length=100)),
                # ParentalKey (django-modelcluster) rather than a plain FK.
                ('department', modelcluster.fields.ParentalKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='department_subscriptions', to='users.Department')),
            ],
            options={
                'verbose_name': 'Subscription',
                'verbose_name_plural': 'Subscriptions',
            },
        ),
    ]
|
normal
|
{
"blob_id": "cf2c57dbb2c1160321bcd6de98691db48634d5d6",
"index": 5388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0003_delete_userprofile')]\n operations = [migrations.CreateModel(name='Subscription', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('email', models.EmailField(max_length=\n 255)), ('subscribe_to', models.CharField(choices=[('jobs', 'Jobs'),\n ('posts', 'Posts'), ('newsletter', 'Newsletter')], max_length=100)),\n ('department', modelcluster.fields.ParentalKey(null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name=\n 'department_subscriptions', to='users.Department'))], options={\n 'verbose_name': 'Subscription', 'verbose_name_plural':\n 'Subscriptions'})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\nimport modelcluster.fields\n\n\nclass Migration(migrations.Migration):\n dependencies = [('users', '0003_delete_userprofile')]\n operations = [migrations.CreateModel(name='Subscription', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('email', models.EmailField(max_length=\n 255)), ('subscribe_to', models.CharField(choices=[('jobs', 'Jobs'),\n ('posts', 'Posts'), ('newsletter', 'Newsletter')], max_length=100)),\n ('department', modelcluster.fields.ParentalKey(null=True, on_delete\n =django.db.models.deletion.CASCADE, related_name=\n 'department_subscriptions', to='users.Department'))], options={\n 'verbose_name': 'Subscription', 'verbose_name_plural':\n 'Subscriptions'})]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-07-17 10:02\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport modelcluster.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0003_delete_userprofile'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('email', models.EmailField(max_length=255)),\n ('subscribe_to', models.CharField(choices=[('jobs', 'Jobs'), ('posts', 'Posts'), ('newsletter', 'Newsletter')], max_length=100)),\n ('department', modelcluster.fields.ParentalKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='department_subscriptions', to='users.Department')),\n ],\n options={\n 'verbose_name': 'Subscription',\n 'verbose_name_plural': 'Subscriptions',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import numpy as np
from argparse import ArgumentParser
from collections import Counter
from typing import Iterable, Dict, Any, Tuple
from utils.constants import TRAIN, VALID, TEST, SAMPLE_ID, INPUTS, OUTPUT
from utils.file_utils import make_dir
from utils.data_writer import DataWriter
# Sliding-window parameters used to chunk each recording into samples.
WINDOW = 50  # timesteps per sample (after downsampling)
STRIDE = 25  # step between consecutive window starts (50% overlap)
DOWNSAMPLE_SKIP = 3  # keep every 3rd row of the raw recording
def get_partition(subject_id: int) -> str:
    """Map a subject id to its dataset split.

    Subjects 1-10 go to test, 11-15 to validation, and all higher ids
    to train.
    """
    if subject_id > 15:
        return TRAIN
    return TEST if subject_id <= 10 else VALID
def data_generator(input_folder: str) -> Iterable[Tuple[Dict[str, Any], str]]:
    """Yield (sample, partition) pairs from every subject recording under
    ``input_folder``.

    Each recording is downsampled by ``DOWNSAMPLE_SKIP`` and cut into
    overlapping windows of ``WINDOW`` timesteps with stride ``STRIDE``.
    Windows containing any 0-valued label are discarded; a kept window's
    label is the label of its final timestep.

    Args:
        input_folder: Directory with one sub-directory per subject, each
            holding whitespace-delimited recordings with one header row.
            Column 0 is a timestamp and the last column is the class label.

    Yields:
        Tuples of (sample dict keyed by SAMPLE_ID / INPUTS / OUTPUT,
        partition name from ``get_partition``).
    """
    sample_id = 0
    # Fix: iterate over the `input_folder` argument instead of the
    # module-level `args` namespace, so the function also works when it is
    # called outside the CLI entry point.
    for subject_id in sorted(os.listdir(input_folder)):
        folder = os.path.join(input_folder, subject_id)

        if not os.path.isdir(folder):
            continue

        for data_file in os.listdir(folder):
            try:
                dataset = np.loadtxt(os.path.join(folder, data_file),
                                     skiprows=1,
                                     dtype=str)
                downsampled_dataset = dataset[::DOWNSAMPLE_SKIP]

                for start in range(0, downsampled_dataset.shape[0] - WINDOW + 1, STRIDE):
                    end = start + WINDOW
                    data_chunk = downsampled_dataset[start:end].astype(float)

                    # Element 0 is the timestamp, and the final element is the class label
                    input_features = data_chunk[:, 1:-1]
                    labels = data_chunk[:, -1]

                    # Skip windows containing any unlabeled (0) timestep.
                    if all((label != 0 for label in labels)) and len(input_features) == WINDOW:
                        sample_dict = {
                            SAMPLE_ID: sample_id,
                            OUTPUT: labels[-1],
                            INPUTS: input_features.astype(float).tolist()
                        }

                        yield sample_dict, get_partition(int(subject_id))
                        sample_id += 1
            except ValueError as ex:
                # Malformed recording (non-numeric rows, ragged lines):
                # report it and move on to the next file.
                print(data_file)
                print(ex)
def tokenize_dataset(input_folder: str, output_folder: str, chunk_size: int):
    """Shard windowed samples into train/valid/test jsonl.gz writers and
    print the per-partition label counts."""
    make_dir(output_folder)

    partitions = (TRAIN, VALID, TEST)
    data_writers = {
        part: DataWriter(os.path.join(output_folder, part),
                         file_prefix='data',
                         file_suffix='jsonl.gz',
                         chunk_size=chunk_size)
        for part in partitions
    }
    partition_counters = {part: Counter() for part in partitions}

    # Route each generated sample to its partition's writer and tally labels.
    for index, (sample, partition) in enumerate(data_generator(input_folder)):
        data_writers[partition].add(sample)
        partition_counters[partition][sample[OUTPUT]] += 1

        if (index + 1) % chunk_size == 0:
            print('Wrote {0} samples.'.format(index + 1), end='\r')

    print()
    for writer in data_writers.values():
        writer.close()

    print(partition_counters)
if __name__ == '__main__':
    # CLI entry point: parse the input/output folders and shard size, then
    # run the tokenizer over the whole dataset.
    parser = ArgumentParser()
    parser.add_argument('--input-folder', type=str, required=True)
    parser.add_argument('--output-folder', type=str, required=True)
    parser.add_argument('--chunk-size', type=int, default=5000)
    args = parser.parse_args()
    tokenize_dataset(args.input_folder, args.output_folder, args.chunk_size)
|
normal
|
{
"blob_id": "e82dd2792ecbb8ed5a33012239102d2c6a02202b",
"index": 1749,
"step-1": "<mask token>\n\n\ndef get_partition(subject_id: int) ->str:\n if subject_id <= 10:\n return TEST\n elif subject_id <= 15:\n return VALID\n else:\n return TRAIN\n\n\ndef data_generator(input_folder: str) ->Iterable[Tuple[Dict[str, Any], str]]:\n sample_id = 0\n for subject_id in sorted(os.listdir(args.input_folder)):\n folder = os.path.join(args.input_folder, subject_id)\n if not os.path.isdir(folder):\n continue\n for data_file in os.listdir(folder):\n try:\n dataset = np.loadtxt(os.path.join(folder, data_file),\n skiprows=1, dtype=str)\n downsampled_dataset = dataset[::DOWNSAMPLE_SKIP]\n for start in range(0, downsampled_dataset.shape[0] - WINDOW +\n 1, STRIDE):\n end = start + WINDOW\n data_chunk = downsampled_dataset[start:end].astype(float)\n input_features = data_chunk[:, 1:-1]\n labels = data_chunk[:, -1]\n if all(label != 0 for label in labels) and len(\n input_features) == WINDOW:\n sample_dict = {SAMPLE_ID: sample_id, OUTPUT: labels\n [-1], INPUTS: input_features.astype(float).tolist()\n }\n yield sample_dict, get_partition(int(subject_id))\n sample_id += 1\n except ValueError as ex:\n print(data_file)\n print(ex)\n\n\ndef tokenize_dataset(input_folder: str, output_folder: str, chunk_size: int):\n make_dir(output_folder)\n data_writers = {TRAIN: DataWriter(os.path.join(output_folder, TRAIN),\n file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size),\n VALID: DataWriter(os.path.join(output_folder, VALID), file_prefix=\n 'data', file_suffix='jsonl.gz', chunk_size=chunk_size), TEST:\n DataWriter(os.path.join(output_folder, TEST), file_prefix='data',\n file_suffix='jsonl.gz', chunk_size=chunk_size)}\n partition_counters = {TRAIN: Counter(), VALID: Counter(), TEST: Counter()}\n for i, (sample, partition) in enumerate(data_generator(input_folder)):\n data_writers[partition].add(sample)\n partition_counters[partition][sample[OUTPUT]] += 1\n if (i + 1) % chunk_size == 0:\n print('Wrote {0} samples.'.format(i + 1), end='\\r')\n print()\n for 
writer in data_writers.values():\n writer.close()\n print(partition_counters)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_partition(subject_id: int) ->str:\n if subject_id <= 10:\n return TEST\n elif subject_id <= 15:\n return VALID\n else:\n return TRAIN\n\n\ndef data_generator(input_folder: str) ->Iterable[Tuple[Dict[str, Any], str]]:\n sample_id = 0\n for subject_id in sorted(os.listdir(args.input_folder)):\n folder = os.path.join(args.input_folder, subject_id)\n if not os.path.isdir(folder):\n continue\n for data_file in os.listdir(folder):\n try:\n dataset = np.loadtxt(os.path.join(folder, data_file),\n skiprows=1, dtype=str)\n downsampled_dataset = dataset[::DOWNSAMPLE_SKIP]\n for start in range(0, downsampled_dataset.shape[0] - WINDOW +\n 1, STRIDE):\n end = start + WINDOW\n data_chunk = downsampled_dataset[start:end].astype(float)\n input_features = data_chunk[:, 1:-1]\n labels = data_chunk[:, -1]\n if all(label != 0 for label in labels) and len(\n input_features) == WINDOW:\n sample_dict = {SAMPLE_ID: sample_id, OUTPUT: labels\n [-1], INPUTS: input_features.astype(float).tolist()\n }\n yield sample_dict, get_partition(int(subject_id))\n sample_id += 1\n except ValueError as ex:\n print(data_file)\n print(ex)\n\n\ndef tokenize_dataset(input_folder: str, output_folder: str, chunk_size: int):\n make_dir(output_folder)\n data_writers = {TRAIN: DataWriter(os.path.join(output_folder, TRAIN),\n file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size),\n VALID: DataWriter(os.path.join(output_folder, VALID), file_prefix=\n 'data', file_suffix='jsonl.gz', chunk_size=chunk_size), TEST:\n DataWriter(os.path.join(output_folder, TEST), file_prefix='data',\n file_suffix='jsonl.gz', chunk_size=chunk_size)}\n partition_counters = {TRAIN: Counter(), VALID: Counter(), TEST: Counter()}\n for i, (sample, partition) in enumerate(data_generator(input_folder)):\n data_writers[partition].add(sample)\n partition_counters[partition][sample[OUTPUT]] += 1\n if (i + 1) % chunk_size == 0:\n print('Wrote {0} samples.'.format(i + 1), end='\\r')\n print()\n for 
writer in data_writers.values():\n writer.close()\n print(partition_counters)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input-folder', type=str, required=True)\n parser.add_argument('--output-folder', type=str, required=True)\n parser.add_argument('--chunk-size', type=int, default=5000)\n args = parser.parse_args()\n tokenize_dataset(args.input_folder, args.output_folder, args.chunk_size)\n",
"step-3": "<mask token>\nWINDOW = 50\nSTRIDE = 25\nDOWNSAMPLE_SKIP = 3\n\n\ndef get_partition(subject_id: int) ->str:\n if subject_id <= 10:\n return TEST\n elif subject_id <= 15:\n return VALID\n else:\n return TRAIN\n\n\ndef data_generator(input_folder: str) ->Iterable[Tuple[Dict[str, Any], str]]:\n sample_id = 0\n for subject_id in sorted(os.listdir(args.input_folder)):\n folder = os.path.join(args.input_folder, subject_id)\n if not os.path.isdir(folder):\n continue\n for data_file in os.listdir(folder):\n try:\n dataset = np.loadtxt(os.path.join(folder, data_file),\n skiprows=1, dtype=str)\n downsampled_dataset = dataset[::DOWNSAMPLE_SKIP]\n for start in range(0, downsampled_dataset.shape[0] - WINDOW +\n 1, STRIDE):\n end = start + WINDOW\n data_chunk = downsampled_dataset[start:end].astype(float)\n input_features = data_chunk[:, 1:-1]\n labels = data_chunk[:, -1]\n if all(label != 0 for label in labels) and len(\n input_features) == WINDOW:\n sample_dict = {SAMPLE_ID: sample_id, OUTPUT: labels\n [-1], INPUTS: input_features.astype(float).tolist()\n }\n yield sample_dict, get_partition(int(subject_id))\n sample_id += 1\n except ValueError as ex:\n print(data_file)\n print(ex)\n\n\ndef tokenize_dataset(input_folder: str, output_folder: str, chunk_size: int):\n make_dir(output_folder)\n data_writers = {TRAIN: DataWriter(os.path.join(output_folder, TRAIN),\n file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size),\n VALID: DataWriter(os.path.join(output_folder, VALID), file_prefix=\n 'data', file_suffix='jsonl.gz', chunk_size=chunk_size), TEST:\n DataWriter(os.path.join(output_folder, TEST), file_prefix='data',\n file_suffix='jsonl.gz', chunk_size=chunk_size)}\n partition_counters = {TRAIN: Counter(), VALID: Counter(), TEST: Counter()}\n for i, (sample, partition) in enumerate(data_generator(input_folder)):\n data_writers[partition].add(sample)\n partition_counters[partition][sample[OUTPUT]] += 1\n if (i + 1) % chunk_size == 0:\n print('Wrote {0} 
samples.'.format(i + 1), end='\\r')\n print()\n for writer in data_writers.values():\n writer.close()\n print(partition_counters)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input-folder', type=str, required=True)\n parser.add_argument('--output-folder', type=str, required=True)\n parser.add_argument('--chunk-size', type=int, default=5000)\n args = parser.parse_args()\n tokenize_dataset(args.input_folder, args.output_folder, args.chunk_size)\n",
"step-4": "import os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom collections import Counter\nfrom typing import Iterable, Dict, Any, Tuple\nfrom utils.constants import TRAIN, VALID, TEST, SAMPLE_ID, INPUTS, OUTPUT\nfrom utils.file_utils import make_dir\nfrom utils.data_writer import DataWriter\nWINDOW = 50\nSTRIDE = 25\nDOWNSAMPLE_SKIP = 3\n\n\ndef get_partition(subject_id: int) ->str:\n if subject_id <= 10:\n return TEST\n elif subject_id <= 15:\n return VALID\n else:\n return TRAIN\n\n\ndef data_generator(input_folder: str) ->Iterable[Tuple[Dict[str, Any], str]]:\n sample_id = 0\n for subject_id in sorted(os.listdir(args.input_folder)):\n folder = os.path.join(args.input_folder, subject_id)\n if not os.path.isdir(folder):\n continue\n for data_file in os.listdir(folder):\n try:\n dataset = np.loadtxt(os.path.join(folder, data_file),\n skiprows=1, dtype=str)\n downsampled_dataset = dataset[::DOWNSAMPLE_SKIP]\n for start in range(0, downsampled_dataset.shape[0] - WINDOW +\n 1, STRIDE):\n end = start + WINDOW\n data_chunk = downsampled_dataset[start:end].astype(float)\n input_features = data_chunk[:, 1:-1]\n labels = data_chunk[:, -1]\n if all(label != 0 for label in labels) and len(\n input_features) == WINDOW:\n sample_dict = {SAMPLE_ID: sample_id, OUTPUT: labels\n [-1], INPUTS: input_features.astype(float).tolist()\n }\n yield sample_dict, get_partition(int(subject_id))\n sample_id += 1\n except ValueError as ex:\n print(data_file)\n print(ex)\n\n\ndef tokenize_dataset(input_folder: str, output_folder: str, chunk_size: int):\n make_dir(output_folder)\n data_writers = {TRAIN: DataWriter(os.path.join(output_folder, TRAIN),\n file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size),\n VALID: DataWriter(os.path.join(output_folder, VALID), file_prefix=\n 'data', file_suffix='jsonl.gz', chunk_size=chunk_size), TEST:\n DataWriter(os.path.join(output_folder, TEST), file_prefix='data',\n file_suffix='jsonl.gz', chunk_size=chunk_size)}\n 
partition_counters = {TRAIN: Counter(), VALID: Counter(), TEST: Counter()}\n for i, (sample, partition) in enumerate(data_generator(input_folder)):\n data_writers[partition].add(sample)\n partition_counters[partition][sample[OUTPUT]] += 1\n if (i + 1) % chunk_size == 0:\n print('Wrote {0} samples.'.format(i + 1), end='\\r')\n print()\n for writer in data_writers.values():\n writer.close()\n print(partition_counters)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input-folder', type=str, required=True)\n parser.add_argument('--output-folder', type=str, required=True)\n parser.add_argument('--chunk-size', type=int, default=5000)\n args = parser.parse_args()\n tokenize_dataset(args.input_folder, args.output_folder, args.chunk_size)\n",
"step-5": "import os\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom collections import Counter\nfrom typing import Iterable, Dict, Any, Tuple\n\nfrom utils.constants import TRAIN, VALID, TEST, SAMPLE_ID, INPUTS, OUTPUT\nfrom utils.file_utils import make_dir\nfrom utils.data_writer import DataWriter\n\n\nWINDOW = 50\nSTRIDE = 25\nDOWNSAMPLE_SKIP = 3\n\n\ndef get_partition(subject_id: int) -> str:\n if subject_id <= 10:\n return TEST\n elif subject_id <= 15:\n return VALID\n else:\n return TRAIN\n\n\ndef data_generator(input_folder: str) -> Iterable[Tuple[Dict[str, Any], str]]:\n sample_id = 0\n for subject_id in sorted(os.listdir(args.input_folder)):\n folder = os.path.join(args.input_folder, subject_id)\n if not os.path.isdir(folder):\n continue\n\n for data_file in os.listdir(folder):\n try:\n dataset = np.loadtxt(os.path.join(folder, data_file),\n skiprows=1,\n dtype=str)\n downsampled_dataset = dataset[::DOWNSAMPLE_SKIP]\n\n for start in range(0, downsampled_dataset.shape[0] - WINDOW + 1, STRIDE):\n end = start + WINDOW\n data_chunk = downsampled_dataset[start:end].astype(float)\n\n # Element 0 is the timestamp, and the final element is the class label\n input_features = data_chunk[:, 1:-1]\n labels = data_chunk[:, -1]\n\n if all((label != 0 for label in labels)) and len(input_features) == WINDOW:\n sample_dict = {\n SAMPLE_ID: sample_id,\n OUTPUT: labels[-1],\n INPUTS: input_features.astype(float).tolist()\n }\n yield sample_dict, get_partition(int(subject_id))\n\n sample_id += 1\n\n except ValueError as ex:\n print(data_file)\n print(ex)\n\n\ndef tokenize_dataset(input_folder: str, output_folder: str, chunk_size: int):\n make_dir(output_folder)\n data_writers = {\n TRAIN: DataWriter(os.path.join(output_folder, TRAIN), file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size),\n VALID: DataWriter(os.path.join(output_folder, VALID), file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size),\n TEST: 
DataWriter(os.path.join(output_folder, TEST), file_prefix='data', file_suffix='jsonl.gz', chunk_size=chunk_size)\n }\n\n partition_counters = {\n TRAIN: Counter(),\n VALID: Counter(),\n TEST: Counter()\n }\n\n for i, (sample, partition) in enumerate(data_generator(input_folder)):\n data_writers[partition].add(sample)\n partition_counters[partition][sample[OUTPUT]] += 1\n\n if (i + 1) % chunk_size == 0:\n print('Wrote {0} samples.'.format(i+1), end='\\r')\n print()\n\n for writer in data_writers.values():\n writer.close()\n\n print(partition_counters)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--input-folder', type=str, required=True)\n parser.add_argument('--output-folder', type=str, required=True)\n parser.add_argument('--chunk-size', type=int, default=5000)\n args = parser.parse_args()\n\n tokenize_dataset(args.input_folder, args.output_folder, args.chunk_size)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sqlite3
class DatabaseHands(object):
    """SQLite-backed store of three-card hands.

    Each row holds an auto-assigned integer id plus the three card values
    (columns ``first``, ``second``, ``third``).
    """

    def __init__(self, database):
        # Connect to the given database file and create the table on first use.
        self.conn = sqlite3.connect(database)
        self.cur = self.conn.cursor()
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS hands"
            "(id INTEGER PRIMARY KEY, first INTEGER,"
            "second INTEGER, third INTEGER)"
        )
        self.conn.commit()

    def count(self):
        """Return the total number of stored hands."""
        self.cur.execute("SELECT count(*) FROM hands")
        (total,) = self.cur.fetchone()
        return total

    def insert(self, hands):
        """Bulk-insert an iterable of ``(first, second, third)`` tuples."""
        self.cur.executemany("INSERT INTO hands VALUES (NULL,?,?,?)", hands)
        self.conn.commit()

    def search(self, id):
        """Return the ``(first, second, third)`` cards of the hand with *id*."""
        self.cur.execute("SELECT * FROM hands WHERE id=?", (id,))
        _, first, second, third = self.cur.fetchone()
        return (first, second, third)

    def __del__(self):
        # Best-effort close when the wrapper is garbage-collected.
        self.conn.close()
class DatabaseProbability(object):
    """SQLite-backed store of per-card win/draw/lose probabilities."""

    def __init__(self, database):
        # Connect to the given database file and create the table on first use.
        self.conn = sqlite3.connect(database)
        self.cur = self.conn.cursor()
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS probabilities"
            "(id INTEGER PRIMARY KEY, card INTEGER,"
            "win REAL, draw REAL, lose REAL)"
        )
        self.conn.commit()

    def insert(self, probabilities):
        """Bulk-insert an iterable of ``(card, win, draw, lose)`` tuples."""
        self.cur.executemany(
            "INSERT INTO probabilities VALUES (NULL,?,?,?,?)", probabilities)
        self.conn.commit()

    def search(self, card):
        """Return the ``(win, draw, lose)`` values recorded for *card*."""
        self.cur.execute("SELECT * FROM probabilities WHERE card=?", (card,))
        _, _, win, draw, lose = self.cur.fetchone()
        return (win, draw, lose)

    def __del__(self):
        # Best-effort close when the wrapper is garbage-collected.
        self.conn.close()
class DatabaseGames(object):
    """SQLite-backed record of games: three cards plus outcome probabilities."""

    def __init__(self, database):
        # Connect to the given database file and create the table on first use.
        self.conn = sqlite3.connect(database)
        self.cur = self.conn.cursor()
        self.cur.execute(
            "CREATE TABLE IF NOT EXISTS games"
            "(id INTEGER PRIMARY KEY,"
            " card1 INTEGER, card2 INTEGER, card3 INTEGER,"
            " win REAL, draw REAL, lose REAL)"
        )
        self.conn.commit()

    def count(self):
        """Return the number of recorded games."""
        self.cur.execute("SELECT count(*) FROM games")
        (total,) = self.cur.fetchone()
        return total

    def insert(self, card1, card2, card3, win, draw, lose):
        """Store a single game row with its cards and probabilities."""
        row = (card1, card2, card3, win, draw, lose)
        self.cur.execute("INSERT INTO games VALUES (NULL,?,?,?,?,?,?)", row)
        self.conn.commit()

    def view(self):
        """Return every stored game row."""
        self.cur.execute("SELECT * FROM games")
        return self.cur.fetchall()

    def search(self, card1="", card2="", card3=""):
        """Return the full row matching the given card triple, or None."""
        self.cur.execute(
            "SELECT * FROM games WHERE card1=? AND card2=? AND card3=?",
            (card1, card2, card3))
        return self.cur.fetchone()

    def __del__(self):
        # Best-effort close when the wrapper is garbage-collected.
        self.conn.close()
|
normal
|
{
"blob_id": "f8c85f34fb55ee1c3b3020bcec87b60ae80e4ed2",
"index": 3126,
"step-1": "<mask token>\n\n\nclass DatabaseHands(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DatabaseProbability(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS probabilities' +\n '(id INTEGER PRIMARY KEY, card INTEGER,' +\n 'win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def insert(self, probabilities):\n self.cur.executemany('INSERT INTO probabilities VALUES (NULL,?,?,?,?)',\n probabilities)\n self.conn.commit()\n\n def search(self, card):\n self.cur.execute('SELECT * FROM probabilities WHERE card=?', (card,))\n row = self.cur.fetchone()\n return row[2], row[3], row[4]\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseGames(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS games' +\n '(id INTEGER PRIMARY KEY,' +\n ' card1 INTEGER, card2 INTEGER, card3 INTEGER,' +\n ' win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def count(self):\n self.cur.execute('SELECT count(*) FROM games')\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, card1, card2, card3, win, draw, lose):\n self.cur.execute('INSERT INTO games VALUES' + ' (NULL,?,?,?,?,?,?)',\n (card1, card2, card3, win, draw, lose))\n self.conn.commit()\n\n def view(self):\n self.cur.execute('SELECT * FROM games')\n rows = self.cur.fetchall()\n return rows\n\n def search(self, card1='', card2='', card3=''):\n self.cur.execute('SELECT * FROM games WHERE' +\n ' card1=? AND card2=? AND card3=?', (card1, card2, card3))\n row = self.cur.fetchone()\n return row\n\n def __del__(self):\n self.conn.close()\n",
"step-2": "<mask token>\n\n\nclass DatabaseHands(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS hands' +\n '(id INTEGER PRIMARY KEY, first INTEGER,' +\n 'second INTEGER, third INTEGER)')\n self.conn.commit()\n <mask token>\n\n def insert(self, hands):\n self.cur.executemany('INSERT INTO hands VALUES (NULL,?,?,?)', hands)\n self.conn.commit()\n\n def search(self, id):\n self.cur.execute('SELECT * FROM hands WHERE id=?', (id,))\n row = self.cur.fetchone()\n return row[1], row[2], row[3]\n <mask token>\n\n\nclass DatabaseProbability(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS probabilities' +\n '(id INTEGER PRIMARY KEY, card INTEGER,' +\n 'win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def insert(self, probabilities):\n self.cur.executemany('INSERT INTO probabilities VALUES (NULL,?,?,?,?)',\n probabilities)\n self.conn.commit()\n\n def search(self, card):\n self.cur.execute('SELECT * FROM probabilities WHERE card=?', (card,))\n row = self.cur.fetchone()\n return row[2], row[3], row[4]\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseGames(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS games' +\n '(id INTEGER PRIMARY KEY,' +\n ' card1 INTEGER, card2 INTEGER, card3 INTEGER,' +\n ' win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def count(self):\n self.cur.execute('SELECT count(*) FROM games')\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, card1, card2, card3, win, draw, lose):\n self.cur.execute('INSERT INTO games VALUES' + ' (NULL,?,?,?,?,?,?)',\n (card1, card2, card3, win, draw, lose))\n self.conn.commit()\n\n def view(self):\n self.cur.execute('SELECT * FROM games')\n 
rows = self.cur.fetchall()\n return rows\n\n def search(self, card1='', card2='', card3=''):\n self.cur.execute('SELECT * FROM games WHERE' +\n ' card1=? AND card2=? AND card3=?', (card1, card2, card3))\n row = self.cur.fetchone()\n return row\n\n def __del__(self):\n self.conn.close()\n",
"step-3": "<mask token>\n\n\nclass DatabaseHands(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS hands' +\n '(id INTEGER PRIMARY KEY, first INTEGER,' +\n 'second INTEGER, third INTEGER)')\n self.conn.commit()\n <mask token>\n\n def insert(self, hands):\n self.cur.executemany('INSERT INTO hands VALUES (NULL,?,?,?)', hands)\n self.conn.commit()\n\n def search(self, id):\n self.cur.execute('SELECT * FROM hands WHERE id=?', (id,))\n row = self.cur.fetchone()\n return row[1], row[2], row[3]\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseProbability(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS probabilities' +\n '(id INTEGER PRIMARY KEY, card INTEGER,' +\n 'win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def insert(self, probabilities):\n self.cur.executemany('INSERT INTO probabilities VALUES (NULL,?,?,?,?)',\n probabilities)\n self.conn.commit()\n\n def search(self, card):\n self.cur.execute('SELECT * FROM probabilities WHERE card=?', (card,))\n row = self.cur.fetchone()\n return row[2], row[3], row[4]\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseGames(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS games' +\n '(id INTEGER PRIMARY KEY,' +\n ' card1 INTEGER, card2 INTEGER, card3 INTEGER,' +\n ' win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def count(self):\n self.cur.execute('SELECT count(*) FROM games')\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, card1, card2, card3, win, draw, lose):\n self.cur.execute('INSERT INTO games VALUES' + ' (NULL,?,?,?,?,?,?)',\n (card1, card2, card3, win, draw, lose))\n self.conn.commit()\n\n def view(self):\n 
self.cur.execute('SELECT * FROM games')\n rows = self.cur.fetchall()\n return rows\n\n def search(self, card1='', card2='', card3=''):\n self.cur.execute('SELECT * FROM games WHERE' +\n ' card1=? AND card2=? AND card3=?', (card1, card2, card3))\n row = self.cur.fetchone()\n return row\n\n def __del__(self):\n self.conn.close()\n",
"step-4": "import sqlite3\n\n\nclass DatabaseHands(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS hands' +\n '(id INTEGER PRIMARY KEY, first INTEGER,' +\n 'second INTEGER, third INTEGER)')\n self.conn.commit()\n\n def count(self):\n self.cur.execute('SELECT count(*) FROM hands')\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, hands):\n self.cur.executemany('INSERT INTO hands VALUES (NULL,?,?,?)', hands)\n self.conn.commit()\n\n def search(self, id):\n self.cur.execute('SELECT * FROM hands WHERE id=?', (id,))\n row = self.cur.fetchone()\n return row[1], row[2], row[3]\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseProbability(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS probabilities' +\n '(id INTEGER PRIMARY KEY, card INTEGER,' +\n 'win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def insert(self, probabilities):\n self.cur.executemany('INSERT INTO probabilities VALUES (NULL,?,?,?,?)',\n probabilities)\n self.conn.commit()\n\n def search(self, card):\n self.cur.execute('SELECT * FROM probabilities WHERE card=?', (card,))\n row = self.cur.fetchone()\n return row[2], row[3], row[4]\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseGames(object):\n\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute('CREATE TABLE IF NOT EXISTS games' +\n '(id INTEGER PRIMARY KEY,' +\n ' card1 INTEGER, card2 INTEGER, card3 INTEGER,' +\n ' win REAL, draw REAL, lose REAL)')\n self.conn.commit()\n\n def count(self):\n self.cur.execute('SELECT count(*) FROM games')\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, card1, card2, card3, win, draw, lose):\n self.cur.execute('INSERT INTO games VALUES' + ' 
(NULL,?,?,?,?,?,?)',\n (card1, card2, card3, win, draw, lose))\n self.conn.commit()\n\n def view(self):\n self.cur.execute('SELECT * FROM games')\n rows = self.cur.fetchall()\n return rows\n\n def search(self, card1='', card2='', card3=''):\n self.cur.execute('SELECT * FROM games WHERE' +\n ' card1=? AND card2=? AND card3=?', (card1, card2, card3))\n row = self.cur.fetchone()\n return row\n\n def __del__(self):\n self.conn.close()\n",
"step-5": "import sqlite3\n\n\nclass DatabaseHands(object):\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS hands\"\n + \"(id INTEGER PRIMARY KEY, first INTEGER,\"\n + \"second INTEGER, third INTEGER)\")\n self.conn.commit()\n\n def count(self):\n self.cur.execute(\"SELECT count(*) FROM hands\")\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, hands):\n self.cur.executemany(\"INSERT INTO hands VALUES (NULL,?,?,?)\", hands)\n self.conn.commit()\n\n # def view(self):\n # self.cur.execute(\"SELECT * FROM hands\")\n # rows = self.cur.fetchall()\n # return rows\n\n def search(self, id):\n self.cur.execute(\"SELECT * FROM hands WHERE id=?\", (id,))\n row = self.cur.fetchone()\n return (row[1], row[2], row[3])\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseProbability(object):\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS probabilities\"\n + \"(id INTEGER PRIMARY KEY, card INTEGER,\"\n + \"win REAL, draw REAL, lose REAL)\")\n self.conn.commit()\n\n def insert(self, probabilities):\n self.cur.executemany(\"INSERT INTO probabilities VALUES (NULL,?,?,?,?)\",\n probabilities)\n self.conn.commit()\n\n # def view(self):\n # self.cur.execute(\"SELECT * FROM probabilities\")\n # rows = self.cur.fetchall()\n # return rows\n\n def search(self, card):\n self.cur.execute(\"SELECT * FROM probabilities WHERE card=?\", (card,))\n row = self.cur.fetchone()\n return (row[2], row[3], row[4])\n\n def __del__(self):\n self.conn.close()\n\n\nclass DatabaseGames(object):\n def __init__(self, database):\n self.conn = sqlite3.connect(database)\n self.cur = self.conn.cursor()\n self.cur.execute(\"CREATE TABLE IF NOT EXISTS games\"\n + \"(id INTEGER PRIMARY KEY,\"\n + \" card1 INTEGER, card2 INTEGER, card3 INTEGER,\"\n + \" win REAL, draw REAL, lose 
REAL)\")\n self.conn.commit()\n\n def count(self):\n self.cur.execute(\"SELECT count(*) FROM games\")\n rows = self.cur.fetchone()\n return rows[0]\n\n def insert(self, card1, card2, card3, win, draw, lose):\n self.cur.execute(\"INSERT INTO games VALUES\"\n + \" (NULL,?,?,?,?,?,?)\",\n (card1, card2, card3, win, draw, lose))\n self.conn.commit()\n\n def view(self):\n self.cur.execute(\"SELECT * FROM games\")\n rows = self.cur.fetchall()\n return rows\n\n def search(self, card1=\"\", card2=\"\", card3=\"\"):\n self.cur.execute(\"SELECT * FROM games WHERE\"\n + \" card1=? AND card2=? AND card3=?\",\n (card1, card2, card3))\n row = self.cur.fetchone()\n return row\n\n def __del__(self):\n self.conn.close()\n",
"step-ids": [
13,
16,
17,
19,
20
]
}
|
[
13,
16,
17,
19,
20
] |
"""tables
Revision ID: 35f6815c3112
Revises: None
Create Date: 2013-07-28 21:15:38.385006
"""
# revision identifiers, used by Alembic.
revision = '35f6815c3112'  # unique identifier of this migration revision
down_revision = None  # no parent revision: this is the first migration
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the users, health_types, users_health, and positions tables."""
    # Local factories keep the repeated column definitions compact; the
    # column order below matches the originally generated schema exactly.
    def text_col(name, length):
        return sa.Column(name, sa.String(length=length), nullable=True)

    def int_col(name):
        return sa.Column(name, sa.Integer(), nullable=True)

    op.create_table(
        'users',
        sa.Column('id', sa.Integer(), nullable=False),
        text_col('firstname', 64),
        text_col('lastname', 64),
        text_col('email', 120),
        text_col('password', 64),
        text_col('address', 120),
        text_col('city', 64),
        text_col('state', 64),
        text_col('zipcode', 64),
        text_col('country', 64),
        int_col('role'),
        sa.Column('dob', sa.DateTime(), nullable=True),
        text_col('gender', 64),
        int_col('fitness'),
        int_col('experience'),
        sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_table(
        'health_types',
        sa.Column('id', sa.Integer(), nullable=False),
        text_col('issue', 64),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_table(
        'users_health',
        sa.Column('id', sa.Integer(), nullable=False),
        int_col('user_id'),
        int_col('health_id'),
        sa.ForeignKeyConstraint(['health_id'], ['health_types.id']),
        sa.ForeignKeyConstraint(['user_id'], ['users.id']),
        sa.PrimaryKeyConstraint('id'),
    )
    op.create_table(
        'positions',
        sa.Column('id', sa.Integer(), nullable=False),
        int_col('user_id'),
        text_col('position_type', 64),
        sa.ForeignKeyConstraint(['user_id'], ['users.id']),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('positions')
op.drop_table('users_health')
op.drop_table('health_types')
op.drop_table('users')
### end Alembic commands ###
|
normal
|
{
"blob_id": "9989d31dfe13809d67f629cc283cd02ce354a74e",
"index": 115,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('positions')\n op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n",
"step-3": "<mask token>\nrevision = '35f6815c3112'\ndown_revision = None\n<mask token>\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('positions')\n op.drop_table('users_health')\n 
op.drop_table('health_types')\n op.drop_table('users')\n",
"step-4": "<mask token>\nrevision = '35f6815c3112'\ndown_revision = None\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table('users', sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True), sa.\n Column('lastname', sa.String(length=64), nullable=True), sa.Column(\n 'email', sa.String(length=120), nullable=True), sa.Column(\n 'password', sa.String(length=64), nullable=True), sa.Column(\n 'address', sa.String(length=120), nullable=True), sa.Column('city',\n sa.String(length=64), nullable=True), sa.Column('state', sa.String(\n length=64), nullable=True), sa.Column('zipcode', sa.String(length=\n 64), nullable=True), sa.Column('country', sa.String(length=64),\n nullable=True), sa.Column('role', sa.Integer(), nullable=True), sa.\n Column('dob', sa.DateTime(), nullable=True), sa.Column('gender', sa\n .String(length=64), nullable=True), sa.Column('fitness', sa.Integer\n (), nullable=True), sa.Column('experience', sa.Integer(), nullable=\n True), sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id'))\n op.create_table('health_types', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('issue', sa.String(length=64), nullable=True), sa\n .PrimaryKeyConstraint('id'))\n op.create_table('users_health', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('health_id', sa.Integer(), nullable=True), sa.\n ForeignKeyConstraint(['health_id'], ['health_types.id']), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n op.create_table('positions', sa.Column('id', sa.Integer(), nullable=\n False), sa.Column('user_id', sa.Integer(), nullable=True), sa.\n Column('position_type', sa.String(length=64), nullable=True), sa.\n ForeignKeyConstraint(['user_id'], ['users.id']), sa.\n PrimaryKeyConstraint('id'))\n\n\ndef downgrade():\n op.drop_table('positions')\n 
op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n",
"step-5": "\"\"\"tables\n\nRevision ID: 35f6815c3112\nRevises: None\nCreate Date: 2013-07-28 21:15:38.385006\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '35f6815c3112'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True),\n sa.Column('lastname', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password', sa.String(length=64), nullable=True),\n sa.Column('address', sa.String(length=120), nullable=True),\n sa.Column('city', sa.String(length=64), nullable=True),\n sa.Column('state', sa.String(length=64), nullable=True),\n sa.Column('zipcode', sa.String(length=64), nullable=True),\n sa.Column('country', sa.String(length=64), nullable=True),\n sa.Column('role', sa.Integer(), nullable=True),\n sa.Column('dob', sa.DateTime(), nullable=True),\n sa.Column('gender', sa.String(length=64), nullable=True),\n sa.Column('fitness', sa.Integer(), nullable=True),\n sa.Column('experience', sa.Integer(), nullable=True),\n sa.Column('willing_teamLeader', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('health_types',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('issue', sa.String(length=64), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users_health',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('health_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['health_id'], ['health_types.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('positions',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n 
sa.Column('position_type', sa.String(length=64), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('positions')\n op.drop_table('users_health')\n op.drop_table('health_types')\n op.drop_table('users')\n ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
lemonpie.debug = True
lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
toolbar = DebugToolbarExtension(lemonpie)
lemonpie.run('0.0.0.0')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
lemonpie.debug = True
lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
toolbar = DebugToolbarExtension(lemonpie)
lemonpie.run('0.0.0.0')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from lemonpie import lemonpie
from flask_debugtoolbar import DebugToolbarExtension
def main():
lemonpie.debug = True
lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
toolbar = DebugToolbarExtension(lemonpie)
lemonpie.run('0.0.0.0')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
from lemonpie import lemonpie
from flask_debugtoolbar import DebugToolbarExtension
def main():
lemonpie.debug = True
lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
toolbar = DebugToolbarExtension(lemonpie)
lemonpie.run('0.0.0.0')
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "328c483bf59c6b84090e6bef8814e829398c5a56",
"index": 6954,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from lemonpie import lemonpie\nfrom flask_debugtoolbar import DebugToolbarExtension\n\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\nfrom lemonpie import lemonpie\nfrom flask_debugtoolbar import DebugToolbarExtension\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('blog', '0015_auto_20190709_1543')]
operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('model_pic', models.ImageField(null=
True, upload_to='image/'))]), migrations.RemoveField(model_name=
'post', name='photo')]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('blog', '0015_auto_20190709_1543')]
operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('model_pic', models.ImageField(null=
True, upload_to='image/'))]), migrations.RemoveField(model_name=
'post', name='photo')]
<|reserved_special_token_1|>
# Generated by Django 2.2.2 on 2019-07-09 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0015_auto_20190709_1543'),
]
operations = [
migrations.CreateModel(
name='ExampleModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model_pic', models.ImageField(null=True, upload_to='image/')),
],
),
migrations.RemoveField(
model_name='post',
name='photo',
),
]
|
flexible
|
{
"blob_id": "d6e06a78c9a5d8184e5adf9b99cc6030c3434558",
"index": 8464,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0015_auto_20190709_1543')]\n operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('model_pic', models.ImageField(null=\n True, upload_to='image/'))]), migrations.RemoveField(model_name=\n 'post', name='photo')]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0015_auto_20190709_1543')]\n operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('model_pic', models.ImageField(null=\n True, upload_to='image/'))]), migrations.RemoveField(model_name=\n 'post', name='photo')]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-07-09 20:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0015_auto_20190709_1543'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ExampleModel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('model_pic', models.ImageField(null=True, upload_to='image/')),\n ],\n ),\n migrations.RemoveField(\n model_name='post',\n name='photo',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,
hokprons: ZhTopolectPronunciations):
all_hokkien = set()
for word, syn_data in synonyms.all_words():
minnan = set(syn_data['Philippine-MN'])
minnan.update(syn_data['Quanzhou'])
minnan.update(syn_data['Xiamen'])
for hokkien in minnan:
banlamoe = hokkien.split(':')
all_hokkien.add(banlamoe[0])
return words_missing_prons(all_hokkien, hokprons)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,
hokprons: ZhTopolectPronunciations):
all_hokkien = set()
for word, syn_data in synonyms.all_words():
minnan = set(syn_data['Philippine-MN'])
minnan.update(syn_data['Quanzhou'])
minnan.update(syn_data['Xiamen'])
for hokkien in minnan:
banlamoe = hokkien.split(':')
all_hokkien.add(banlamoe[0])
return words_missing_prons(all_hokkien, hokprons)
def words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations
):
return [word for word in corpus if prons.pronunciation(word) is None and
all(ord(char) > 255 for char in word)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,
hokprons: ZhTopolectPronunciations):
all_hokkien = set()
for word, syn_data in synonyms.all_words():
minnan = set(syn_data['Philippine-MN'])
minnan.update(syn_data['Quanzhou'])
minnan.update(syn_data['Xiamen'])
for hokkien in minnan:
banlamoe = hokkien.split(':')
all_hokkien.add(banlamoe[0])
return words_missing_prons(all_hokkien, hokprons)
def words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations
):
return [word for word in corpus if prons.pronunciation(word) is None and
all(ord(char) > 255 for char in word)]
if __name__ == '__main__':
synonyms = ZhTopolectSynonyms.from_local_folder(
'../data/enwiktionary/module-zh-data-json/dial-syn')
mp = MandarinPronunciations.from_local_json_file(
'../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')
missing_mandarin_prons = iter(words_missing_prons(synonyms.
mandarin_words(), mp))
h = ZhTopolectPronunciations.from_local_json_folder(
'../data/enwiktionary/module-zh-data-json/nan-pron')
missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator
(synonyms, h))
today = datetime.today().strftime('%Y%m%d')
lines_to_textfile(
f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',
missing_hokkien_prons)
lines_to_textfile(
f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',
missing_mandarin_prons)
<|reserved_special_token_1|>
from datetime import datetime
from iohelpers import lines_to_textfile
from typing import Iterator, List, Sequence
from zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations
def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,
hokprons: ZhTopolectPronunciations):
all_hokkien = set()
for word, syn_data in synonyms.all_words():
minnan = set(syn_data['Philippine-MN'])
minnan.update(syn_data['Quanzhou'])
minnan.update(syn_data['Xiamen'])
for hokkien in minnan:
banlamoe = hokkien.split(':')
all_hokkien.add(banlamoe[0])
return words_missing_prons(all_hokkien, hokprons)
def words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations
):
return [word for word in corpus if prons.pronunciation(word) is None and
all(ord(char) > 255 for char in word)]
if __name__ == '__main__':
synonyms = ZhTopolectSynonyms.from_local_folder(
'../data/enwiktionary/module-zh-data-json/dial-syn')
mp = MandarinPronunciations.from_local_json_file(
'../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')
missing_mandarin_prons = iter(words_missing_prons(synonyms.
mandarin_words(), mp))
h = ZhTopolectPronunciations.from_local_json_folder(
'../data/enwiktionary/module-zh-data-json/nan-pron')
missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator
(synonyms, h))
today = datetime.today().strftime('%Y%m%d')
lines_to_textfile(
f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',
missing_hokkien_prons)
lines_to_textfile(
f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',
missing_mandarin_prons)
<|reserved_special_token_1|>
from datetime import datetime
from iohelpers import lines_to_textfile
from typing import Iterator, List, Sequence
from zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations
def missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms, hokprons: ZhTopolectPronunciations):
all_hokkien = set()
for word, syn_data in synonyms.all_words():
minnan = set(syn_data['Philippine-MN'])
minnan.update(syn_data['Quanzhou'])
minnan.update(syn_data['Xiamen'])
for hokkien in minnan:
banlamoe = hokkien.split(':')
all_hokkien.add(banlamoe[0])
return words_missing_prons(all_hokkien, hokprons)
def words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations):
return [word for word in corpus if prons.pronunciation(word) is None and all(ord(char) > 255 for char in word)]
if __name__ == '__main__':
synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn')
mp = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')
missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mp))
h = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron')
missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, h))
today = datetime.today().strftime("%Y%m%d")
lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons)
lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)
|
flexible
|
{
"blob_id": "18366633489d905c96b0c30d65442bc2e2b188ea",
"index": 4703,
"step-1": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder(\n '../data/enwiktionary/module-zh-data-json/dial-syn')\n mp = MandarinPronunciations.from_local_json_file(\n '../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.\n mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder(\n '../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator\n (synonyms, h))\n today = datetime.today().strftime('%Y%m%d')\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',\n missing_hokkien_prons)\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',\n missing_mandarin_prons)\n",
"step-4": "from datetime import datetime\nfrom iohelpers import lines_to_textfile\nfrom typing import Iterator, List, Sequence\nfrom zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms,\n hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations\n ):\n return [word for word in corpus if prons.pronunciation(word) is None and\n all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder(\n '../data/enwiktionary/module-zh-data-json/dial-syn')\n mp = MandarinPronunciations.from_local_json_file(\n '../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.\n mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder(\n '../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator\n (synonyms, h))\n today = datetime.today().strftime('%Y%m%d')\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt',\n missing_hokkien_prons)\n lines_to_textfile(\n f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt',\n missing_mandarin_prons)\n",
"step-5": "from datetime import datetime\nfrom iohelpers import lines_to_textfile\nfrom typing import Iterator, List, Sequence\nfrom zhmodules import ZhTopolectSynonyms, MandarinPronunciations, ZhTopolectPronunciations\n\n\ndef missing_philippine_hokkien_words_generator(synonyms: ZhTopolectSynonyms, hokprons: ZhTopolectPronunciations):\n all_hokkien = set()\n for word, syn_data in synonyms.all_words():\n minnan = set(syn_data['Philippine-MN'])\n minnan.update(syn_data['Quanzhou'])\n minnan.update(syn_data['Xiamen'])\n\n for hokkien in minnan:\n banlamoe = hokkien.split(':')\n all_hokkien.add(banlamoe[0])\n\n return words_missing_prons(all_hokkien, hokprons)\n\n\ndef words_missing_prons(corpus: Sequence[str], prons: ZhTopolectPronunciations):\n return [word for word in corpus if prons.pronunciation(word) is None and all(ord(char) > 255 for char in word)]\n\n\nif __name__ == '__main__':\n synonyms = ZhTopolectSynonyms.from_local_folder('../data/enwiktionary/module-zh-data-json/dial-syn')\n\n mp = MandarinPronunciations.from_local_json_file('../data/enwiktionary/module-zh-data-json/combined-mandarin-pron.json')\n missing_mandarin_prons = iter(words_missing_prons(synonyms.mandarin_words(), mp))\n h = ZhTopolectPronunciations.from_local_json_folder('../data/enwiktionary/module-zh-data-json/nan-pron')\n missing_hokkien_prons = iter(missing_philippine_hokkien_words_generator(synonyms, h))\n\n today = datetime.today().strftime(\"%Y%m%d\")\n lines_to_textfile(f'../data/enwiktionary/words-search/missing-hokkien.{today}.txt', missing_hokkien_prons)\n lines_to_textfile(f'../data/enwiktionary/words-search/missing-mandarin.{today}.txt', missing_mandarin_prons)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Modell(Resource):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def put(self, name):
item = StoreModel.find_by_name(name)
item.save_to_db()
return item.json()
<|reserved_special_token_0|>
class Storelist(Resource):
def get(self):
return {'item': [x for x in StoreModel.query.all()]}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Modell(Resource):
<|reserved_special_token_0|>
def post(self, name):
if StoreModel.find_by_name(name):
return {'message': 'sorry no store available in this name'}
item = StoreModel(name)
item.save_to_db()
return item.json()
def put(self, name):
item = StoreModel.find_by_name(name)
item.save_to_db()
return item.json()
<|reserved_special_token_0|>
class Storelist(Resource):
def get(self):
return {'item': [x for x in StoreModel.query.all()]}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Modell(Resource):
def get(self, name):
item = StoreModel.find_by_name(name)
return item.json()
def post(self, name):
if StoreModel.find_by_name(name):
return {'message': 'sorry no store available in this name'}
item = StoreModel(name)
item.save_to_db()
return item.json()
def put(self, name):
item = StoreModel.find_by_name(name)
item.save_to_db()
return item.json()
<|reserved_special_token_0|>
class Storelist(Resource):
def get(self):
return {'item': [x for x in StoreModel.query.all()]}
<|reserved_special_token_1|>
from flask_restful import Resource, reqparse
import sqlite3
from flask_jwt import jwt_required
from models.item_model import ItemModel
from flask_sqlalchemy import SQLAlchemy
from d import db
from models.store_model import StoreModel
class Modell(Resource):
def get(self, name):
item = StoreModel.find_by_name(name)
return item.json()
def post(self, name):
if StoreModel.find_by_name(name):
return {'message': 'sorry no store available in this name'}
item = StoreModel(name)
item.save_to_db()
return item.json()
def put(self, name):
item = StoreModel.find_by_name(name)
item.save_to_db()
return item.json()
def delete(self, name):
item = StoreModel.find_by_name(name)
if item:
item.delete_from_db()
return {'m': 'delted successfully'}
class Storelist(Resource):
def get(self):
return {'item': [x for x in StoreModel.query.all()]}
<|reserved_special_token_1|>
from flask_restful import Resource, reqparse
import sqlite3
from flask_jwt import jwt_required
from models.item_model import ItemModel
from flask_sqlalchemy import SQLAlchemy
from d import db
from models.store_model import StoreModel
class Modell(Resource):
def get(self, name):
item = StoreModel.find_by_name(name)
return item.json()
def post(self, name):
if StoreModel.find_by_name(name):
return {"message": "sorry no store available in this name"}
#data = Modell.requested.parse_args()
item = StoreModel(name)
item.save_to_db()
return item.json()
def put(self, name):
# data = Modell.requested.parse_args()
item = StoreModel.find_by_name(name)
item.save_to_db()
return item.json()
def delete(self, name):
item=StoreModel.find_by_name(name)
if item:
item.delete_from_db()
return {"m":"delted successfully"}
class Storelist(Resource):
    """REST resource listing all stores."""

    def get(self):
        # NOTE(review): returns raw StoreModel objects, which are not
        # JSON-serializable — likely should be x.json(); confirm.
        return {"item":[x for x in StoreModel.query.all()]}
|
flexible
|
{
"blob_id": "5616ec135a2233e742ff3b2b1f378ec12298b935",
"index": 9578,
"step-1": "<mask token>\n\n\nclass Modell(Resource):\n <mask token>\n <mask token>\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-2": "<mask token>\n\n\nclass Modell(Resource):\n <mask token>\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-3": "<mask token>\n\n\nclass Modell(Resource):\n\n def get(self, name):\n item = StoreModel.find_by_name(name)\n return item.json()\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n <mask token>\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-4": "from flask_restful import Resource, reqparse\nimport sqlite3\nfrom flask_jwt import jwt_required\nfrom models.item_model import ItemModel\nfrom flask_sqlalchemy import SQLAlchemy\nfrom d import db\nfrom models.store_model import StoreModel\n\n\nclass Modell(Resource):\n\n def get(self, name):\n item = StoreModel.find_by_name(name)\n return item.json()\n\n def post(self, name):\n if StoreModel.find_by_name(name):\n return {'message': 'sorry no store available in this name'}\n item = StoreModel(name)\n item.save_to_db()\n return item.json()\n\n def put(self, name):\n item = StoreModel.find_by_name(name)\n item.save_to_db()\n return item.json()\n\n def delete(self, name):\n item = StoreModel.find_by_name(name)\n if item:\n item.delete_from_db()\n return {'m': 'delted successfully'}\n\n\nclass Storelist(Resource):\n\n def get(self):\n return {'item': [x for x in StoreModel.query.all()]}\n",
"step-5": "from flask_restful import Resource, reqparse\r\nimport sqlite3\r\nfrom flask_jwt import jwt_required\r\nfrom models.item_model import ItemModel\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom d import db\r\nfrom models.store_model import StoreModel\r\n\r\n\r\nclass Modell(Resource):\r\n\r\n\r\n def get(self, name):\r\n item = StoreModel.find_by_name(name)\r\n return item.json()\r\n\r\n\r\n def post(self, name):\r\n if StoreModel.find_by_name(name):\r\n return {\"message\": \"sorry no store available in this name\"}\r\n #data = Modell.requested.parse_args()\r\n item = StoreModel(name)\r\n item.save_to_db()\r\n return item.json()\r\n\r\n\r\n def put(self, name):\r\n# data = Modell.requested.parse_args()\r\n item = StoreModel.find_by_name(name)\r\n\r\n\r\n\r\n item.save_to_db()\r\n return item.json()\r\n\r\n\r\n def delete(self, name):\r\n item=StoreModel.find_by_name(name)\r\n if item:\r\n item.delete_from_db()\r\n return {\"m\":\"delted successfully\"}\r\n\r\n\r\n\r\nclass Storelist(Resource):\r\n\r\n def get(self):\r\n return {\"item\":[x for x in StoreModel.query.all()]}",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
e.showWindow()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
e = Editor()
e.showWindow()
<|reserved_special_token_1|>
from editor.editor import Editor  # project-local editor class

e = Editor()  # create the editor instance
e.showWindow()  # open the editor window (runs at import time)
|
flexible
|
{
"blob_id": "46d6771fd9f589e2498cd019ba72232cbda06e5a",
"index": 3108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ne.showWindow()\n",
"step-3": "<mask token>\ne = Editor()\ne.showWindow()\n",
"step-4": "from editor.editor import Editor\ne = Editor()\ne.showWindow()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def login():
    """Prompt for credentials; on success show the menu, otherwise retry."""
    usernameInput = input("Username : ")
    passwordInput = input("Password : ")
    # Hard-coded demo credentials.
    if usernameInput == "admin" and passwordInput == "1234":
        return (showMenu())
    else:
        print("User or Password Wrong.")
        # Recurses until the user succeeds (no attempt limit).
        return login()
def showMenu():
    """Print the menu options and hand off to the selection loop."""
    print("---Please Choose Menu---")
    print("1. Vat7")
    print("2. Calculation")
    print("3. Vat Calulation")
    return menuSelect()
def menuSelect():
    """Read a menu choice and run it; recurses after options 1 and 2.

    NOTE(review): any choice other than 1-3 falls through and silently
    returns None; option 3 returns the VAT total without recursing.
    """
    usernameSelect1 = int(input("เลือกเมนู "))  # Thai prompt: "choose a menu"
    if usernameSelect1 == 1:
        price = int(input("Price : "))
        vat = 7
        # Same formula as vat7() below, duplicated inline.
        result = price + (price * vat / 100)
        print("ราคารวม Vat7 %",result)  # "total incl. 7% VAT"
        return menuSelect()
    elif usernameSelect1 == 2:
        price1 = int(input("ราคาชิ้นที่ 1 : "))  # "price of item 1"
        price2 = int(input("ราคาชิ้นที่ 2 : "))  # "price of item 2"
        sum = price1 + price2  # NOTE(review): shadows the builtin sum()
        print("ราคารวม :",sum)  # "total"
        return menuSelect()
    elif usernameSelect1 == 3:
        return (priceResult())
def vat7(totalPrice):
    """Return totalPrice plus 7% VAT."""
    vat = 7
    result = totalPrice + (totalPrice * vat / 100)
    return result
def priceResult():
    """Read two item prices and return their VAT-inclusive total."""
    price1 = int(input("ราคาชิ้นที่ 1 : "))
    price2 = int(input("ราคาชิ้นที่ 2 : "))
    return vat7(price1+price2)
print(login())  # entry point: runs at import time
|
normal
|
{
"blob_id": "34dd6966a971e3d32e82a17cd08c3b66bb88163b",
"index": 1277,
"step-1": "<mask token>\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\n<mask token>\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\n<mask token>\n",
"step-2": "def login():\n usernameInput = input('Username : ')\n passwordInput = input('Password : ')\n if usernameInput == 'admin' and passwordInput == '1234':\n return showMenu()\n else:\n print('User or Password Wrong.')\n return login()\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\ndef menuSelect():\n usernameSelect1 = int(input('เลือกเมนู '))\n if usernameSelect1 == 1:\n price = int(input('Price : '))\n vat = 7\n result = price + price * vat / 100\n print('ราคารวม Vat7 %', result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n sum = price1 + price2\n print('ราคารวม :', sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return priceResult()\n\n\n<mask token>\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\n<mask token>\n",
"step-3": "def login():\n usernameInput = input('Username : ')\n passwordInput = input('Password : ')\n if usernameInput == 'admin' and passwordInput == '1234':\n return showMenu()\n else:\n print('User or Password Wrong.')\n return login()\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\ndef menuSelect():\n usernameSelect1 = int(input('เลือกเมนู '))\n if usernameSelect1 == 1:\n price = int(input('Price : '))\n vat = 7\n result = price + price * vat / 100\n print('ราคารวม Vat7 %', result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n sum = price1 + price2\n print('ราคารวม :', sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return priceResult()\n\n\ndef vat7(totalPrice):\n vat = 7\n result = totalPrice + totalPrice * vat / 100\n return result\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\n<mask token>\n",
"step-4": "def login():\n usernameInput = input('Username : ')\n passwordInput = input('Password : ')\n if usernameInput == 'admin' and passwordInput == '1234':\n return showMenu()\n else:\n print('User or Password Wrong.')\n return login()\n\n\ndef showMenu():\n print('---Please Choose Menu---')\n print('1. Vat7')\n print('2. Calculation')\n print('3. Vat Calulation')\n return menuSelect()\n\n\ndef menuSelect():\n usernameSelect1 = int(input('เลือกเมนู '))\n if usernameSelect1 == 1:\n price = int(input('Price : '))\n vat = 7\n result = price + price * vat / 100\n print('ราคารวม Vat7 %', result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n sum = price1 + price2\n print('ราคารวม :', sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return priceResult()\n\n\ndef vat7(totalPrice):\n vat = 7\n result = totalPrice + totalPrice * vat / 100\n return result\n\n\ndef priceResult():\n price1 = int(input('ราคาชิ้นที่ 1 : '))\n price2 = int(input('ราคาชิ้นที่ 2 : '))\n return vat7(price1 + price2)\n\n\nprint(login())\n",
"step-5": "def login():\n usernameInput = input(\"Username : \")\n passwordInput = input(\"Password : \")\n if usernameInput == \"admin\" and passwordInput == \"1234\":\n return (showMenu())\n else:\n print(\"User or Password Wrong.\")\n return login()\ndef showMenu():\n print(\"---Please Choose Menu---\")\n print(\"1. Vat7\")\n print(\"2. Calculation\")\n print(\"3. Vat Calulation\")\n return menuSelect()\ndef menuSelect():\n usernameSelect1 = int(input(\"เลือกเมนู \"))\n if usernameSelect1 == 1:\n price = int(input(\"Price : \"))\n vat = 7\n result = price + (price * vat / 100)\n print(\"ราคารวม Vat7 %\",result)\n return menuSelect()\n elif usernameSelect1 == 2:\n price1 = int(input(\"ราคาชิ้นที่ 1 : \"))\n price2 = int(input(\"ราคาชิ้นที่ 2 : \"))\n sum = price1 + price2\n print(\"ราคารวม :\",sum)\n return menuSelect()\n elif usernameSelect1 == 3:\n return (priceResult())\ndef vat7(totalPrice):\n vat = 7\n result = totalPrice + (totalPrice * vat / 100)\n return result\ndef priceResult():\n price1 = int(input(\"ราคาชิ้นที่ 1 : \"))\n price2 = int(input(\"ราคาชิ้นที่ 2 : \"))\n return vat7(price1+price2)\nprint(login())\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Welcome to the Guessing Game 2.0\n')
print('1 = Easy\t(1 - 10)')
print('2 = Medium\t(1 - 50)')
print('3 = Hard\t(1 - 100)')
<|reserved_special_token_1|>
# Guessing Game 2.0 — banner and difficulty menu; the commented lines
# below are the author's implementation plan (not yet written).
print ("Welcome to the Guessing Game 2.0\n")
print ("1 = Easy\t(1 - 10)")
print ("2 = Medium\t(1 - 50)")
print ("3 = Hard\t(1 - 100)")
# Player: Input user's choice
# while: Check if user enters 1 or 2 or 3
# CPU: Generate a random number
# Player: Input user's number
# Variable: Add a variable 'attempt' and assign 1
# while: Check user number is wrong
# Conditional Statement: Check if user number is whether higher or lower.
# Player: Input user's number
# Variable: Add 1 to 'attempt'
# Result with attempts
# Player: Input user's choice
# Print: Thank you for playing the game.
|
flexible
|
{
"blob_id": "7f2489aa440441568af153b231420aa2736716ca",
"index": 4052,
"step-1": "<mask token>\n",
"step-2": "print('Welcome to the Guessing Game 2.0\\n')\nprint('1 = Easy\\t(1 - 10)')\nprint('2 = Medium\\t(1 - 50)')\nprint('3 = Hard\\t(1 - 100)')\n",
"step-3": "print (\"Welcome to the Guessing Game 2.0\\n\")\n\nprint (\"1 = Easy\\t(1 - 10)\")\nprint (\"2 = Medium\\t(1 - 50)\")\nprint (\"3 = Hard\\t(1 - 100)\")\n\n# Player: Input user's choice\n\n\n# while: Check if user enters 1 or 2 or 3\n\n\n # CPU: Generate a random number\n\n\n # Player: Input user's number\n\n\n # Variable: Add a variable 'attempt' and assign 1\n\n\n # while: Check user number is wrong\n\n\n # Conditional Statement: Check if user number is whether higher or lower.\n\n\n # Player: Input user's number\n\n\n # Variable: Add 1 to 'attempt'\n\n\n # Result with attempts\n\n\n # Player: Input user's choice\n\n\n# Print: Thank you for playing the game.",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import requests
from pyyoutube import Api
def get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):
    """Fetch one page of top-level comment threads for a video.

    Calls the YouTube Data API commentThreads endpoint and returns the
    decoded JSON payload as a Python dict.
    """
    template = ('https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'
                'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}')
    url = template.format(KEY=YOUTUBE_API_KEY,
                          videoId=videoId,
                          maxResults=maxResults,
                          nextPageToken=nextPageToken)
    response_text = requests.get(url).text
    return json.loads(response_text)
def get_text_of_comment(data):
    """Return the set of top-level comment texts found in an API response dict."""
    return {
        entry['snippet']['topLevelComment']['snippet']['textDisplay']
        for entry in data['items']
    }
def get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30, maxResults=10, nextPageToken=''):
    """Search videos matching *query* and collect their top-level comments.

    Searches up to count_video videos, downloads one page of up to
    maxResults comment threads per video, and returns all comments
    flattened into a single list.  Videos whose comment fetch fails
    (e.g. comments disabled) are skipped best-effort.
    """
    api = Api(api_key=YOUTUBE_API_KEY)
    video_by_keywords = api.search_by_keywords(q=query,
                                               search_type=["video"],
                                               count=count_video,
                                               limit=limit)
    video_ids = [result.id.videoId for result in video_by_keywords.items]

    comments = []
    for video_id in video_ids:
        try:
            data = get_data(YOUTUBE_API_KEY,
                            video_id,
                            maxResults=maxResults,
                            nextPageToken=nextPageToken)
            # extend() flattens as we go, replacing the original's
            # quadratic sum(list_of_lists, []).
            comments.extend(get_text_of_comment(data))
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; keep the best-effort skip but narrow the catch.
            continue
    return comments
|
normal
|
{
"blob_id": "4ed5ceb784fb1e3046ab9f10c4b556f2e94274db",
"index": 7054,
"step-1": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\n<mask token>\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-3": "<mask token>\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-4": "import json\nimport requests\nfrom pyyoutube import Api\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = (\n 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&'\n +\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n )\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY, videoId=\n videoId, maxResults=maxResults, nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30,\n maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query, search_type=[\n 'video'], count=count_video, limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY, id_video, maxResults=\n maxResults, nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-5": "import json\n\nimport requests\nfrom pyyoutube import Api\n\n\ndef get_data(YOUTUBE_API_KEY, videoId, maxResults, nextPageToken):\n \"\"\"\n Получение информации со страницы с видео по video id\n \"\"\"\n YOUTUBE_URI = 'https://www.googleapis.com/youtube/v3/commentThreads?key={KEY}&textFormat=plainText&' + \\\n 'part=snippet&videoId={videoId}&maxResults={maxResults}&pageToken={nextPageToken}'\n format_youtube_uri = YOUTUBE_URI.format(KEY=YOUTUBE_API_KEY,\n videoId=videoId,\n maxResults=maxResults,\n nextPageToken=nextPageToken)\n content = requests.get(format_youtube_uri).text\n data = json.loads(content)\n return data\n\n\ndef get_text_of_comment(data):\n \"\"\"\n Получение комментариев из полученных данных под одним видео\n \"\"\"\n comms = set()\n for item in data['items']:\n comm = item['snippet']['topLevelComment']['snippet']['textDisplay']\n comms.add(comm)\n return comms\n\n\ndef get_all_comments(YOUTUBE_API_KEY, query, count_video=10, limit=30, maxResults=10, nextPageToken=''):\n \"\"\"\n Выгрузка maxResults комментариев\n \"\"\"\n api = Api(api_key=YOUTUBE_API_KEY)\n video_by_keywords = api.search_by_keywords(q=query,\n search_type=[\"video\"],\n count=count_video,\n limit=limit)\n videoId = [x.id.videoId for x in video_by_keywords.items]\n\n comments_all = []\n for id_video in videoId:\n try:\n data = get_data(YOUTUBE_API_KEY,\n id_video,\n maxResults=maxResults,\n nextPageToken=nextPageToken)\n comment = list(get_text_of_comment(data))\n comments_all.append(comment)\n except:\n continue\n comments = sum(comments_all, [])\n return comments\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def line(start, end):
"""Draw line from start to end."""
up()
goto(start.x, start.y)
down()
goto(end.x, end.y)
def square(start, end):
"""Draw square from start to end."""
up()
goto(start.x, start.y)
down()
begin_fill()
for count in range(4):
forward(end.x - start.x)
left(90)
end_fill()
def circulo(start, end):
"""Draw circle from start to end."""
distancia = end.x - start.x
begin_fill()
circle(distancia)
end_fill()
def rectangle(start, end):
"""Draw rectangle from start to end."""
L = end.x - start.x
begin_fill()
for count in range(4):
if count % 2:
forward(L)
else:
forward(L / 2)
left(90)
end_fill()
def triangle(start, end):
"""Draw triangle from start to end."""
L = end.x - start.x
begin_fill()
for count in range(3):
forward(L)
left(120)
end_fill()
def tap(x, y):
"""Store starting point or draw shape."""
start = state['start']
if start is None:
state['start'] = vector(x, y)
else:
shape = state['shape']
end = vector(x, y)
shape(start, end)
state['start'] = None
def store(key, value):
"""Store value in state at key."""
state[key] = value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line(start, end):
"""Draw line from start to end."""
up()
goto(start.x, start.y)
down()
goto(end.x, end.y)
def square(start, end):
"""Draw square from start to end."""
up()
goto(start.x, start.y)
down()
begin_fill()
for count in range(4):
forward(end.x - start.x)
left(90)
end_fill()
def circulo(start, end):
"""Draw circle from start to end."""
distancia = end.x - start.x
begin_fill()
circle(distancia)
end_fill()
def rectangle(start, end):
"""Draw rectangle from start to end."""
L = end.x - start.x
begin_fill()
for count in range(4):
if count % 2:
forward(L)
else:
forward(L / 2)
left(90)
end_fill()
def triangle(start, end):
"""Draw triangle from start to end."""
L = end.x - start.x
begin_fill()
for count in range(3):
forward(L)
left(120)
end_fill()
def tap(x, y):
"""Store starting point or draw shape."""
start = state['start']
if start is None:
state['start'] = vector(x, y)
else:
shape = state['shape']
end = vector(x, y)
shape(start, end)
state['start'] = None
def store(key, value):
"""Store value in state at key."""
state[key] = value
<|reserved_special_token_0|>
setup(420, 420, 370, 0)
onscreenclick(tap)
listen()
onkey(undo, 'u')
onkey(lambda : color('black'), 'K')
onkey(lambda : color('#F5B7B1'), 'Q')
onkey(lambda : color('#00ffff'), 'Y')
onkey(lambda : color('white'), 'W')
onkey(lambda : color('green'), 'G')
onkey(lambda : color('blue'), 'B')
onkey(lambda : color('red'), 'R')
onkey(lambda : store('shape', line), 'l')
onkey(lambda : store('shape', square), 's')
onkey(lambda : store('shape', circulo), 'c')
onkey(lambda : store('shape', rectangle), 'r')
onkey(lambda : store('shape', triangle), 't')
done()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def line(start, end):
"""Draw line from start to end."""
up()
goto(start.x, start.y)
down()
goto(end.x, end.y)
def square(start, end):
"""Draw square from start to end."""
up()
goto(start.x, start.y)
down()
begin_fill()
for count in range(4):
forward(end.x - start.x)
left(90)
end_fill()
def circulo(start, end):
"""Draw circle from start to end."""
distancia = end.x - start.x
begin_fill()
circle(distancia)
end_fill()
def rectangle(start, end):
"""Draw rectangle from start to end."""
L = end.x - start.x
begin_fill()
for count in range(4):
if count % 2:
forward(L)
else:
forward(L / 2)
left(90)
end_fill()
def triangle(start, end):
"""Draw triangle from start to end."""
L = end.x - start.x
begin_fill()
for count in range(3):
forward(L)
left(120)
end_fill()
def tap(x, y):
"""Store starting point or draw shape."""
start = state['start']
if start is None:
state['start'] = vector(x, y)
else:
shape = state['shape']
end = vector(x, y)
shape(start, end)
state['start'] = None
def store(key, value):
"""Store value in state at key."""
state[key] = value
state = {'start': None, 'shape': line}
setup(420, 420, 370, 0)
onscreenclick(tap)
listen()
onkey(undo, 'u')
onkey(lambda : color('black'), 'K')
onkey(lambda : color('#F5B7B1'), 'Q')
onkey(lambda : color('#00ffff'), 'Y')
onkey(lambda : color('white'), 'W')
onkey(lambda : color('green'), 'G')
onkey(lambda : color('blue'), 'B')
onkey(lambda : color('red'), 'R')
onkey(lambda : store('shape', line), 'l')
onkey(lambda : store('shape', square), 's')
onkey(lambda : store('shape', circulo), 'c')
onkey(lambda : store('shape', rectangle), 'r')
onkey(lambda : store('shape', triangle), 't')
done()
<|reserved_special_token_1|>
from turtle import *
from freegames import vector  # third-party helper: simple 2-D vector


def line(start, end):
    """Draw line from start to end."""
    up()
    goto(start.x, start.y)
    down()
    goto(end.x, end.y)


def square(start, end):
    """Draw square from start to end.

    Side length is the x-distance between the two taps.
    """
    up()
    goto(start.x, start.y)
    down()
    begin_fill()
    for count in range(4):
        forward(end.x - start.x)
        left(90)
    end_fill()


def circulo(start, end):
    """Draw circle from start to end.

    NOTE(review): unlike square(), this draws at the turtle's current
    position instead of moving to *start* first; the x-distance between
    the taps is used as the radius.
    """
    distancia = end.x - start.x
    begin_fill()
    circle(distancia)
    end_fill()


def rectangle(start, end):
    """Draw rectangle from start to end.

    Length is the x-distance between the taps, width is half of that.
    NOTE(review): also drawn at the current turtle position/heading,
    not at *start* — confirm this is intended.
    """
    L = end.x - start.x
    begin_fill()
    for count in range(4):
        if count % 2:
            forward(L)
        else:
            forward(L / 2)
        left(90)
    end_fill()


def triangle(start, end):
    """Draw equilateral triangle (side = x-distance between taps).

    NOTE(review): drawn at the current turtle position, not at *start*.
    """
    L = end.x - start.x
    begin_fill()
    for count in range(3):
        forward(L)
        left(120)
    end_fill()


def tap(x, y):
    """Store starting point or draw shape.

    First tap records the start point; second tap draws the currently
    selected shape from start to the tapped point.
    """
    start = state['start']
    if start is None:
        state['start'] = vector(x, y)
    else:
        shape = state['shape']
        end = vector(x, y)
        shape(start, end)
        state['start'] = None


def store(key, value):
    """Store value in state at key."""
    state[key] = value


# Mutable app state: pending start point and the active shape function.
state = {'start': None, 'shape': line}
setup(420, 420, 370, 0)  # 420x420 window; opens at import time
onscreenclick(tap)
listen()
onkey(undo, 'u')
# Color hotkeys (uppercase) and shape hotkeys (lowercase).
onkey(lambda : color('black'), 'K')
onkey(lambda : color('#F5B7B1'), 'Q')
onkey(lambda : color('#00ffff'), 'Y')
onkey(lambda : color('white'), 'W')
onkey(lambda : color('green'), 'G')
onkey(lambda : color('blue'), 'B')
onkey(lambda : color('red'), 'R')
onkey(lambda : store('shape', line), 'l')
onkey(lambda : store('shape', square), 's')
onkey(lambda : store('shape', circulo), 'c')
onkey(lambda : store('shape', rectangle), 'r')
onkey(lambda : store('shape', triangle), 't')
done()  # enter the turtle main loop (blocks)
<|reserved_special_token_1|>
from turtle import *
from freegames import vector  # third-party helper: simple 2-D vector

def line(start, end):
    "Draw line from start to end."
    up()
    goto(start.x, start.y)
    down()
    goto(end.x, end.y)

def square(start, end):
    "Draw square from start to end."
    up()
    goto(start.x, start.y)
    down()
    begin_fill()

    for count in range(4):
        forward(end.x - start.x)
        left(90)

    end_fill()

def circulo(start, end):
    "Draw circle from start to end."
    # Radius is the x-distance between the two taps; drawn at the
    # turtle's current position (not moved to start first).
    distancia = end.x - start.x
    begin_fill()
    circle(distancia)
    end_fill()

# Adrian
def rectangle(start, end): # Creates a rectangle (NOTE(review): bound to key 'r' below; 'R' sets red color)
    "Draw rectangle from start to end."
    L = end.x - start.x # rectangle length
    begin_fill() # start fill
    for count in range(4): # four sides of the rectangle
        if(count % 2): # alternate the two side lengths
            forward(L)
        else:
            forward(L/2) # rectangle width
        left(90) # turn left 90 degrees
    end_fill()

def triangle(start, end):
    "Draw triangle from start to end."
    # Ricardo: triangle
    L = end.x - start.x # side length
    begin_fill() # start fill
    for count in range(3): # three strokes turning 120 degrees
        forward(L)
        left(120) # turn angle
    end_fill()


def tap(x, y):
    "Store starting point or draw shape."
    start = state['start']

    if start is None:
        state['start'] = vector(x, y)
    else:
        shape = state['shape']
        end = vector(x, y)
        shape(start, end)
        state['start'] = None

def store(key, value):
    "Store value in state at key."
    state[key] = value

# Mutable app state: pending start point and active shape function.
state = {'start': None, 'shape': line}
setup(420, 420, 370, 0)
onscreenclick(tap)
listen()
onkey(undo, 'u')
onkey(lambda: color('black'), 'K')
onkey(lambda: color('#F5B7B1'), 'Q') # teacher's color
onkey(lambda: color('#00ffff'), 'Y') # Ricardo's color
onkey(lambda: color('white'), 'W')
onkey(lambda: color('green'), 'G')
onkey(lambda: color('blue'), 'B')
onkey(lambda: color('red'), 'R')
onkey(lambda: store('shape', line), 'l')
onkey(lambda: store('shape', square), 's')
onkey(lambda: store('shape', circulo), 'c')
onkey(lambda: store('shape', rectangle), 'r')
onkey(lambda: store('shape', triangle), 't')
done()
|
flexible
|
{
"blob_id": "803283c9dac78c821373fa1025008b04919df72c",
"index": 5404,
"step-1": "<mask token>\n\n\ndef line(start, end):\n \"\"\"Draw line from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n goto(end.x, end.y)\n\n\ndef square(start, end):\n \"\"\"Draw square from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n begin_fill()\n for count in range(4):\n forward(end.x - start.x)\n left(90)\n end_fill()\n\n\ndef circulo(start, end):\n \"\"\"Draw circle from start to end.\"\"\"\n distancia = end.x - start.x\n begin_fill()\n circle(distancia)\n end_fill()\n\n\ndef rectangle(start, end):\n \"\"\"Draw rectangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(4):\n if count % 2:\n forward(L)\n else:\n forward(L / 2)\n left(90)\n end_fill()\n\n\ndef triangle(start, end):\n \"\"\"Draw triangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(3):\n forward(L)\n left(120)\n end_fill()\n\n\ndef tap(x, y):\n \"\"\"Store starting point or draw shape.\"\"\"\n start = state['start']\n if start is None:\n state['start'] = vector(x, y)\n else:\n shape = state['shape']\n end = vector(x, y)\n shape(start, end)\n state['start'] = None\n\n\ndef store(key, value):\n \"\"\"Store value in state at key.\"\"\"\n state[key] = value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef line(start, end):\n \"\"\"Draw line from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n goto(end.x, end.y)\n\n\ndef square(start, end):\n \"\"\"Draw square from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n begin_fill()\n for count in range(4):\n forward(end.x - start.x)\n left(90)\n end_fill()\n\n\ndef circulo(start, end):\n \"\"\"Draw circle from start to end.\"\"\"\n distancia = end.x - start.x\n begin_fill()\n circle(distancia)\n end_fill()\n\n\ndef rectangle(start, end):\n \"\"\"Draw rectangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(4):\n if count % 2:\n forward(L)\n else:\n forward(L / 2)\n left(90)\n end_fill()\n\n\ndef triangle(start, end):\n \"\"\"Draw triangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(3):\n forward(L)\n left(120)\n end_fill()\n\n\ndef tap(x, y):\n \"\"\"Store starting point or draw shape.\"\"\"\n start = state['start']\n if start is None:\n state['start'] = vector(x, y)\n else:\n shape = state['shape']\n end = vector(x, y)\n shape(start, end)\n state['start'] = None\n\n\ndef store(key, value):\n \"\"\"Store value in state at key.\"\"\"\n state[key] = value\n\n\n<mask token>\nsetup(420, 420, 370, 0)\nonscreenclick(tap)\nlisten()\nonkey(undo, 'u')\nonkey(lambda : color('black'), 'K')\nonkey(lambda : color('#F5B7B1'), 'Q')\nonkey(lambda : color('#00ffff'), 'Y')\nonkey(lambda : color('white'), 'W')\nonkey(lambda : color('green'), 'G')\nonkey(lambda : color('blue'), 'B')\nonkey(lambda : color('red'), 'R')\nonkey(lambda : store('shape', line), 'l')\nonkey(lambda : store('shape', square), 's')\nonkey(lambda : store('shape', circulo), 'c')\nonkey(lambda : store('shape', rectangle), 'r')\nonkey(lambda : store('shape', triangle), 't')\ndone()\n",
"step-3": "<mask token>\n\n\ndef line(start, end):\n \"\"\"Draw line from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n goto(end.x, end.y)\n\n\ndef square(start, end):\n \"\"\"Draw square from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n begin_fill()\n for count in range(4):\n forward(end.x - start.x)\n left(90)\n end_fill()\n\n\ndef circulo(start, end):\n \"\"\"Draw circle from start to end.\"\"\"\n distancia = end.x - start.x\n begin_fill()\n circle(distancia)\n end_fill()\n\n\ndef rectangle(start, end):\n \"\"\"Draw rectangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(4):\n if count % 2:\n forward(L)\n else:\n forward(L / 2)\n left(90)\n end_fill()\n\n\ndef triangle(start, end):\n \"\"\"Draw triangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(3):\n forward(L)\n left(120)\n end_fill()\n\n\ndef tap(x, y):\n \"\"\"Store starting point or draw shape.\"\"\"\n start = state['start']\n if start is None:\n state['start'] = vector(x, y)\n else:\n shape = state['shape']\n end = vector(x, y)\n shape(start, end)\n state['start'] = None\n\n\ndef store(key, value):\n \"\"\"Store value in state at key.\"\"\"\n state[key] = value\n\n\nstate = {'start': None, 'shape': line}\nsetup(420, 420, 370, 0)\nonscreenclick(tap)\nlisten()\nonkey(undo, 'u')\nonkey(lambda : color('black'), 'K')\nonkey(lambda : color('#F5B7B1'), 'Q')\nonkey(lambda : color('#00ffff'), 'Y')\nonkey(lambda : color('white'), 'W')\nonkey(lambda : color('green'), 'G')\nonkey(lambda : color('blue'), 'B')\nonkey(lambda : color('red'), 'R')\nonkey(lambda : store('shape', line), 'l')\nonkey(lambda : store('shape', square), 's')\nonkey(lambda : store('shape', circulo), 'c')\nonkey(lambda : store('shape', rectangle), 'r')\nonkey(lambda : store('shape', triangle), 't')\ndone()\n",
"step-4": "from turtle import *\nfrom freegames import vector\n\n\ndef line(start, end):\n \"\"\"Draw line from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n goto(end.x, end.y)\n\n\ndef square(start, end):\n \"\"\"Draw square from start to end.\"\"\"\n up()\n goto(start.x, start.y)\n down()\n begin_fill()\n for count in range(4):\n forward(end.x - start.x)\n left(90)\n end_fill()\n\n\ndef circulo(start, end):\n \"\"\"Draw circle from start to end.\"\"\"\n distancia = end.x - start.x\n begin_fill()\n circle(distancia)\n end_fill()\n\n\ndef rectangle(start, end):\n \"\"\"Draw rectangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(4):\n if count % 2:\n forward(L)\n else:\n forward(L / 2)\n left(90)\n end_fill()\n\n\ndef triangle(start, end):\n \"\"\"Draw triangle from start to end.\"\"\"\n L = end.x - start.x\n begin_fill()\n for count in range(3):\n forward(L)\n left(120)\n end_fill()\n\n\ndef tap(x, y):\n \"\"\"Store starting point or draw shape.\"\"\"\n start = state['start']\n if start is None:\n state['start'] = vector(x, y)\n else:\n shape = state['shape']\n end = vector(x, y)\n shape(start, end)\n state['start'] = None\n\n\ndef store(key, value):\n \"\"\"Store value in state at key.\"\"\"\n state[key] = value\n\n\nstate = {'start': None, 'shape': line}\nsetup(420, 420, 370, 0)\nonscreenclick(tap)\nlisten()\nonkey(undo, 'u')\nonkey(lambda : color('black'), 'K')\nonkey(lambda : color('#F5B7B1'), 'Q')\nonkey(lambda : color('#00ffff'), 'Y')\nonkey(lambda : color('white'), 'W')\nonkey(lambda : color('green'), 'G')\nonkey(lambda : color('blue'), 'B')\nonkey(lambda : color('red'), 'R')\nonkey(lambda : store('shape', line), 'l')\nonkey(lambda : store('shape', square), 's')\nonkey(lambda : store('shape', circulo), 'c')\nonkey(lambda : store('shape', rectangle), 'r')\nonkey(lambda : store('shape', triangle), 't')\ndone()\n",
"step-5": "from turtle import *\r\nfrom freegames import vector\r\n\r\ndef line(start, end):\r\n \"Draw line from start to end.\"\r\n up()\r\n goto(start.x, start.y)\r\n down()\r\n goto(end.x, end.y)\r\n\r\ndef square(start, end):\r\n \"Draw square from start to end.\"\r\n up()\r\n goto(start.x, start.y)\r\n down()\r\n begin_fill()\r\n\r\n for count in range(4):\r\n forward(end.x - start.x)\r\n left(90)\r\n\r\n end_fill()\r\n\r\ndef circulo(start, end):\r\n \"Draw circle from start to end.\"\r\n distancia = end.x - start.x\r\n begin_fill()\r\n circle(distancia)\r\n end_fill()\r\n\r\n#Adrian\r\ndef rectangle(start, end): # Esta funcion crea un rectangulo con la key \"R\"\r\n \"Draw rectangle from start to end.\" \r\n L = end.x - start.x # Largo de rectangulo\r\n begin_fill() # Iniciar relleno\r\n for count in range(4): # For de 4 loops para 4 lineas del rectangulo\r\n if(count % 2): # IF para diferenciar los lados del rectangulo\r\n forward(L)\r\n else:\r\n forward(L/2) # Ancho de Rectangulo\r\n left(90) # LLamada de funcion para vuelta a la izquierda de 90 grados\r\n end_fill()\r\n\r\ndef triangle(start, end):\r\n \"Draw triangle from start to end.\"\r\n #Ricardo Triangulo\r\n L = end.x - start.x #Distancia para los lados\r\n begin_fill() #Iniciar relleno\r\n for count in range(3): #Crean un trazo de 3 lineas de 120 \r\n forward(L)\r\n left(120) #Angulo de vuelta\r\n end_fill()\r\n \r\n \r\n\r\ndef tap(x, y):\r\n \"Store starting point or draw shape.\"\r\n start = state['start']\r\n\r\n if start is None:\r\n state['start'] = vector(x, y)\r\n else:\r\n shape = state['shape']\r\n end = vector(x, y)\r\n shape(start, end)\r\n state['start'] = None\r\n\r\ndef store(key, value):\r\n \"Store value in state at key.\"\r\n state[key] = value\r\n\r\nstate = {'start': None, 'shape': line}\r\nsetup(420, 420, 370, 0)\r\nonscreenclick(tap)\r\nlisten()\r\nonkey(undo, 'u')\r\nonkey(lambda: color('black'), 'K')\r\nonkey(lambda: color('#F5B7B1'), 'Q') #Color Maestra\r\nonkey(lambda: 
color('#00ffff'), 'Y') #Color Ricardo\r\nonkey(lambda: color('white'), 'W')\r\nonkey(lambda: color('green'), 'G')\r\nonkey(lambda: color('blue'), 'B')\r\nonkey(lambda: color('red'), 'R')\r\nonkey(lambda: store('shape', line), 'l')\r\nonkey(lambda: store('shape', square), 's')\r\nonkey(lambda: store('shape', circulo), 'c')\r\nonkey(lambda: store('shape', rectangle), 'r')\r\nonkey(lambda: store('shape', triangle), 't')\r\ndone()\r\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
#!/usr/bin/env python
"""
maskAOI.py
Dan Fitch 20150618
"""
from __future__ import print_function
import sys, os, glob, shutil, fnmatch, math, re, numpy, csv
from PIL import Image, ImageFile, ImageDraw, ImageColor, ImageOps, ImageStat
ImageFile.MAXBLOCK = 1048576
DEBUG = False
AOI_DIR='/study/reference/public/IAPS/IAPS/IAPS_2008_1-20_800x600BMP/IAPS_2008_AOIs/'
IMG_DIR='/study/midus/IAPS2005png/'
SALIENCY_DIR='/home/fitch/aoi/saliency/'
SUN_SALIENCY_DIR='/home/fitch/aoi/sunsaliency/'
MASK_NAMES = ["0", "E", "1", "2", "3", "4"]
# A wrapper function to check if a string is a number (and account for negatives)
def RepresentsInt(s):
    """Return True when *s* can be parsed as a base-10 integer, else False."""
    try:
        int(s)
    except ValueError:
        return False
    return True
#Function to return only the main, averaged AOI files (the .OBT) and their coordinates.
def getCoordinates(picturename):
    """Return the averaged AOI definitions for *picturename*.

    Reads the picture's ``.OBT`` file from AOI_DIR and returns a list of
    integer lists (one per AOI line, fields split on ``", "``, spaces and
    ``"="``).  Returns [] when no OBT file exists or no usable lines are
    found.
    """
    aoiName = picturename + ".OBT"
    aoiList = []
    obtfile = "{0}/{1}".format(AOI_DIR, aoiName)
    if not os.path.exists(obtfile):
        if DEBUG: print("WARNING: No OBT file found for " + picturename)
        return []
    with open(obtfile) as file:
        stringContent = file.readlines()
        for string in stringContent:
            dirtyContent = re.split(", | |=", string)
            # BUGFIX: materialize as a list, not a lazy `map` object.  On
            # Python 3 a map object is always truthy and never equals [0],
            # so the filter below would let empty/zero lines through.
            content = [int(x) for x in dirtyContent if RepresentsInt(x)]
            if content and content != [0]:
                aoiList.append(content)
    return aoiList
def drawAOI(aoi, i, d):
    """Dispatch one AOI record to the matching shape drawer.

    aoi[0] is a shape code: 1 draws a rectangle, any other value an
    ellipse.  aoi[1:5] are the coordinates handed to the drawer; *i* is
    the target image and *d* its ImageDraw object.
    """
    coords = aoi[1:5]
    if aoi[0] == 1:
        drawOneRect(coords, i, d)
    else:
        drawOneEllipse(coords, i, d)
# Function to display the AOI as masks
def createAOIMasks(pictureName, size):
    """Build grayscale AOI masks (white shapes on black) for one picture.

    Returns None when the picture has no AOIs; otherwise a list of
    mode-"L" images of *size*:
      index 0  - every AOI drawn together
      index 1  - the "emotional" AOIs only (all but the first)
      index 2+ - one mask per individual AOI
    """
    if DEBUG:
        print("Displaying AOIs for picture {0}".format(pictureName))
    aoiList = getCoordinates(pictureName)
    if aoiList == []:
        return None

    def _blank():
        # Fresh black grayscale canvas plus its draw handle.
        canvas = Image.new("L", size, 0)
        return canvas, ImageDraw.Draw(canvas)

    all_img, all_draw = _blank()
    for aoi in aoiList:
        drawAOI(aoi, all_img, all_draw)

    emo_img, emo_draw = _blank()
    for aoi in aoiList[1:]:
        drawAOI(aoi, emo_img, emo_draw)

    masks = [all_img, emo_img]
    for aoi in aoiList:
        single_img, single_draw = _blank()
        drawAOI(aoi, single_img, single_draw)
        masks.append(single_img)
    return masks
def drawOneEllipse(aoi, img, draw):
    """Draw one filled white ellipse on *img* via *draw*.

    *aoi* is [center_x, center_y, radius_x, radius_y].  (*img* is kept
    in the signature for interface parity with drawOneRect.)
    """
    if DEBUG: print("Ellipse centered at [{0}, {1}] with {2} {3}".format(aoi[0], aoi[1], aoi[2], aoi[3]))
    cx = aoi[0]
    cy = aoi[1]
    # Bounding box of the ellipse from center and radii.
    LeftX = cx - aoi[2]
    RightX = cx + aoi[2]
    TopY = cy - aoi[3]
    BottomY = cy + aoi[3]
    draw.ellipse(((LeftX, TopY), (RightX, BottomY)), fill="white", outline="white")
def drawOneRect(aoi, img, draw):
    """Draw one filled white rectangle on *img* via *draw*.

    *aoi* holds four coordinates; see the index mapping below.  (*img*
    is kept in the signature for interface parity with drawOneEllipse.)
    """
    if DEBUG: print("Rectangle with Coordinates {0}".format(aoi))
    # NOTE(review): index 3 is used as "top" and index 1 as "bottom" —
    # presumably the OBT coordinate convention; confirm against the AOI
    # files before changing.
    TopY = aoi[3]
    BottomY = aoi[1]
    LeftX = aoi[0]
    RightX = aoi[2]
    if DEBUG: print(" Top:{0}, Bottom:{1}, Left:{2}, Right: {3}".format(TopY, BottomY, LeftX, RightX))
    draw.rectangle(((LeftX, TopY), (RightX, BottomY)), fill="white", outline="white")
def stat(img, mask=None):
    """Return an ImageStat.Stat for *img*, optionally restricted to *mask*."""
    # Use identity comparison for None (PEP 8); == delegates to __eq__.
    if mask is None:
        return ImageStat.Stat(img)
    return ImageStat.Stat(img, mask)
def brightness(img, mask=None):
    """Return the RMS value of *img*'s first band, optionally within *mask*."""
    stats = stat(img, mask)
    return stats.rms[0]
def luminance(c):
    """Rec. 709 luminance of an RGB or RGBA channel-mean sequence.

    *c* has 3 (RGB) or 4 (RGBA) values; with alpha present the result is
    scaled by alpha/255.  Returns 0.0 when the computation yields NaN.
    Raises Exception for any other length.
    """
    if len(c) < 3 or len(c) > 4:
        raise Exception("Luminance got values: ", c)
    r = c[0]
    # BUGFIX: green is channel 1 and blue is channel 2 (PIL mean order is
    # R, G, B[, A]); the original swapped them, miscomputing the Rec. 709
    # weighted sum below.
    g = c[1]
    b = c[2]
    lum = r*0.2126 + g*0.7152 + b*0.0722
    if len(c) == 4:
        # Multiply by alpha... kind of hokey but should work for most cases
        result = lum * (c[3] / 255.0)
    else:
        result = lum
    if math.isnan(result):
        return 0.0
    else:
        return result
def complexity(pictureName, key, img):
    """Proxy for visual complexity: JPEG-compressed size of *img* in bytes.

    Saves *img* under masks/ as a quality-80 optimized progressive JPEG
    and returns the resulting file size.  The file is left on disk.
    """
    name = "masks/{0}-{1}.jpg".format(pictureName, key)
    img.save(name, quality=80, format="JPEG", optimize=True, progressive=True)
    return os.path.getsize(name)
def results_for_mask(withColors, original, pictureName, key, mask):
    """Compute stats for *original* inside and outside *mask*.

    Returns a dict whose keys start with *key*: in/out luminance always;
    with *withColors* also mask luminance, per-channel means and JPEG
    complexity for both regions.  Returns {} when the per-pixel averages
    raise ZeroDivisionError (e.g. an empty mask).
    """
    # The region outside the mask is just the inverted mask.
    mask_inverted = ImageOps.invert(mask)
    stats_mask = stat(mask)
    stats_in = stat(original, mask)
    stats_out = stat(original, mask_inverted)

    # Complexity works on a saved JPEG, so render each masked region
    # onto a black background first.
    stats_in_image = Image.new('RGBA', original.size, "black")
    stats_in_image.paste(original, mask=mask)
    stats_out_image = Image.new('RGBA', original.size, "black")
    stats_out_image.paste(original, mask=mask_inverted)

    try:
        if not withColors:
            return {
                key + '_in_lum': luminance(stats_in.mean) / 256.0,
                key + '_out_lum': luminance(stats_out.mean) / 256.0,
            }
        results = {key + '_mask_lum': stats_mask.mean[0] / 256.0}
        for suffix, region_stats, region_img, tag in (
                ('_in', stats_in, stats_in_image, 'in'),
                ('_out', stats_out, stats_out_image, 'out')):
            results[key + suffix + '_lum'] = luminance(region_stats.mean) / 256.0
            results[key + suffix + '_r'] = region_stats.mean[0] / 256.0
            results[key + suffix + '_g'] = region_stats.mean[1] / 256.0
            results[key + suffix + '_b'] = region_stats.mean[2] / 256.0
            results[key + suffix + '_complexity'] = complexity(pictureName, key + tag, region_img)
        return results
    except ZeroDivisionError:
        return {}
def do_saliency(original, masks, path, prefix, pictureName, results):
    """Fold saliency-map statistics for one picture into *results*.

    Loads *path*/<pictureName>.png, resizes it to match *original*, then
    records (all keys start with *prefix*): overall luminance, in/out
    luminance per AOI mask, and a dot-product overlap score between the
    grayscale saliency map and the all-AOIs mask.  Mutates *results*.
    """
    saliency = Image.open(path + pictureName + ".png")
    if saliency.mode != "RGBA":
        saliency = saliency.convert("RGBA")
    saliency = saliency.resize(original.size)

    results[prefix + '_lum'] = luminance(stat(saliency).mean) / 256.0
    for name, mask in zip(MASK_NAMES, masks):
        results.update(results_for_mask(False, saliency, pictureName, prefix + name, mask))

    # Overlap between saliency intensity and the combined AOI mask.
    s_array = numpy.array(saliency.convert("L"))
    m_array = numpy.array(masks[0])
    dot = numpy.dot(s_array, numpy.rot90(m_array))
    results[prefix + "_aoi_dotproduct_sum"] = numpy.sum(dot)
def write_stats(writer, filename, pictureName):
    """Compute all stats for one image and write a row via *writer*.

    Returns True on success, False when the picture has no AOI masks.
    """
    original = Image.open(IMG_DIR + filename)
    if original.mode != "RGBA":
        # P is palette. Did you know BMP *and* PNG files can have 8-bit palettes? WHAAAT
        original = original.convert("RGBA")
    # First, draw the AOI masks in white on black.
    # This returns a list: masks[0] is ALL AOIs, masks[1] the "emotional"
    # ones (>=2), the rest one per individual shape.
    masks = createAOIMasks(pictureName, original.size)
    # Use identity comparison for None (PEP 8).
    if masks is None:
        print("No masks found in: " + filename)
        return False
    stats_orig = stat(original)
    results = {
        'image_name': pictureName,
        'orig_lum': luminance(stats_orig.mean) / 256.0,
        'orig_r': stats_orig.mean[0] / 256.0,
        'orig_g': stats_orig.mean[1] / 256.0,
        'orig_b': stats_orig.mean[2] / 256.0,
        'orig_complexity': complexity(pictureName, "original", original),
    }
    for i, mask in zip(MASK_NAMES, masks):
        stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)
        results.update(stuff)
    # Saliency maps from two different models, compared against the AOI masks.
    do_saliency(original, masks, SALIENCY_DIR, "saliency", pictureName, results)
    do_saliency(original, masks, SUN_SALIENCY_DIR, "sun_saliency", pictureName, results)
    writer.writerow(results)
    if DEBUG: print("Generated stats for " + filename)
    return True
with open('stats.csv', 'wb') as csvfile:
    # Column suffixes produced by results_for_mask() with colors...
    per_mask_fields = [
        '_mask_lum',
        '_in_lum',
        '_in_r',
        '_in_g',
        '_in_b',
        '_in_complexity',
        '_out_lum',
        '_out_r',
        '_out_g',
        '_out_b',
        '_out_complexity',
    ]
    # ...and without (saliency passes only compute luminances).
    per_saliency_fields = ['_in_lum', '_out_lum']

    # CSV header, in the exact order the columns should appear.
    fields = [
        'image_name',
        'orig_lum',
        'orig_r',
        'orig_g',
        'orig_b',
        'orig_complexity',
    ]
    fields.extend("aoi{0}{1}".format(i, f) for i in MASK_NAMES for f in per_mask_fields)
    for prefix in ("saliency", "sun_saliency"):
        fields.append(prefix + "_aoi_dotproduct_sum")
        fields.append(prefix + "_lum")
        fields.extend("{0}{1}{2}".format(prefix, i, f) for i in MASK_NAMES for f in per_saliency_fields)

    writer = csv.DictWriter(csvfile, fieldnames=fields)
    # Header row: each field name mapped to itself.
    writer.writerow(dict(zip(fields, fields)))

    for filename in sorted(os.listdir(IMG_DIR)):
        if ".png" not in filename:
            continue
        pictureName = filename.replace(".png", "")
        try:
            write_stats(writer, filename, pictureName)
        except:
            # Report which file blew up, then let the exception propagate.
            print("Error on file " + pictureName, file=sys.stderr)
            raise
|
normal
|
{
"blob_id": "833053a5a75636267feaad5ddaa21dce1de34038",
"index": 5319,
"step-1": "<mask token>\n\n\ndef RepresentsInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\n<mask token>\n\n\ndef drawOneEllipse(aoi, img, draw):\n if DEBUG:\n print('Ellipse centered at [{0}, {1}] with {2} {3}'.format(aoi[0],\n aoi[1], aoi[2], aoi[3]))\n imgDim = img.size\n cx = aoi[0]\n cy = aoi[1]\n w = 2 * aoi[2]\n h = 2 * aoi[3]\n imgArea = imgDim[0] * imgDim[1]\n LeftX = cx - aoi[2]\n RightX = cx + aoi[2]\n TopY = cy - aoi[3]\n BottomY = cy + aoi[3]\n draw.ellipse(((LeftX, TopY), (RightX, BottomY)), fill='white', outline=\n 'white')\n\n\ndef drawOneRect(aoi, img, draw):\n if DEBUG:\n print('Rectangle with Coordinates {0}'.format(aoi))\n imgDim = img.size\n TopY = aoi[3]\n BottomY = aoi[1]\n LeftX = aoi[0]\n RightX = aoi[2]\n if DEBUG:\n print(' Top:{0}, Bottom:{1}, Left:{2}, Right: {3}'.format(TopY,\n BottomY, LeftX, RightX))\n imgArea = imgDim[0] * imgDim[1]\n draw.rectangle(((LeftX, TopY), (RightX, BottomY)), fill='white',\n outline='white')\n\n\ndef stat(img, mask=None):\n if mask == None:\n return ImageStat.Stat(img)\n else:\n return ImageStat.Stat(img, mask)\n\n\n<mask token>\n\n\ndef results_for_mask(withColors, original, pictureName, key, mask):\n mask_inverted = ImageOps.invert(mask)\n stats_mask = stat(mask)\n stats_in = stat(original, mask)\n stats_out = stat(original, mask_inverted)\n stats_in_image = Image.new('RGBA', original.size, 'black')\n stats_in_image.paste(original, mask=mask)\n stats_out_image = Image.new('RGBA', original.size, 'black')\n stats_out_image.paste(original, mask=mask_inverted)\n try:\n if withColors:\n return {(key + '_mask_lum'): stats_mask.mean[0] / 256.0, (key +\n '_in_lum'): luminance(stats_in.mean) / 256.0, (key +\n '_in_r'): stats_in.mean[0] / 256.0, (key + '_in_g'): \n stats_in.mean[1] / 256.0, (key + '_in_b'): stats_in.mean[2] /\n 256.0, (key + '_in_complexity'): complexity(pictureName, \n key + 'in', stats_in_image), (key + '_out_lum'): luminance(\n stats_out.mean) / 256.0, (key + 
'_out_r'): stats_out.mean[0\n ] / 256.0, (key + '_out_g'): stats_out.mean[1] / 256.0, (\n key + '_out_b'): stats_out.mean[2] / 256.0, (key +\n '_out_complexity'): complexity(pictureName, key + 'out',\n stats_out_image)}\n else:\n return {(key + '_in_lum'): luminance(stats_in.mean) / 256.0, (\n key + '_out_lum'): luminance(stats_out.mean) / 256.0}\n except ZeroDivisionError:\n return {}\n\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + '.png')\n if saliency.mode != 'RGBA':\n saliency = saliency.convert('RGBA')\n saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask\n )\n results.update(stuff)\n saliency_bw = saliency.convert('L')\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n results[prefix + '_aoi_dotproduct_sum'] = numpy.sum(dot)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef RepresentsInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\n<mask token>\n\n\ndef drawOneEllipse(aoi, img, draw):\n if DEBUG:\n print('Ellipse centered at [{0}, {1}] with {2} {3}'.format(aoi[0],\n aoi[1], aoi[2], aoi[3]))\n imgDim = img.size\n cx = aoi[0]\n cy = aoi[1]\n w = 2 * aoi[2]\n h = 2 * aoi[3]\n imgArea = imgDim[0] * imgDim[1]\n LeftX = cx - aoi[2]\n RightX = cx + aoi[2]\n TopY = cy - aoi[3]\n BottomY = cy + aoi[3]\n draw.ellipse(((LeftX, TopY), (RightX, BottomY)), fill='white', outline=\n 'white')\n\n\ndef drawOneRect(aoi, img, draw):\n if DEBUG:\n print('Rectangle with Coordinates {0}'.format(aoi))\n imgDim = img.size\n TopY = aoi[3]\n BottomY = aoi[1]\n LeftX = aoi[0]\n RightX = aoi[2]\n if DEBUG:\n print(' Top:{0}, Bottom:{1}, Left:{2}, Right: {3}'.format(TopY,\n BottomY, LeftX, RightX))\n imgArea = imgDim[0] * imgDim[1]\n draw.rectangle(((LeftX, TopY), (RightX, BottomY)), fill='white',\n outline='white')\n\n\ndef stat(img, mask=None):\n if mask == None:\n return ImageStat.Stat(img)\n else:\n return ImageStat.Stat(img, mask)\n\n\ndef brightness(img, mask=None):\n return stat(img, mask).rms[0]\n\n\n<mask token>\n\n\ndef results_for_mask(withColors, original, pictureName, key, mask):\n mask_inverted = ImageOps.invert(mask)\n stats_mask = stat(mask)\n stats_in = stat(original, mask)\n stats_out = stat(original, mask_inverted)\n stats_in_image = Image.new('RGBA', original.size, 'black')\n stats_in_image.paste(original, mask=mask)\n stats_out_image = Image.new('RGBA', original.size, 'black')\n stats_out_image.paste(original, mask=mask_inverted)\n try:\n if withColors:\n return {(key + '_mask_lum'): stats_mask.mean[0] / 256.0, (key +\n '_in_lum'): luminance(stats_in.mean) / 256.0, (key +\n '_in_r'): stats_in.mean[0] / 256.0, (key + '_in_g'): \n stats_in.mean[1] / 256.0, (key + '_in_b'): stats_in.mean[2] /\n 256.0, (key + '_in_complexity'): complexity(pictureName, \n key + 'in', stats_in_image), 
(key + '_out_lum'): luminance(\n stats_out.mean) / 256.0, (key + '_out_r'): stats_out.mean[0\n ] / 256.0, (key + '_out_g'): stats_out.mean[1] / 256.0, (\n key + '_out_b'): stats_out.mean[2] / 256.0, (key +\n '_out_complexity'): complexity(pictureName, key + 'out',\n stats_out_image)}\n else:\n return {(key + '_in_lum'): luminance(stats_in.mean) / 256.0, (\n key + '_out_lum'): luminance(stats_out.mean) / 256.0}\n except ZeroDivisionError:\n return {}\n\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + '.png')\n if saliency.mode != 'RGBA':\n saliency = saliency.convert('RGBA')\n saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask\n )\n results.update(stuff)\n saliency_bw = saliency.convert('L')\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n results[prefix + '_aoi_dotproduct_sum'] = numpy.sum(dot)\n\n\ndef write_stats(writer, filename, pictureName):\n original = Image.open(IMG_DIR + filename)\n if original.mode != 'RGBA':\n original = original.convert('RGBA')\n masks = createAOIMasks(pictureName, original.size)\n if masks == None:\n print('No masks found in: ' + filename)\n return False\n stats_orig = stat(original)\n results = {'image_name': pictureName, 'orig_lum': luminance(stats_orig.\n mean) / 256.0, 'orig_r': stats_orig.mean[0] / 256.0, 'orig_g': \n stats_orig.mean[1] / 256.0, 'orig_b': stats_orig.mean[2] / 256.0,\n 'orig_complexity': complexity(pictureName, 'original', original)}\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)\n results.update(stuff)\n do_saliency(original, masks, SALIENCY_DIR, 'saliency', pictureName, results\n )\n 
do_saliency(original, masks, SUN_SALIENCY_DIR, 'sun_saliency',\n pictureName, results)\n writer.writerow(results)\n if DEBUG:\n print('Generated stats for ' + filename)\n return True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef RepresentsInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef getCoordinates(picturename):\n aoiName = picturename + '.OBT'\n aoiList = []\n obtfile = '{0}/{1}'.format(AOI_DIR, aoiName)\n if not os.path.exists(obtfile):\n if DEBUG:\n print('WARNING: No OBT file found for ' + picturename)\n return []\n with open(obtfile) as file:\n stringContent = file.readlines()\n for string in stringContent:\n dirtyContent = re.split(', | |=', string)\n content = map(int, [x for x in dirtyContent if RepresentsInt(x)])\n if content and content != [0]:\n aoiList.append(content)\n return aoiList\n\n\n<mask token>\n\n\ndef createAOIMasks(pictureName, size):\n if DEBUG:\n print('Displaying AOIs for picture {0}'.format(pictureName))\n aoiList = getCoordinates(pictureName)\n if aoiList == []:\n return None\n masks = []\n img = Image.new('L', size, 0)\n draw = ImageDraw.Draw(img)\n for aoi in aoiList:\n drawAOI(aoi, img, draw)\n masks.append(img)\n emo = Image.new('L', size, 0)\n emo_draw = ImageDraw.Draw(emo)\n for aoi in aoiList[1:]:\n drawAOI(aoi, emo, emo_draw)\n masks.append(emo)\n for aoi in aoiList:\n individual = Image.new('L', size, 0)\n individual_draw = ImageDraw.Draw(individual)\n drawAOI(aoi, individual, individual_draw)\n masks.append(individual)\n return masks\n\n\ndef drawOneEllipse(aoi, img, draw):\n if DEBUG:\n print('Ellipse centered at [{0}, {1}] with {2} {3}'.format(aoi[0],\n aoi[1], aoi[2], aoi[3]))\n imgDim = img.size\n cx = aoi[0]\n cy = aoi[1]\n w = 2 * aoi[2]\n h = 2 * aoi[3]\n imgArea = imgDim[0] * imgDim[1]\n LeftX = cx - aoi[2]\n RightX = cx + aoi[2]\n TopY = cy - aoi[3]\n BottomY = cy + aoi[3]\n draw.ellipse(((LeftX, TopY), (RightX, BottomY)), fill='white', outline=\n 'white')\n\n\ndef drawOneRect(aoi, img, draw):\n if DEBUG:\n print('Rectangle with Coordinates {0}'.format(aoi))\n imgDim = img.size\n TopY = aoi[3]\n BottomY = aoi[1]\n LeftX = aoi[0]\n RightX = aoi[2]\n if DEBUG:\n print(' 
Top:{0}, Bottom:{1}, Left:{2}, Right: {3}'.format(TopY,\n BottomY, LeftX, RightX))\n imgArea = imgDim[0] * imgDim[1]\n draw.rectangle(((LeftX, TopY), (RightX, BottomY)), fill='white',\n outline='white')\n\n\ndef stat(img, mask=None):\n if mask == None:\n return ImageStat.Stat(img)\n else:\n return ImageStat.Stat(img, mask)\n\n\ndef brightness(img, mask=None):\n return stat(img, mask).rms[0]\n\n\ndef luminance(c):\n if len(c) < 3 or len(c) > 4:\n raise Exception('Luminance got values: ', c)\n r = c[0]\n b = c[1]\n g = c[2]\n lum = r * 0.2126 + g * 0.7152 + b * 0.0722\n if len(c) == 4:\n result = lum * (c[3] / 255.0)\n else:\n result = lum\n if math.isnan(result):\n return 0.0\n else:\n return result\n\n\ndef complexity(pictureName, key, img):\n name = 'masks/{0}-{1}.jpg'.format(pictureName, key)\n img.save(name, quality=80, format='JPEG', optimize=True, progressive=True)\n size = os.path.getsize(name)\n return size\n\n\ndef results_for_mask(withColors, original, pictureName, key, mask):\n mask_inverted = ImageOps.invert(mask)\n stats_mask = stat(mask)\n stats_in = stat(original, mask)\n stats_out = stat(original, mask_inverted)\n stats_in_image = Image.new('RGBA', original.size, 'black')\n stats_in_image.paste(original, mask=mask)\n stats_out_image = Image.new('RGBA', original.size, 'black')\n stats_out_image.paste(original, mask=mask_inverted)\n try:\n if withColors:\n return {(key + '_mask_lum'): stats_mask.mean[0] / 256.0, (key +\n '_in_lum'): luminance(stats_in.mean) / 256.0, (key +\n '_in_r'): stats_in.mean[0] / 256.0, (key + '_in_g'): \n stats_in.mean[1] / 256.0, (key + '_in_b'): stats_in.mean[2] /\n 256.0, (key + '_in_complexity'): complexity(pictureName, \n key + 'in', stats_in_image), (key + '_out_lum'): luminance(\n stats_out.mean) / 256.0, (key + '_out_r'): stats_out.mean[0\n ] / 256.0, (key + '_out_g'): stats_out.mean[1] / 256.0, (\n key + '_out_b'): stats_out.mean[2] / 256.0, (key +\n '_out_complexity'): complexity(pictureName, key + 'out',\n 
stats_out_image)}\n else:\n return {(key + '_in_lum'): luminance(stats_in.mean) / 256.0, (\n key + '_out_lum'): luminance(stats_out.mean) / 256.0}\n except ZeroDivisionError:\n return {}\n\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + '.png')\n if saliency.mode != 'RGBA':\n saliency = saliency.convert('RGBA')\n saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask\n )\n results.update(stuff)\n saliency_bw = saliency.convert('L')\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n results[prefix + '_aoi_dotproduct_sum'] = numpy.sum(dot)\n\n\ndef write_stats(writer, filename, pictureName):\n original = Image.open(IMG_DIR + filename)\n if original.mode != 'RGBA':\n original = original.convert('RGBA')\n masks = createAOIMasks(pictureName, original.size)\n if masks == None:\n print('No masks found in: ' + filename)\n return False\n stats_orig = stat(original)\n results = {'image_name': pictureName, 'orig_lum': luminance(stats_orig.\n mean) / 256.0, 'orig_r': stats_orig.mean[0] / 256.0, 'orig_g': \n stats_orig.mean[1] / 256.0, 'orig_b': stats_orig.mean[2] / 256.0,\n 'orig_complexity': complexity(pictureName, 'original', original)}\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)\n results.update(stuff)\n do_saliency(original, masks, SALIENCY_DIR, 'saliency', pictureName, results\n )\n do_saliency(original, masks, SUN_SALIENCY_DIR, 'sun_saliency',\n pictureName, results)\n writer.writerow(results)\n if DEBUG:\n print('Generated stats for ' + filename)\n return True\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef RepresentsInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef getCoordinates(picturename):\n aoiName = picturename + '.OBT'\n aoiList = []\n obtfile = '{0}/{1}'.format(AOI_DIR, aoiName)\n if not os.path.exists(obtfile):\n if DEBUG:\n print('WARNING: No OBT file found for ' + picturename)\n return []\n with open(obtfile) as file:\n stringContent = file.readlines()\n for string in stringContent:\n dirtyContent = re.split(', | |=', string)\n content = map(int, [x for x in dirtyContent if RepresentsInt(x)])\n if content and content != [0]:\n aoiList.append(content)\n return aoiList\n\n\ndef drawAOI(aoi, i, d):\n if aoi[0] == 1:\n drawOneRect(aoi[1:5], i, d)\n else:\n drawOneEllipse(aoi[1:5], i, d)\n\n\ndef createAOIMasks(pictureName, size):\n if DEBUG:\n print('Displaying AOIs for picture {0}'.format(pictureName))\n aoiList = getCoordinates(pictureName)\n if aoiList == []:\n return None\n masks = []\n img = Image.new('L', size, 0)\n draw = ImageDraw.Draw(img)\n for aoi in aoiList:\n drawAOI(aoi, img, draw)\n masks.append(img)\n emo = Image.new('L', size, 0)\n emo_draw = ImageDraw.Draw(emo)\n for aoi in aoiList[1:]:\n drawAOI(aoi, emo, emo_draw)\n masks.append(emo)\n for aoi in aoiList:\n individual = Image.new('L', size, 0)\n individual_draw = ImageDraw.Draw(individual)\n drawAOI(aoi, individual, individual_draw)\n masks.append(individual)\n return masks\n\n\ndef drawOneEllipse(aoi, img, draw):\n if DEBUG:\n print('Ellipse centered at [{0}, {1}] with {2} {3}'.format(aoi[0],\n aoi[1], aoi[2], aoi[3]))\n imgDim = img.size\n cx = aoi[0]\n cy = aoi[1]\n w = 2 * aoi[2]\n h = 2 * aoi[3]\n imgArea = imgDim[0] * imgDim[1]\n LeftX = cx - aoi[2]\n RightX = cx + aoi[2]\n TopY = cy - aoi[3]\n BottomY = cy + aoi[3]\n draw.ellipse(((LeftX, TopY), (RightX, BottomY)), fill='white', outline=\n 'white')\n\n\ndef drawOneRect(aoi, img, draw):\n if DEBUG:\n print('Rectangle with Coordinates {0}'.format(aoi))\n imgDim = 
img.size\n TopY = aoi[3]\n BottomY = aoi[1]\n LeftX = aoi[0]\n RightX = aoi[2]\n if DEBUG:\n print(' Top:{0}, Bottom:{1}, Left:{2}, Right: {3}'.format(TopY,\n BottomY, LeftX, RightX))\n imgArea = imgDim[0] * imgDim[1]\n draw.rectangle(((LeftX, TopY), (RightX, BottomY)), fill='white',\n outline='white')\n\n\ndef stat(img, mask=None):\n if mask == None:\n return ImageStat.Stat(img)\n else:\n return ImageStat.Stat(img, mask)\n\n\ndef brightness(img, mask=None):\n return stat(img, mask).rms[0]\n\n\ndef luminance(c):\n if len(c) < 3 or len(c) > 4:\n raise Exception('Luminance got values: ', c)\n r = c[0]\n b = c[1]\n g = c[2]\n lum = r * 0.2126 + g * 0.7152 + b * 0.0722\n if len(c) == 4:\n result = lum * (c[3] / 255.0)\n else:\n result = lum\n if math.isnan(result):\n return 0.0\n else:\n return result\n\n\ndef complexity(pictureName, key, img):\n name = 'masks/{0}-{1}.jpg'.format(pictureName, key)\n img.save(name, quality=80, format='JPEG', optimize=True, progressive=True)\n size = os.path.getsize(name)\n return size\n\n\ndef results_for_mask(withColors, original, pictureName, key, mask):\n mask_inverted = ImageOps.invert(mask)\n stats_mask = stat(mask)\n stats_in = stat(original, mask)\n stats_out = stat(original, mask_inverted)\n stats_in_image = Image.new('RGBA', original.size, 'black')\n stats_in_image.paste(original, mask=mask)\n stats_out_image = Image.new('RGBA', original.size, 'black')\n stats_out_image.paste(original, mask=mask_inverted)\n try:\n if withColors:\n return {(key + '_mask_lum'): stats_mask.mean[0] / 256.0, (key +\n '_in_lum'): luminance(stats_in.mean) / 256.0, (key +\n '_in_r'): stats_in.mean[0] / 256.0, (key + '_in_g'): \n stats_in.mean[1] / 256.0, (key + '_in_b'): stats_in.mean[2] /\n 256.0, (key + '_in_complexity'): complexity(pictureName, \n key + 'in', stats_in_image), (key + '_out_lum'): luminance(\n stats_out.mean) / 256.0, (key + '_out_r'): stats_out.mean[0\n ] / 256.0, (key + '_out_g'): stats_out.mean[1] / 256.0, (\n key + '_out_b'): 
stats_out.mean[2] / 256.0, (key +\n '_out_complexity'): complexity(pictureName, key + 'out',\n stats_out_image)}\n else:\n return {(key + '_in_lum'): luminance(stats_in.mean) / 256.0, (\n key + '_out_lum'): luminance(stats_out.mean) / 256.0}\n except ZeroDivisionError:\n return {}\n\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + '.png')\n if saliency.mode != 'RGBA':\n saliency = saliency.convert('RGBA')\n saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask\n )\n results.update(stuff)\n saliency_bw = saliency.convert('L')\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n results[prefix + '_aoi_dotproduct_sum'] = numpy.sum(dot)\n\n\ndef write_stats(writer, filename, pictureName):\n original = Image.open(IMG_DIR + filename)\n if original.mode != 'RGBA':\n original = original.convert('RGBA')\n masks = createAOIMasks(pictureName, original.size)\n if masks == None:\n print('No masks found in: ' + filename)\n return False\n stats_orig = stat(original)\n results = {'image_name': pictureName, 'orig_lum': luminance(stats_orig.\n mean) / 256.0, 'orig_r': stats_orig.mean[0] / 256.0, 'orig_g': \n stats_orig.mean[1] / 256.0, 'orig_b': stats_orig.mean[2] / 256.0,\n 'orig_complexity': complexity(pictureName, 'original', original)}\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)\n results.update(stuff)\n do_saliency(original, masks, SALIENCY_DIR, 'saliency', pictureName, results\n )\n do_saliency(original, masks, SUN_SALIENCY_DIR, 'sun_saliency',\n pictureName, results)\n writer.writerow(results)\n if DEBUG:\n print('Generated stats for ' + filename)\n return 
True\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"\nmaskAOI.py\n\nDan Fitch 20150618\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys, os, glob, shutil, fnmatch, math, re, numpy, csv\nfrom PIL import Image, ImageFile, ImageDraw, ImageColor, ImageOps, ImageStat\nImageFile.MAXBLOCK = 1048576\n\nDEBUG = False\n\nAOI_DIR='/study/reference/public/IAPS/IAPS/IAPS_2008_1-20_800x600BMP/IAPS_2008_AOIs/'\nIMG_DIR='/study/midus/IAPS2005png/'\nSALIENCY_DIR='/home/fitch/aoi/saliency/'\nSUN_SALIENCY_DIR='/home/fitch/aoi/sunsaliency/'\nMASK_NAMES = [\"0\", \"E\", \"1\", \"2\", \"3\", \"4\"]\n\n\n# A wrapper function to check if a string is a number (and account for negatives)\ndef RepresentsInt(s):\n\ttry: \n\t\tint(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\t\t\n\n#Function to return only the main, averaged AOI files (the .OBT) and their coordinates.\ndef getCoordinates(picturename):\n #Load one current image\n aoiName = picturename + \".OBT\"\n aoiList = []\n obtfile = \"{0}/{1}\".format(AOI_DIR, aoiName)\n if not os.path.exists(obtfile):\n if DEBUG: print(\"WARNING: No OBT file found for \" + picturename)\n return []\n with open(obtfile) as file:\n stringContent = file.readlines()\n for string in stringContent:\n dirtyContent = re.split(\", | |=\", string)\n content = map(int, [ x for x in dirtyContent if RepresentsInt(x) ])\n if content and content != [0]:\n aoiList.append(content)\n return aoiList\n\n\ndef drawAOI(aoi, i, d):\n if aoi[0] == 1:\n drawOneRect(aoi[1:5], i, d)\n else:\n drawOneEllipse(aoi[1:5], i, d)\n\n# Function to display the AOI as masks\ndef createAOIMasks(pictureName, size):\n if DEBUG: print(\"Displaying AOIs for picture {0}\".format(pictureName))\n aoiList = getCoordinates(pictureName)\n\n if aoiList == []: return None\n\n masks = []\n\n # L is grayscale\n img = Image.new(\"L\", size, 0)\n draw = ImageDraw.Draw(img)\n\n for aoi in aoiList:\n drawAOI(aoi, img, draw)\n\n masks.append(img)\n\n # Now the \"emotional\" masks, index 2 
and up theoretically\n emo = Image.new(\"L\", size, 0)\n emo_draw = ImageDraw.Draw(emo)\n\n for aoi in aoiList[1:]:\n drawAOI(aoi, emo, emo_draw)\n\n masks.append(emo)\n\n # Now we draw each mask individually\n for aoi in aoiList:\n individual = Image.new(\"L\", size, 0)\n individual_draw = ImageDraw.Draw(individual)\n drawAOI(aoi, individual, individual_draw)\n masks.append(individual)\n\n return masks\n\n\t\t\ndef drawOneEllipse(aoi, img, draw):\n #Draw one ellipse on the figure given\n if DEBUG: print(\"Ellipse centered at [{0}, {1}] with {2} {3}\".format(aoi[0], aoi[1], aoi[2], aoi[3]))\n imgDim = img.size\n cx=aoi[0]\n cy=aoi[1]\n w=2*aoi[2]\n h=2*aoi[3]\n imgArea=imgDim[0]*imgDim[1]\n LeftX=cx-aoi[2]\n RightX=cx+aoi[2]\n TopY=cy-aoi[3]\n BottomY=cy+aoi[3]\n draw.ellipse(((LeftX,TopY),(RightX,BottomY)), fill=\"white\", outline=\"white\")\n\t\ndef drawOneRect(aoi, img, draw):\n #Draw one rectangle on the figure given\n if DEBUG: print(\"Rectangle with Coordinates {0}\".format(aoi))\n imgDim = img.size\n TopY=aoi[3]\n BottomY=aoi[1]\n LeftX=aoi[0]\n RightX=aoi[2]\n if DEBUG: print(\" Top:{0}, Bottom:{1}, Left:{2}, Right: {3}\".format(TopY, BottomY, LeftX, RightX))\n imgArea=imgDim[0]*imgDim[1]\n draw.rectangle(((LeftX,TopY),(RightX,BottomY)), fill=\"white\", outline=\"white\")\n\ndef stat(img, mask=None):\n if mask == None:\n return ImageStat.Stat(img)\n else:\n return ImageStat.Stat(img, mask)\n\ndef brightness(img, mask=None):\n return stat(img,mask).rms[0]\n\t\ndef luminance(c):\n if len(c) < 3 or len(c) > 4:\n raise Exception(\"Luminance got values: \", c)\n r = c[0]\n b = c[1]\n g = c[2]\n lum = r*0.2126 + g*0.7152 + b*0.0722\n if len(c) == 4:\n # Multiply by alpha... 
kind of hokey but should work for most cases\n result = lum * (c[3] / 255.0)\n else:\n result = lum\n\n if math.isnan(result):\n return 0.0\n else:\n return result\n\ndef complexity(pictureName, key, img):\n name = \"masks/{0}-{1}.jpg\".format(pictureName, key)\n img.save(name, quality=80, format=\"JPEG\", optimize=True, progressive=True)\n size = os.path.getsize(name)\n #os.remove(name)\n return size\n\n\n\ndef results_for_mask(withColors, original, pictureName, key, mask):\n # We also want the area outside of the mask\n mask_inverted = ImageOps.invert(mask)\n stats_mask = stat(mask)\n stats_in = stat(original, mask)\n stats_out = stat(original, mask_inverted)\n\n # Complexity uses the resultant image saved as jpg, so we need to prepare some actual images\n\n stats_in_image = Image.new('RGBA', original.size, \"black\")\n stats_in_image.paste(original, mask=mask)\n stats_out_image = Image.new('RGBA', original.size, \"black\")\n stats_out_image.paste(original, mask=mask_inverted)\n\n try:\n if withColors:\n return {\n key + '_mask_lum': stats_mask.mean[0] / 256.0,\n key + '_in_lum': luminance(stats_in.mean) / 256.0,\n key + '_in_r': stats_in.mean[0] / 256.0,\n key + '_in_g': stats_in.mean[1] / 256.0,\n key + '_in_b': stats_in.mean[2] / 256.0,\n key + '_in_complexity': complexity(pictureName, key + \"in\", stats_in_image),\n key + '_out_lum': luminance(stats_out.mean) / 256.0,\n key + '_out_r': stats_out.mean[0] / 256.0,\n key + '_out_g': stats_out.mean[1] / 256.0,\n key + '_out_b': stats_out.mean[2] / 256.0,\n key + '_out_complexity': complexity(pictureName, key + \"out\", stats_out_image),\n }\n else:\n return {\n key + '_in_lum': luminance(stats_in.mean) / 256.0,\n key + '_out_lum': luminance(stats_out.mean) / 256.0,\n }\n except ZeroDivisionError:\n return {}\n\ndef do_saliency(original, masks, path, prefix, pictureName, results):\n saliency = Image.open(path + pictureName + \".png\")\n if saliency.mode != \"RGBA\":\n saliency = saliency.convert(\"RGBA\")\n 
saliency = saliency.resize(original.size)\n stats_saliency = stat(saliency)\n results[prefix + '_lum'] = luminance(stats_saliency.mean) / 256.0\n\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(False, saliency, pictureName, prefix + i, mask)\n results.update(stuff)\n\n saliency_bw = saliency.convert(\"L\")\n s_array = numpy.array(saliency_bw)\n m_array = numpy.array(masks[0])\n dot = numpy.dot(s_array, numpy.rot90(m_array))\n\n results[prefix + \"_aoi_dotproduct_sum\"] = numpy.sum(dot)\n\n\ndef write_stats(writer, filename, pictureName):\n\n original = Image.open(IMG_DIR + filename)\n\n if original.mode != \"RGBA\":\n # P is palette. Did you know BMP *and* PNG files can have 8-bit palettes? WHAAAT\n original = original.convert(\"RGBA\")\n\n # First, draw the AOI masks in white on black\n # This returns a list, the first mask is ALL AOIs, the second is the \"emotional\" ones >=2, and the rest are each individual shape\n masks = createAOIMasks(pictureName, original.size)\n\n if masks == None:\n print(\"No masks found in: \" + filename)\n return False\n\n stats_orig = stat(original)\n\n results = {\n 'image_name': pictureName,\n 'orig_lum': luminance(stats_orig.mean) / 256.0,\n 'orig_r': stats_orig.mean[0] / 256.0,\n 'orig_g': stats_orig.mean[1] / 256.0,\n 'orig_b': stats_orig.mean[2] / 256.0,\n 'orig_complexity': complexity(pictureName, \"original\", original),\n }\n\n for i, mask in zip(MASK_NAMES, masks):\n stuff = results_for_mask(True, original, pictureName, 'aoi' + i, mask)\n results.update(stuff)\n\n # And finally we get the saliency image and resize it and do a bunch of garbage with it and the AOI masks\n\n do_saliency(original, masks, SALIENCY_DIR, \"saliency\", pictureName, results)\n do_saliency(original, masks, SUN_SALIENCY_DIR, \"sun_saliency\", pictureName, results)\n\n\n writer.writerow(results)\n if DEBUG: print(\"Generated stats for \" + filename)\n return True\n\n\n\nwith open('stats.csv', 'wb') as csvfile:\n per_mask_fields = [\n 
'_mask_lum',\n '_in_lum',\n '_in_r',\n '_in_g',\n '_in_b',\n '_in_complexity',\n '_out_lum',\n '_out_r',\n '_out_g',\n '_out_b',\n '_out_complexity',\n ]\n\n per_saliency_fields = [\n '_in_lum',\n '_out_lum',\n ]\n\n fields = [\n 'image_name',\n 'orig_lum',\n 'orig_r',\n 'orig_g',\n 'orig_b',\n 'orig_complexity',\n ]\n\n for i in MASK_NAMES:\n for f in per_mask_fields:\n fields.append(\"aoi{0}{1}\".format(i,f))\n\n fields.append(\"saliency_aoi_dotproduct_sum\")\n fields.append(\"saliency_lum\")\n\n for i in MASK_NAMES:\n for f in per_saliency_fields:\n fields.append(\"saliency{0}{1}\".format(i,f))\n\n fields.append(\"sun_saliency_aoi_dotproduct_sum\")\n fields.append(\"sun_saliency_lum\")\n\n for i in MASK_NAMES:\n for f in per_saliency_fields:\n fields.append(\"sun_saliency{0}{1}\".format(i,f))\n\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writerow(dict(zip(fields,fields)))\n\n for filename in sorted(os.listdir(IMG_DIR)):\n if not \".png\" in filename:\n continue\n\n pictureName = filename.replace(\".png\", \"\")\n\n try:\n write_stats(writer, filename, pictureName)\n\n except:\n print(\"Error on file \" + pictureName, file=sys.stderr)\n raise\n\n\n",
"step-ids": [
6,
8,
12,
13,
17
]
}
|
[
6,
8,
12,
13,
17
] |
'''This class contains a custom made format for printing complex numbers'''
class ComplexCustom(complex):
    """Complex-number subclass with a custom format() rendering.

    Formats as "(<real><signed imag>j)", applying the caller's format
    spec to both the real and imaginary components.
    """

    def __format__(self, fmt):
        """Render this number as "(real+imagj)" using *fmt* for each part."""
        real_part = format(self.real, fmt)
        imag_part = format(self.imag, "+" + fmt)  # force an explicit sign
        return "({0}{1}j)".format(real_part, imag_part)
|
normal
|
{
"blob_id": "c62647b0b226d97926d1f53975a7aac7c39949d8",
"index": 7959,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ComplexCustom(complex):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ComplexCustom(complex):\n <mask token>\n\n def __format__(self, fmt):\n \"\"\"This function creates a custom made format for printing complex numbers\"\"\"\n cfmt = '({:' + fmt + '}{:+' + fmt + '}j)'\n return cfmt.format(self.real, self.imag)\n",
"step-4": "<mask token>\n\n\nclass ComplexCustom(complex):\n \"\"\"\n This class contains function for\n a custom made printing format for complex numbers\n \"\"\"\n\n def __format__(self, fmt):\n \"\"\"This function creates a custom made format for printing complex numbers\"\"\"\n cfmt = '({:' + fmt + '}{:+' + fmt + '}j)'\n return cfmt.format(self.real, self.imag)\n",
"step-5": "'''This class contains a custom made format for printing complex numbers'''\nclass ComplexCustom(complex):\n '''\n This class contains function for\n a custom made printing format for complex numbers\n '''\n def __format__(self, fmt):\n '''This function creates a custom made format for printing complex numbers'''\n cfmt = \"({:\" + fmt + \"}{:+\" + fmt + \"}j)\"\n return cfmt.format(self.real, self.imag)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from math import *
def heron(a, b, c):
    """Return the area of a triangle with side lengths a, b, c.

    Uses Heron's formula. Raises ValueError when the (strict) triangle
    inequality is violated, including degenerate zero-area triangles.
    """
    sides = [a, b, c]
    sides.sort()
    if sides[0] + sides[1] <= sides[-1]:
        raise ValueError("Warunek trojkata jest nie spelniony")
    # 2.0 forces float division: under Python 2 (this file uses the
    # `print` statement) the original `/ 2` truncated the half-perimeter
    # for odd integer perimeters, producing a wrong area.
    half_perimeter = (a + b + c) / 2.0
    return sqrt(half_perimeter * (half_perimeter - a) * (half_perimeter - b) * (half_perimeter - c))
# Demo call (Python 2 print statement). Note: sides 7, 4, 3 violate the
# strict triangle inequality (4 + 3 == 7), so this call raises ValueError.
print heron(7, 4, 3)
|
normal
|
{
"blob_id": "bbd421d39894af163b56e7104c3b29a45635d5a3",
"index": 5425,
"step-1": "from math import *\r\n\r\ndef heron(a, b, c):\r\n tmp = [a, b, c]\r\n tmp.sort()\r\n if tmp[0] + tmp[1] <= tmp[-1]:\r\n raise ValueError (\"Warunek trojkata jest nie spelniony\")\r\n halfPerimeter = (a + b + c)/2\r\n return sqrt(halfPerimeter * (halfPerimeter - a)*(halfPerimeter-b)*(halfPerimeter-c))\r\n\r\nprint heron(7, 4, 3)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
from cv2 import cv2
from tqdm import tqdm
import os
import pickle
import numpy as np
import csv
import sys
from collections import defaultdict
from dataset_utils import *
sys.path.append("../training")
from dataset_tools import enclosing_square, add_margin, DataGenerator
EXT_ROOT = os.path.dirname(os.path.abspath(__file__))  # directory containing this script

# RAF-DB-specific label encodings. Age groups and race use these local
# tables; gender and emotion go through the shared LABELS mapping
# (presumably provided by dataset_utils — verify there).
rafdb_labels = {
    "age_group": {
        "0-3": 0,
        "4-19": 1,
        "20-39": 2,
        "40-69": 3,
        "70+":4
    },
    "race": {
        "Caucasian": 0,
        "African-American": 1,
        "Asian": 2
    }
}

# converted labels
rafDBmeta = defaultdict(dict)  # raw per-image metadata rows parsed from the CSV

# multitask labels
rafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose
rafDBdata = None # dict({image_path: ... }) # for ensembling purpose
# ORDER: Gender, Age, Ethnicity, Emotion
def _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):
    """Populate the global rafDBdata map from parsed CSV metadata.

    Labels for disabled tasks are set to MASK_VALUE. A second call is a
    no-op once rafDBdata has been built.
    """
    global rafDBdata
    if rafDBdata is not None:
        return
    rafDBdata = dict()
    errors = defaultdict(set)
    for sample_num, (image_path, image_meta) in enumerate(input_meta.items()):
        rafDBdata[image_path] = {
            "roi": None,  # images are aligned; ROI is resolved to the full frame later
            "identity": image_meta["identity"],
            "gender": get_gender_label(image_meta["gender"]) if include_gender else MASK_VALUE,
            "age_group": get_age_group_label(image_meta["age_group"]) if include_age_group else MASK_VALUE,
            "ethnicity": get_ethnicity_label(image_meta["race"]) if include_race else MASK_VALUE,
            "emotion": get_emotion_label(image_meta["emotion"]),
            "sample_num": sample_num,
        }
    print("Metadata:", len(rafDBdata))
    if errors:
        print("Gender errors", errors["gender"])
        print("Age errors", errors["age"])
        print("Ethnicity errors", errors["ethnicity"])
# Labelling
def get_gender_label(gender):
    """Map 'male'/'female' to the shared numeric gender label; MASK_VALUE otherwise."""
    if gender in ("male", "female"):
        return LABELS["gender"][gender]
    return MASK_VALUE
def get_age_group_label(age_group_text):
    """Translate an age-group name (e.g. "20-39") into its numeric label."""
    age_group_map = rafdb_labels["age_group"]
    return age_group_map[age_group_text]
def get_ethnicity_label(ethnicity_text):
    """Translate a race name (e.g. "Asian") into its numeric label."""
    race_map = rafdb_labels["race"]
    return race_map[ethnicity_text]
def get_emotion_label(emotion):
    """Translate an emotion name into its numeric label via the shared LABELS table."""
    emotion_map = LABELS["emotion"]
    return emotion_map[emotion]
# Load from csv
def _load_meta_from_csv(csv_meta, output_dict):
    """Fill output_dict with per-image rows read from the multitask CSV.

    Each row is (image_name, gender, age_group, race, emotion); the
    identity id is the second underscore-separated token of the name.
    """
    for row in readcsv(csv_meta):
        image_name = row[0]
        meta = output_dict[image_name]
        meta["gender"] = row[1]
        meta["age_group"] = row[2]
        meta["race"] = row[3]
        meta["emotion"] = row[4]
        meta["identity"] = image_name.split("_")[1]
def get_partition(identity_label):
    """Return the train/val partition for an identity, assigning it on first sight.

    All images of one identity land in the same partition (stratified
    split); roughly 2 of every 10 newly seen identities go to validation.
    """
    global rafDBpartition
    try:
        # Identity already assigned: bump its face count, keep its partition.
        faces, partition = rafDBpartition[identity_label]
        rafDBpartition[identity_label] = (faces + 1, partition)
    except KeyError:
        # split 20/80 stratified by identity
        l = (len(rafDBpartition) - 1) % 10
        if l == 0 or l == 1:
            partition = PARTITION_VAL
        else:
            partition = PARTITION_TRAIN
        rafDBpartition[identity_label] = (1, partition)
    return partition
def _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):
    """Read every image referenced by rafDBdata and build the sample list.

    Unreadable and blank (constant-valued) images are skipped and counted.
    Each returned sample is a dict with 'img' (file path), 'label'
    (gender, age, ethnicity, emotion), 'roi' and 'part' (partition id).
    """
    data = list()
    discarded_items = defaultdict(list)
    for image_path, image_meta in tqdm(rafDBdata.items()):
        path = os.path.join(imagesdir, image_path)
        if ALIGNED:
            # Aligned variants sit next to the originals with an "_aligned" suffix.
            path = os.path.splitext(path)
            path = path[0] + "_aligned" + path[1]
        identity = image_meta["identity"]
        image = cv2.imread(path)
        if image is None:
            print("WARNING! Unable to read {}".format(image_path))
            print(" - At {}".format(path))
            discarded_items["unavailable_image"].append(identity)
            continue
        if np.max(image) == np.min(image):
            # A constant image carries no usable content.
            print("Blank image {}".format(image_path))
            discarded_items["blank_image"].append(identity)
            continue
        # Test images keep their partition; training images are split
        # train/val per identity via get_partition.
        sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)
        gender = rafDBdata[image_path]["gender"]
        age = rafDBdata[image_path]["age_group"]
        ethnicity = rafDBdata[image_path]["ethnicity"]
        emotion = rafDBdata[image_path]["emotion"]
        labels = (gender, age, ethnicity, emotion)
        # Fall back to the full image when no ROI was recorded.
        roi = (0, 0, image.shape[1], image.shape[0]) if image_meta["roi"] is None else image_meta["roi"]
        sample = {
            'img': path,
            'label': labels,
            'roi': roi,
            'part': sample_partition
        }
        data.append(sample)
        if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:
            print("Stopped loading. Debug max samples: ", debug_max_num_samples)
            break
    print("Data loaded. {} samples".format(len(data)))
    print("Discarded for unavailable image: ", len(discarded_items["unavailable_image"]))
    print("Discarded for blank image: ", len(discarded_items["blank_image"]))
    return data
# When True, use the "_aligned" image variants (affects both the images
# directory template and the per-file "_aligned" suffix).
ALIGNED = True
class RAFDBMulti:
    """Multitask RAF-DB dataset wrapper (gender, age group, ethnicity, emotion).

    Loads samples from disk, caching the result as a pickle keyed by the
    enabled tasks and partition, and exposes them via a DataGenerator.
    """

    def __init__(self,
                 partition='train',
                 imagesdir='data/RAF-DB/basic/Image/{aligned}',
                 csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',
                 target_shape=(112, 112, 3),
                 augment=True,
                 custom_augmentation=None,
                 preprocessing='full_normalization',
                 debug_max_num_samples=None,
                 include_gender=False,
                 include_age_group=False,
                 include_race=False,
                 **kwargs):
        """Load the requested partition, from the pickle cache when available.

        partition: 'train', 'val' or 'test'; train and val both come from
            the RAF-DB "train" CSV and are split per identity (get_partition).
        debug_max_num_samples: optional cap on the number of samples.
        include_*: when False the corresponding label stays MASK_VALUE.
        """
        partition_label = partition_select(partition)
        self.target_shape = target_shape
        self.custom_augmentation = custom_augmentation
        self.augment = augment
        self.gen = None
        self.preprocessing = preprocessing
        print('Loading %s data...' % partition)
        num_samples = "_" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''
        # Cache name encodes the enabled tasks so different task
        # combinations never share a cache file.
        cache_task = "{}{}{}_emotion".format(
            "_withgender" if include_gender else "",
            "_withagegroup" if include_age_group else "",
            "_withrace" if include_race else ""
        )
        cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)
        cache_file_name = os.path.join("cache", cache_file_name)
        cache_file_name = os.path.join(EXT_ROOT, cache_file_name)
        print("cache file name %s" % cache_file_name)
        try:
            with open(cache_file_name, 'rb') as f:
                self.data = pickle.load(f)[:debug_max_num_samples]
                print("Data loaded. %d samples, from cache" % (len(self.data)))
        except FileNotFoundError:
            # Cache miss: parse the CSV, read the images, then write the cache.
            print("Loading %s data from scratch" % partition)
            load_partition = "train" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else "test"
            imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned="aligned" if ALIGNED else "original"))
            csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))
            _load_meta_from_csv(csvmeta, rafDBmeta)
            _load_traits(rafDBmeta, include_gender, include_age_group, include_race)
            print("Loading {} dataset".format(partition))
            loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)
            print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)
            if partition.startswith('test'):
                self.data = loaded_data
            else:
                # Keep only the samples assigned to the requested split.
                self.data = [x for x in loaded_data if x['part'] == partition_label]
            with open(cache_file_name, 'wb') as f:
                print("Pickle dumping")
                pickle.dump(self.data, f)

    def get_data(self):
        """Return the raw list of sample dicts."""
        return self.data

    def get_num_samples(self):
        """Return the number of loaded samples."""
        return len(self.data)

    def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):
        """Return the DataGenerator over the samples, creating it on first call."""
        if self.gen is None:
            self.gen = DataGenerator(data=self.data,
                                     target_shape=self.target_shape,
                                     with_augmentation=self.augment,
                                     custom_augmentation=self.custom_augmentation,
                                     batch_size=batch_size,
                                     num_classes=self.get_num_classes(),
                                     preprocessing=self.preprocessing,
                                     fullinfo=fullinfo,
                                     doublelabel=doublelabel)
        return self.gen

    def get_num_classes(self):
        """Return the shared CLASSES constant (per-task class counts — see dataset_utils)."""
        return CLASSES
def test_multi(dataset="test", debug_samples=None):
    """Interactive smoke test: show each sample with its decoded labels.

    Opens every image in an OpenCV window captioned with the verbose
    gender/age/ethnicity/emotion labels; press 'q' to stop.
    """
    if dataset.startswith("train") or dataset.startswith("val"):
        print(dataset, debug_samples if debug_samples is not None else '')
        source = RAFDBMulti(dataset,
                            target_shape=(112, 112, 3),
                            preprocessing='vggface2',
                            debug_max_num_samples=debug_samples)
    else:
        source = RAFDBMulti('test',
                            target_shape=(112, 112, 3),
                            preprocessing='vggface2',
                            debug_max_num_samples=debug_samples)
    gen = source.get_generator()
    sample_idx = 0
    for batch in tqdm(gen):
        images, labels = batch[0], batch[1]
        for im, gender, age, ethnicity, emotion in zip(images, labels[0], labels[1], labels[2], labels[3]):
            print("Sample:", sample_idx)
            print("Labels:", gender, age, ethnicity, emotion)
            print("Gender:", verbose_gender(gender),
                  "- Age:", verbose_age(age),
                  "- Ethnicity:", verbose_ethnicity(ethnicity),
                  "- Emotion:", verbose_emotion(emotion))
            caption = "{} {} {} {}".format(verbose_gender(gender), verbose_age(age),
                                           verbose_ethnicity(ethnicity), verbose_emotion(emotion))
            # Stretch pixel values to the full 0-255 range for display.
            lo, hi = np.min(im), np.max(im)
            im = (255 * ((im - lo) / (hi - lo))).astype(np.uint8)
            cv2.putText(im, caption, (0, im.shape[1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))
            cv2.imshow(caption, im)
            sample_idx += 1
            if cv2.waitKey(0) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                return
# Manual smoke test: preview each split in turn when run as a script.
if '__main__' == __name__:
    test_multi("train")
    test_multi("val")
    test_multi("test")
|
normal
|
{
"blob_id": "0b7d1564ecbd78086d59629a2058716f41b4b8c8",
"index": 9686,
"step-1": "<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. 
{} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' % partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. 
%d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\n<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! 
Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' 
% partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. %d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, 
with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _load_traits(input_meta, include_gender=False, include_age_group=False,\n include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta['identity']\n roi = None\n rafDBdata[image_path] = {'roi': roi, 'identity': identity,\n 'gender': get_gender_label(image_meta['gender']) if\n include_gender else MASK_VALUE, 'age_group': \n get_age_group_label(image_meta['age_group']) if\n include_age_group else MASK_VALUE, 'ethnicity': \n get_ethnicity_label(image_meta['race']) if include_race else\n MASK_VALUE, 'emotion': get_emotion_label(image_meta[\n 'emotion']), 'sample_num': i}\n i += 1\n print('Metadata:', len(rafDBdata))\n if errors:\n print('Gender errors', errors['gender'])\n print('Age errors', errors['age'])\n print('Ethnicity errors', errors['ethnicity'])\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\n<mask token>\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\n<mask token>\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = 
cv2.imread(path)\n if image is None:\n print('WARNING! Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\n<mask token>\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' 
% partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. %d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, 
with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\n<mask token>\n",
"step-4": "import warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom cv2 import cv2\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport numpy as np\nimport csv\nimport sys\nfrom collections import defaultdict\nfrom dataset_utils import *\nsys.path.append('../training')\nfrom dataset_tools import enclosing_square, add_margin, DataGenerator\nEXT_ROOT = os.path.dirname(os.path.abspath(__file__))\nrafdb_labels = {'age_group': {'0-3': 0, '4-19': 1, '20-39': 2, '40-69': 3,\n '70+': 4}, 'race': {'Caucasian': 0, 'African-American': 1, 'Asian': 2}}\nrafDBmeta = defaultdict(dict)\nrafDBpartition = dict()\nrafDBdata = None\n\n\ndef _load_traits(input_meta, include_gender=False, include_age_group=False,\n include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta['identity']\n roi = None\n rafDBdata[image_path] = {'roi': roi, 'identity': identity,\n 'gender': get_gender_label(image_meta['gender']) if\n include_gender else MASK_VALUE, 'age_group': \n get_age_group_label(image_meta['age_group']) if\n include_age_group else MASK_VALUE, 'ethnicity': \n get_ethnicity_label(image_meta['race']) if include_race else\n MASK_VALUE, 'emotion': get_emotion_label(image_meta[\n 'emotion']), 'sample_num': i}\n i += 1\n print('Metadata:', len(rafDBdata))\n if errors:\n print('Gender errors', errors['gender'])\n print('Age errors', errors['age'])\n print('Ethnicity errors', errors['ethnicity'])\n\n\ndef get_gender_label(gender):\n if gender == 'male':\n return LABELS['gender']['male']\n elif gender == 'female':\n return LABELS['gender']['female']\n return MASK_VALUE\n\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels['age_group'][age_group_text]\n\n\ndef get_ethnicity_label(ethnicity_text):\n return rafdb_labels['race'][ethnicity_text]\n\n\ndef get_emotion_label(emotion):\n return LABELS['emotion'][emotion]\n\n\ndef 
_load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]]['gender'] = row[1]\n output_dict[row[0]]['age_group'] = row[2]\n output_dict[row[0]]['race'] = row[3]\n output_dict[row[0]]['emotion'] = row[4]\n output_dict[row[0]]['identity'] = row[0].split('_')[1]\n\n\ndef get_partition(identity_label):\n global rafDBpartition\n try:\n faces, partition = rafDBpartition[identity_label]\n rafDBpartition[identity_label] = faces + 1, partition\n except KeyError:\n l = (len(rafDBpartition) - 1) % 10\n if l == 0 or l == 1:\n partition = PARTITION_VAL\n else:\n partition = PARTITION_TRAIN\n rafDBpartition[identity_label] = 1, partition\n return partition\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + '_aligned' + path[1]\n identity = image_meta['identity']\n image = cv2.imread(path)\n if image is None:\n print('WARNING! 
Unable to read {}'.format(image_path))\n print(' - At {}'.format(path))\n discarded_items['unavailable_image'].append(identity)\n continue\n if np.max(image) == np.min(image):\n print('Blank image {}'.format(image_path))\n discarded_items['blank_image'].append(identity)\n continue\n sample_partition = (PARTITION_TEST if partition_label ==\n PARTITION_TEST else get_partition(identity))\n gender = rafDBdata[image_path]['gender']\n age = rafDBdata[image_path]['age_group']\n ethnicity = rafDBdata[image_path]['ethnicity']\n emotion = rafDBdata[image_path]['emotion']\n labels = gender, age, ethnicity, emotion\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta['roi'\n ] is None else image_meta['roi']\n sample = {'img': path, 'label': labels, 'roi': roi, 'part':\n sample_partition}\n data.append(sample)\n if debug_max_num_samples is not None and len(data\n ) >= debug_max_num_samples:\n print('Stopped loading. Debug max samples: ', debug_max_num_samples\n )\n break\n print('Data loaded. {} samples'.format(len(data)))\n print('Discarded for unavailable image: ', len(discarded_items[\n 'unavailable_image']))\n print('Discarded for blank image: ', len(discarded_items['blank_image']))\n return data\n\n\nALIGNED = True\n\n\nclass RAFDBMulti:\n\n def __init__(self, partition='train', imagesdir=\n 'data/RAF-DB/basic/Image/{aligned}', csvmeta=\n 'data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3), augment=True, custom_augmentation=None,\n preprocessing='full_normalization', debug_max_num_samples=None,\n include_gender=False, include_age_group=False, include_race=False,\n **kwargs):\n partition_label = partition_select(partition)\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' 
% partition)\n num_samples = '_' + str(debug_max_num_samples\n ) if debug_max_num_samples is not None else ''\n cache_task = '{}{}{}_emotion'.format('_withgender' if\n include_gender else '', '_withagegroup' if include_age_group else\n '', '_withrace' if include_race else '')\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(\n task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join('cache', cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print('cache file name %s' % cache_file_name)\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print('Data loaded. %d samples, from cache' % len(self.data))\n except FileNotFoundError:\n print('Loading %s data from scratch' % partition)\n load_partition = ('train' if partition_label == PARTITION_TRAIN or\n partition_label == PARTITION_VAL else 'test')\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\n 'aligned' if ALIGNED else 'original'))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=\n load_partition))\n _load_meta_from_csv(csvmeta, rafDBmeta)\n _load_traits(rafDBmeta, include_gender, include_age_group,\n include_race)\n print('Loading {} dataset'.format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label,\n debug_max_num_samples)\n print_verbose_partition(dataset_partition=rafDBpartition,\n verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] ==\n partition_label]\n with open(cache_file_name, 'wb') as f:\n print('Pickle dumping')\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data, target_shape=self.\n target_shape, 
with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation, batch_size=\n batch_size, num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset='test', debug_samples=None):\n if dataset.startswith('train') or dataset.startswith('val'):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset, target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test', target_shape=(112, 112, 3), preprocessing=\n 'vggface2', debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0\n ], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print('Sample:', i)\n print('Labels:', gender, age, ethnicity, emotion)\n print('Gender:', verbose_gender(gender), '- Age:', verbose_age(\n age), '- Ethnicity:', verbose_ethnicity(ethnicity),\n '- Emotion:', verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8\n )\n cv2.putText(im, '{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), (0, im.shape[1]), cv2.\n FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow('{} {} {} {}'.format(verbose_gender(gender),\n verbose_age(age), verbose_ethnicity(ethnicity),\n verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 255 == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\nif '__main__' == __name__:\n test_multi('train')\n test_multi('val')\n test_multi('test')\n",
"step-5": "import warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom cv2 import cv2\nfrom tqdm import tqdm\nimport os\nimport pickle\nimport numpy as np\nimport csv\nimport sys\nfrom collections import defaultdict\n\nfrom dataset_utils import *\n\nsys.path.append(\"../training\")\nfrom dataset_tools import enclosing_square, add_margin, DataGenerator\n\nEXT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nrafdb_labels = {\n \"age_group\": {\n \"0-3\": 0,\n \"4-19\": 1,\n \"20-39\": 2,\n \"40-69\": 3,\n \"70+\":4 \n },\n \"race\": {\n \"Caucasian\": 0,\n \"African-American\": 1,\n \"Asian\": 2\n }\n}\n\n# converted labels\nrafDBmeta = defaultdict(dict)\n\n# multitask labels\nrafDBpartition = dict() # dict({id:partition or None}) # for partitioning purpose\nrafDBdata = None # dict({image_path: ... }) # for ensembling purpose\n\n\n# ORDER: Gender, Age, Ethnicity, Emotion\ndef _load_traits(input_meta, include_gender=False, include_age_group=False, include_race=False):\n global rafDBdata\n if rafDBdata is None:\n rafDBdata = dict()\n i, errors = 0, defaultdict(set)\n for image_path, image_meta in input_meta.items():\n identity = image_meta[\"identity\"]\n roi = None # aligned image, roi is the image size\n rafDBdata[image_path] = {\n \"roi\" : roi,\n \"identity\" : identity,\n \"gender\" : get_gender_label(image_meta[\"gender\"]) if include_gender else MASK_VALUE,\n \"age_group\" : get_age_group_label(image_meta[\"age_group\"]) if include_age_group else MASK_VALUE,\n \"ethnicity\": get_ethnicity_label(image_meta[\"race\"]) if include_race else MASK_VALUE,\n \"emotion\": get_emotion_label(image_meta[\"emotion\"]),\n \"sample_num\" : i\n }\n i += 1 \n print(\"Metadata:\", len(rafDBdata))\n if errors:\n print(\"Gender errors\", errors[\"gender\"])\n print(\"Age errors\", errors[\"age\"])\n print(\"Ethnicity errors\", errors[\"ethnicity\"])\n\n\n# Labelling\ndef get_gender_label(gender):\n if gender == 'male':\n return 
LABELS[\"gender\"][\"male\"]\n elif gender == 'female':\n return LABELS[\"gender\"][\"female\"]\n return MASK_VALUE\n\ndef get_age_group_label(age_group_text):\n return rafdb_labels[\"age_group\"][age_group_text]\n\ndef get_ethnicity_label(ethnicity_text):\n return rafdb_labels[\"race\"][ethnicity_text]\n\ndef get_emotion_label(emotion):\n return LABELS[\"emotion\"][emotion]\n\n\n# Load from csv\ndef _load_meta_from_csv(csv_meta, output_dict):\n data = readcsv(csv_meta)\n for row in data:\n output_dict[row[0]][\"gender\"] = row[1]\n output_dict[row[0]][\"age_group\"] = row[2]\n output_dict[row[0]][\"race\"] = row[3]\n output_dict[row[0]][\"emotion\"] = row[4]\n output_dict[row[0]][\"identity\"] = row[0].split(\"_\")[1]\n\n\ndef get_partition(identity_label): \n global rafDBpartition\n try:\n faces, partition = rafDBpartition[identity_label]\n rafDBpartition[identity_label] = (faces + 1, partition)\n except KeyError:\n # split 20/80 stratified by identity\n l = (len(rafDBpartition) - 1) % 10\n if l == 0 or l == 1:\n partition = PARTITION_VAL\n else:\n partition = PARTITION_TRAIN\n rafDBpartition[identity_label] = (1, partition)\n return partition\n\n\ndef _load_dataset(imagesdir, partition_label, debug_max_num_samples=None):\n data = list()\n discarded_items = defaultdict(list)\n\n for image_path, image_meta in tqdm(rafDBdata.items()):\n path = os.path.join(imagesdir, image_path)\n if ALIGNED:\n path = os.path.splitext(path)\n path = path[0] + \"_aligned\" + path[1]\n identity = image_meta[\"identity\"]\n image = cv2.imread(path)\n if image is None:\n print(\"WARNING! 
Unable to read {}\".format(image_path))\n print(\" - At {}\".format(path))\n discarded_items[\"unavailable_image\"].append(identity)\n continue\n if np.max(image) == np.min(image):\n print(\"Blank image {}\".format(image_path))\n discarded_items[\"blank_image\"].append(identity)\n continue\n sample_partition = PARTITION_TEST if partition_label == PARTITION_TEST else get_partition(identity)\n gender = rafDBdata[image_path][\"gender\"]\n age = rafDBdata[image_path][\"age_group\"]\n ethnicity = rafDBdata[image_path][\"ethnicity\"]\n emotion = rafDBdata[image_path][\"emotion\"]\n labels = (gender, age, ethnicity, emotion)\n roi = (0, 0, image.shape[1], image.shape[0]) if image_meta[\"roi\"] is None else image_meta[\"roi\"] \n sample = {\n 'img': path,\n 'label': labels,\n 'roi': roi,\n 'part': sample_partition\n }\n data.append(sample)\n if debug_max_num_samples is not None and len(data) >= debug_max_num_samples:\n print(\"Stopped loading. Debug max samples: \", debug_max_num_samples)\n break\n print(\"Data loaded. {} samples\".format(len(data)))\n print(\"Discarded for unavailable image: \", len(discarded_items[\"unavailable_image\"]))\n print(\"Discarded for blank image: \", len(discarded_items[\"blank_image\"]))\n return data\n\n\nALIGNED = True\n\nclass RAFDBMulti:\n def __init__(self,\n partition='train',\n imagesdir='data/RAF-DB/basic/Image/{aligned}',\n csvmeta='data/RAF-DB/basic/multitask/{part}.multitask_rafdb.csv',\n target_shape=(112, 112, 3),\n augment=True,\n custom_augmentation=None,\n preprocessing='full_normalization',\n debug_max_num_samples=None,\n include_gender=False,\n include_age_group=False,\n include_race=False,\n **kwargs):\n \n partition_label = partition_select(partition)\n\n self.target_shape = target_shape\n self.custom_augmentation = custom_augmentation\n self.augment = augment\n self.gen = None\n self.preprocessing = preprocessing\n print('Loading %s data...' 
% partition)\n\n num_samples = \"_\" + str(debug_max_num_samples) if debug_max_num_samples is not None else ''\n cache_task = \"{}{}{}_emotion\".format(\n \"_withgender\" if include_gender else \"\",\n \"_withagegroup\" if include_age_group else \"\",\n \"_withrace\" if include_race else \"\"\n )\n cache_file_name = 'rafdb{task}_{partition}{num_samples}.cache'.format(task=cache_task, partition=partition, num_samples=num_samples)\n cache_file_name = os.path.join(\"cache\", cache_file_name)\n cache_file_name = os.path.join(EXT_ROOT, cache_file_name)\n print(\"cache file name %s\" % cache_file_name)\n\n try:\n with open(cache_file_name, 'rb') as f:\n self.data = pickle.load(f)[:debug_max_num_samples]\n print(\"Data loaded. %d samples, from cache\" % (len(self.data)))\n except FileNotFoundError:\n print(\"Loading %s data from scratch\" % partition)\n load_partition = \"train\" if partition_label == PARTITION_TRAIN or partition_label == PARTITION_VAL else \"test\"\n\n imagesdir = os.path.join(EXT_ROOT, imagesdir.format(aligned=\"aligned\" if ALIGNED else \"original\"))\n csvmeta = os.path.join(EXT_ROOT, csvmeta.format(part=load_partition))\n\n _load_meta_from_csv(csvmeta, rafDBmeta)\n\n _load_traits(rafDBmeta, include_gender, include_age_group, include_race)\n \n print(\"Loading {} dataset\".format(partition))\n loaded_data = _load_dataset(imagesdir, partition_label, debug_max_num_samples)\n\n print_verbose_partition(dataset_partition=rafDBpartition, verbosed_partition=partition_label)\n if partition.startswith('test'):\n self.data = loaded_data\n else:\n self.data = [x for x in loaded_data if x['part'] == partition_label]\n with open(cache_file_name, 'wb') as f:\n print(\"Pickle dumping\")\n pickle.dump(self.data, f)\n\n def get_data(self):\n return self.data\n\n def get_num_samples(self):\n return len(self.data)\n\n def get_generator(self, batch_size=64, fullinfo=False, doublelabel=False):\n if self.gen is None:\n self.gen = DataGenerator(data=self.data,\n 
target_shape=self.target_shape,\n with_augmentation=self.augment,\n custom_augmentation=self.custom_augmentation,\n batch_size=batch_size,\n num_classes=self.get_num_classes(),\n preprocessing=self.preprocessing, \n fullinfo=fullinfo,\n doublelabel=doublelabel)\n return self.gen\n\n def get_num_classes(self):\n return CLASSES\n\n\ndef test_multi(dataset=\"test\", debug_samples=None):\n\n if dataset.startswith(\"train\") or dataset.startswith(\"val\"):\n print(dataset, debug_samples if debug_samples is not None else '')\n dt = RAFDBMulti(dataset,\n target_shape=(112, 112, 3),\n preprocessing='vggface2',\n debug_max_num_samples=debug_samples)\n gen = dt.get_generator()\n else:\n dv = RAFDBMulti('test',\n target_shape=(112, 112, 3),\n preprocessing='vggface2',\n debug_max_num_samples=debug_samples)\n gen = dv.get_generator()\n i = 0\n for batch in tqdm(gen):\n for im, gender, age, ethnicity, emotion in zip(batch[0], batch[1][0], batch[1][1], batch[1][2], batch[1][3]):\n facemax = np.max(im)\n facemin = np.min(im)\n print(\"Sample:\", i)\n print(\"Labels:\", gender, age, ethnicity, emotion)\n print(\"Gender:\", verbose_gender(gender),\n \"- Age:\", verbose_age(age),\n \"- Ethnicity:\", verbose_ethnicity(ethnicity),\n \"- Emotion:\", verbose_emotion(emotion))\n im = (255 * ((im - facemin) / (facemax - facemin))).astype(np.uint8)\n cv2.putText(im, \"{} {} {} {}\".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)),\n (0, im.shape[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255))\n cv2.imshow(\"{} {} {} {}\".format(verbose_gender(gender), verbose_age(age), verbose_ethnicity(ethnicity), verbose_emotion(emotion)), im)\n i += 1\n if cv2.waitKey(0) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n return\n\n\nif '__main__' == __name__:\n test_multi(\"train\")\n test_multi(\"val\")\n test_multi(\"test\")\n",
"step-ids": [
9,
12,
13,
18,
19
]
}
|
[
9,
12,
13,
18,
19
] |
from torch import nn
class MNIST3dModel(nn.Module):
    """3D-CNN classifier for volumetric MNIST-style inputs.

    Two conv stages (conv-conv-ReLU-BN-pool) followed by a dropout-regularized
    two-layer classifier head that emits raw class logits.

    Args:
        input_c: number of input channels (default 3).
        num_filters: base channel width; deeper layers use multiples of it.
        num_classes: size of the output logit vector (default 10).

    Note:
        ``linear1`` expects a flattened feature vector of 4096 elements.
        With the default ``num_filters=8`` this corresponds to a 16x16x16
        spatial input (64 channels * 4*4*4 after two 2x poolings); other
        input sizes / widths require a matching flattened size.
    """

    def __init__(self, input_c=3, num_filters=8, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=num_filters,
            kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=
            num_filters * 2, kernel_size=3, stride=1, padding=1)
        # BUGFIX: batchnorm widths were hard-coded (16 and 64), which only
        # matched the default num_filters=8 and crashed for any other width.
        self.batchnorm1 = nn.BatchNorm3d(num_filters * 2)
        self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=
            num_filters * 4, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=
            num_filters * 8, kernel_size=3, stride=1, padding=1)
        self.batchnorm2 = nn.BatchNorm3d(num_filters * 8)
        self.pool = nn.MaxPool3d(2)
        self.dropout1 = nn.Dropout(0.25)
        self.relu = nn.ReLU()
        self.linear1 = nn.Linear(4096, 1024)
        self.dropout2 = nn.Dropout(0.5)
        self.linear2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        """Return raw class logits of shape ``(batch, num_classes)``."""
        # Stage 1: conv-conv-ReLU-BN-pool (ReLU intentionally applied only
        # after conv2, preserving the original design).
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.batchnorm1(x)
        x = self.pool(x)
        # Stage 2: same pattern at 4x/8x width, plus feature dropout.
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.relu(x)
        x = self.batchnorm2(x)
        x = self.pool(x)
        x = self.dropout1(x)
        # Classifier head on the flattened feature volume.
        x = x.view(x.size()[0], -1)
        x = self.linear1(x)
        x = self.relu(x)
        x = self.dropout2(x)
        x = self.linear2(x)
        return x
|
normal
|
{
"blob_id": "f6838906c961a9ca7d91d2ab02fd2af72797b880",
"index": 4628,
"step-1": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n <mask token>\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-3": "<mask token>\n\n\nclass MNIST3dModel(nn.Module):\n\n def __init__(self, input_c=3, num_filters=8, num_classes=10):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=\n num_filters, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=\n num_filters * 2, kernel_size=3, stride=1, padding=1)\n self.batchnorm1 = nn.BatchNorm3d(16)\n self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=\n num_filters * 4, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=\n num_filters * 8, kernel_size=3, stride=1, padding=1)\n self.batchnorm2 = nn.BatchNorm3d(64)\n self.pool = nn.MaxPool3d(2)\n self.dropout1 = nn.Dropout(0.25)\n self.relu = nn.ReLU()\n self.linear1 = nn.Linear(4096, 1024)\n self.dropout2 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-4": "from torch import nn\n\n\nclass MNIST3dModel(nn.Module):\n\n def __init__(self, input_c=3, num_filters=8, num_classes=10):\n super().__init__()\n self.conv1 = nn.Conv3d(in_channels=input_c, out_channels=\n num_filters, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv3d(in_channels=num_filters, out_channels=\n num_filters * 2, kernel_size=3, stride=1, padding=1)\n self.batchnorm1 = nn.BatchNorm3d(16)\n self.conv3 = nn.Conv3d(in_channels=num_filters * 2, out_channels=\n num_filters * 4, kernel_size=3, stride=1, padding=1)\n self.conv4 = nn.Conv3d(in_channels=num_filters * 4, out_channels=\n num_filters * 8, kernel_size=3, stride=1, padding=1)\n self.batchnorm2 = nn.BatchNorm3d(64)\n self.pool = nn.MaxPool3d(2)\n self.dropout1 = nn.Dropout(0.25)\n self.relu = nn.ReLU()\n self.linear1 = nn.Linear(4096, 1024)\n self.dropout2 = nn.Dropout(0.5)\n self.linear2 = nn.Linear(1024, num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.relu(x)\n x = self.batchnorm1(x)\n x = self.pool(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.relu(x)\n x = self.batchnorm2(x)\n x = self.pool(x)\n x = self.dropout1(x)\n x = x.view(x.size()[0], -1)\n x = self.linear1(x)\n x = self.relu(x)\n x = self.dropout2(x)\n x = self.linear2(x)\n return x\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
# import models
from apps.qa.models.coupon import Coupon
from apps.qa.models.coupon_type import CouponType
# Coupon payload kinds offered in the form's <select>; the chosen string is
# resolved to its CouponType model row inside CouponForm.clean().
COUPONTYPE_CHOICES = (
    ('text', _("text")),
    ('url', _("url")),
    ('questionnaire', _("questionnaire")),
)
class CouponForm(forms.ModelForm):
    """Form for creating and editing :class:`Coupon` instances.

    ``type`` is presented to the user as a plain choice field and swapped
    for the matching :class:`CouponType` model instance during validation.
    """

    name = forms.CharField(max_length=64, label=_("Name"), required=True)
    type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)
    # JSON payload containers; populated by custom business logic elsewhere.
    data = forms.CharField(max_length=64, required=False)
    style = forms.CharField(max_length=64, required=False)
    valid_from = forms.DateTimeField(
        label=_("Valid From"),
        required=True,
        widget=forms.DateTimeInput(attrs={'class': 'datepicker'},
                                   format='%Y-%m-%d %H:%M'),
        input_formats=['%Y-%m-%d %H:%M'],
    )
    valid_until = forms.DateTimeField(
        label=_("Valid Until"),
        required=True,
        widget=forms.DateTimeInput(attrs={'class': 'datepicker'},
                                   format='%Y-%m-%d %H:%M'),
        input_formats=['%Y-%m-%d %H:%M'],
    )

    def clean(self):
        """Resolve the raw ``type`` string to its CouponType instance."""
        coupon_type = CouponType.objects.filter(
            name=self.cleaned_data.get('type')).first()
        if coupon_type is None:
            raise forms.ValidationError(
                _("Sorry, that coupon type cannot be found."))
        self.cleaned_data['type'] = coupon_type
        return self.cleaned_data

    class Meta:
        model = Coupon
        fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')
|
normal
|
{
"blob_id": "a0f83f0a2c6ddaa2fc641bd4fa48a6f50fd1d978",
"index": 1755,
"step-1": "<mask token>\n\n\nclass CouponForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-2": "<mask token>\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-3": "<mask token>\nCOUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',\n _('questionnaire'))\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-4": "from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom apps.qa.models.coupon import Coupon\nfrom apps.qa.models.coupon_type import CouponType\nCOUPONTYPE_CHOICES = ('text', _('text')), ('url', _('url')), ('questionnaire',\n _('questionnaire'))\n\n\nclass CouponForm(forms.ModelForm):\n name = forms.CharField(max_length=64, label=_('Name'), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid From'), required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={\n 'class': 'datepicker'}, format='%Y-%m-%d %H:%M'), input_formats=[\n '%Y-%m-%d %H:%M'], label=_('Valid Until'), required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\n 'Sorry, that coupon type cannot be found.'))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n\n class Meta:\n model = Coupon\n fields = 'name', 'type', 'data', 'style', 'valid_from', 'valid_until'\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\n# import models\nfrom apps.qa.models.coupon import Coupon\nfrom apps.qa.models.coupon_type import CouponType\n\n\nCOUPONTYPE_CHOICES = (\n ('text', _(\"text\")),\n ('url', _(\"url\")),\n ('questionnaire', _(\"questionnaire\")),\n)\n\nclass CouponForm(forms.ModelForm):\n\n name = forms.CharField(max_length=64, label=_(\"Name\"), required=True)\n type = forms.ChoiceField(choices=COUPONTYPE_CHOICES)\n\n # these fields are JSON containers populated by custom BL\n data = forms.CharField(max_length=64, required=False)\n style = forms.CharField(max_length=64, required=False)\n\n valid_from = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),\n input_formats=['%Y-%m-%d %H:%M', ],\n label=_(\"Valid From\"),\n required=True)\n valid_until = forms.DateTimeField(widget=forms.DateTimeInput(attrs={'class': 'datepicker'}, format='%Y-%m-%d %H:%M'),\n input_formats=['%Y-%m-%d %H:%M', ],\n label=_(\"Valid Until\"),\n required=True)\n\n def clean(self):\n cleaned_type = self.cleaned_data.get('type')\n real_type = CouponType.objects.filter(name=cleaned_type).first()\n if not real_type:\n raise forms.ValidationError(_(\"Sorry, that coupon type cannot be found.\"))\n else:\n self.cleaned_data['type'] = real_type\n return self.cleaned_data\n\n class Meta:\n model = Coupon\n fields = ('name', 'type', 'data', 'style', 'valid_from', 'valid_until')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/python3
"""Put every non-loopback network interface into promiscuous mode,
then launch the top-level sniffer script."""
import os
import subprocess

import netifaces

# All interfaces except the loopback device.
NICList = [i for i in netifaces.interfaces() if i != "lo"]

for i in NICList:
    # List-form subprocess.run: no shell, so interface names are passed as
    # literal arguments instead of being interpolated into a shell string
    # (os.system was vulnerable to metacharacters in the name).
    subprocess.run(["sudo", "ifconfig", i, "promisc"], check=False)
subprocess.run(["sudo", "python", "./src/top.py"], check=False)
|
normal
|
{
"blob_id": "b38d23a7de3c805ddde4ed2d236e3c6e7bb5e2d0",
"index": 118,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in NICList:\n os.system('sudo ifconfig ' + i + ' promisc')\nos.system('sudo python ./src/top.py')\n",
"step-3": "<mask token>\nNICList = [i for i in netifaces.interfaces() if i != 'lo']\nfor i in NICList:\n os.system('sudo ifconfig ' + i + ' promisc')\nos.system('sudo python ./src/top.py')\n",
"step-4": "import os\nimport netifaces\nNICList = [i for i in netifaces.interfaces() if i != 'lo']\nfor i in NICList:\n os.system('sudo ifconfig ' + i + ' promisc')\nos.system('sudo python ./src/top.py')\n",
"step-5": "#!/usr/bin/python3\nimport os\nimport netifaces\n\n# nicList = netifaces.interfaces()\nNICList = [i for i in netifaces.interfaces() if i != \"lo\"]\n\nfor i in NICList:\n os.system(\"sudo ifconfig \" + i + \" promisc\")\nos.system(\"sudo python ./src/top.py\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class spotinst(terrascript.Provider):
pass
<|reserved_special_token_1|>
import terrascript
class spotinst(terrascript.Provider):
pass
<|reserved_special_token_1|>
# terrascript/spotinst/__init__.py
import terrascript
class spotinst(terrascript.Provider):
pass
|
flexible
|
{
"blob_id": "0ae626df5a471af77f7361bb765b46b861ee8a2c",
"index": 7142,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass spotinst(terrascript.Provider):\n pass\n",
"step-3": "import terrascript\n\n\nclass spotinst(terrascript.Provider):\n pass\n",
"step-4": "# terrascript/spotinst/__init__.py\n\nimport terrascript\n\nclass spotinst(terrascript.Provider):\n pass",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from cftime import num2date
import os
import numpy as np
from datetime import datetime, timedelta, date
def plot_temperatures_by_country(values, country, start, end):
    """
    Returns a plot for temperature values for a country
    from a start point to an end point
    """
    # Rows for the requested country and period.
    in_range = ((values['Country'] == country) &
                (values['dt'] >= start) &
                (values['dt'] <= end))
    subset = values.loc[in_range]
    plt.plot(subset['dt'], subset['AverageTemperature'], label="line 1")
    # Fixed reference year (1973) for the same country, overlaid for comparison.
    reference = values.loc[(values['Country'] == country) &
                           (values['dt'] >= '1973-01-01') &
                           (values['dt'] <= '1974-01-01')]
    plt.plot(reference['dt'], reference['AverageTemperature'], label="line 2")
    # Axis labels and title.
    plt.xlabel('x - axis - date')
    plt.ylabel('y - axis - temperature')
    plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)
    plt.show()
def temperatures_by_city_till2013():
    """
    Info for dataset, temperatures by city part 1 - from 1743 to 2013
    """
    # Columns: dt, AverageTemperature, AverageTemperatureUncertainty,
    # City, Country, Latitude, Longitude
    df = pd.read_csv("GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv")
    # Row count (8 599 212 rows) followed by the distinct-country summary.
    print(len(df))
    unique_countries = df['Country'].unique()
    print(len(unique_countries))
    print(sorted(unique_countries))
def temperatures_by_country_till2013():
    """
    Info for dataset, temperatures by country part 1 - from 1743 to 2013
    """
    # Columns: dt, AverageTemperature, AverageTemperatureUncertainty, Country
    df = pd.read_csv("GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv")
    # Row count (577 462 rows) followed by the distinct-country summary.
    print(len(df))
    unique_countries = df['Country'].unique()
    print(len(unique_countries))
    print(sorted(unique_countries))
def plot_co2_by_country(values, country, start, end):
    """
    Returns a plot for co2 values for a country
    from a start point to an end point
    """
    # Rows for the requested country and year range.
    mask = ((values['Country'] == country) &
            (values['Year'] >= start) &
            (values['Year'] <= end))
    subset = values.loc[mask]
    plt.plot(subset['Year'], subset['CO2'], label="line 1")
    # Axis labels and title.
    plt.xlabel('x - axis - year')
    plt.ylabel('y - axis - co2')
    plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)
    plt.show()
def co2_by_country_till2019():
    """
    Info for dataset, co2 by country part 1 - from 1751 to 2017
    """
    wide = pd.read_csv("CO2/emission data.csv")
    # Reshape from wide (one column per year) to long (Country, Year, CO2).
    long_form = pd.melt(wide, id_vars=["Country"], var_name="Year", value_name="CO2")
    out = pd.DataFrame()
    out['Country'] = long_form['Country']
    out['Year'] = long_form['Year']
    out['CO2'] = long_form['CO2']
    out.to_csv(r'C:\Users\stoja\Desktop\EmissionCO2.csv', index=False)
def get_lat_lon():
    """
    Returns arrays for latitudes, longitudes, cities and countries
    from dataset, temperatures by country part 1, from 1743 to 2013

    Returns:
        Four parallel lists (latitudes, longitudes, cities, countries),
        one entry per unique (Latitude, Longitude) pair, in first-seen order.
    """
    # Columns: dt, AverageTemperature, AverageTemperatureUncertainty,
    # City, Country, Latitude, Longitude
    temperatures = pd.read_csv("GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv")
    lat_array = []
    long_array = []
    cities_array = []
    countries_array = []
    # set gives O(1) membership tests; the previous list scan was O(n) per
    # row, i.e. quadratic over the ~8.6M-row CSV.
    seen = set()
    for i, j, city, country in zip(temperatures['Latitude'],
                                   temperatures['Longitude'],
                                   temperatures['City'],
                                   temperatures['Country']):
        if (i, j) not in seen:
            seen.add((i, j))
            # Coordinates are stored like '57.05N' / '10.33E': drop the
            # trailing hemisphere letter and keep the numeric part.
            # NOTE(review): southern/western values lose their sign here,
            # exactly as in the original implementation — confirm intended.
            lat_array.append(float(i[:-1]))
            long_array.append(float(j[:-1]))
            cities_array.append(city)
            countries_array.append(country)
    return lat_array, long_array, cities_array, countries_array
def make_dataset_temperatures(filename, points):
    """
    From netCDF4 file to CSV file

    Samples the gridded temperature variable at the nearest grid node for
    every unique city coordinate from get_lat_lon() and writes one
    long-format row per (city, time step) to Temperatures.csv.

    Args:
        filename: path of the netCDF4 file (variables: longitude, latitude,
            date_number, temperature).
        points: number of time steps in the file (rows emitted per city).

    Returns:
        The pandas DataFrame that was written to disk.
    """
    ds = Dataset(filename)
    lats, lons, cities, countries = get_lat_lon()
    # total lat,lon pairs: 1366
    print('The number of rows is ' + str(len(lats) * points))
    lon_array = ds.variables['longitude'][:]
    lat_array = ds.variables['latitude'][:]
    time_array = ds.variables['date_number'][:]
    temperature = ds.variables['temperature']
    # 'date_number' holds fractional years (e.g. 1950.5); convert each value
    # to a calendar date by scaling the remainder over that year's length.
    dates = []
    for time in time_array[:]:
        year = int(time)
        rem = time - year
        base = datetime(year, 1, 1)
        dates.append((base + timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)).date())
    dateResult = []
    temperatureResult = []
    latitudeResult = []
    longitudeResult = []
    cityResult = []
    countryResult = []
    for latitude, longitude, city, country in zip(lats, lons, cities, countries):
        # Indexes of the grid node nearest to this city's coordinates.
        i = np.abs(lon_array - longitude).argmin()
        j = np.abs(lat_array - latitude).argmin()
        dateResult.extend(dates)
        # Full time series at that grid node.
        temperatureResult.extend(temperature[:, j, i])
        # BUGFIX: dtype=np.float was removed in NumPy >= 1.20 and raises
        # AttributeError; the builtin float is the documented replacement.
        latitudeResult.extend(np.full(shape=points, fill_value=latitude, dtype=float))
        longitudeResult.extend(np.full(shape=points, fill_value=longitude, dtype=float))
        cityResult.extend(np.full(shape=points, fill_value=city))
        countryResult.extend(np.full(shape=points, fill_value=country))
        print('iteration no:' + str(i))
    df = pd.DataFrame()
    df['date'] = dateResult
    df['temperature'] = temperatureResult
    df['latitude'] = latitudeResult
    df['longitude'] = longitudeResult
    df['city'] = cityResult
    df['country'] = countryResult
    df.to_csv(r'C:\Users\stoja\Desktop\Temperatures.csv', index=False)
    return df
def model():
    """Print metadata and the decoded time axis of air.mon.mean.v501.nc."""
    # Info for netCDF4 file (1416 monthly time steps).
    ds = Dataset('air.mon.mean.v501.nc')
    print(ds)
    time = ds.variables['time']
    print(time.units)
    # Decode every raw time value to a calendar date and print it.
    for t in time[:]:
        print(num2date(t, units=time.units))
if __name__ == '__main__':
    print('Start')
    # Making the CO2 dataset (wide emission data -> long CSV)
    co2_by_country_till2019()
    # Making the temperatures dataset (monthly means, 1416 time steps)
    df1 = make_dataset_temperatures('air.mon.mean.v501.nc', 1416)
    print(df1.head())
    # Making the temperatures anomalies dataset (daily values, 3652 time steps)
    df2 = make_dataset_temperatures('Complete_TAVG_Daily_LatLong1_2010.nc', 3652)
    print(df2.head())
|
normal
|
{
"blob_id": "2b579c3def4c2d02d365f019518e8e0b25664460",
"index": 7436,
"step-1": "<mask token>\n\n\ndef plot_temperatures_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for temperature values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n start) & (values['dt'] <= end)]\n x1 = filtered['dt']\n y1 = filtered['AverageTemperature']\n plt.plot(x1, y1, label='line 1')\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n '1973-01-01') & (values['dt'] <= '1974-01-01')]\n x2 = filtered['dt']\n y2 = filtered['AverageTemperature']\n plt.plot(x2, y2, label='line 2')\n plt.xlabel('x - axis - date')\n plt.ylabel('y - axis - temperature')\n plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef temperatures_by_city_till2013():\n \"\"\"\n Info for dataset, temperatures by city part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef temperatures_by_country_till2013():\n \"\"\"\n Info for dataset, temperatures by country part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef plot_co2_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for co2 values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['Year'] >=\n start) & (values['Year'] <= end)]\n x1 = filtered['Year']\n y1 = filtered['CO2']\n plt.plot(x1, y1, label='line 1')\n plt.xlabel('x - axis - year')\n plt.ylabel('y - axis - co2')\n plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\n<mask 
token>\n\n\ndef get_lat_lon():\n \"\"\"\n Returns arrays for latitudes, longitudes, cities and countries\n from dataset, temperatures by country part 1, from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n Latitude = temperatures['Latitude']\n Longitude = temperatures['Longitude']\n City = temperatures['City']\n Country = temperatures['Country']\n lat_array = []\n long_array = []\n cities_array = []\n countries_array = []\n tuples = []\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\n if (i, j) not in tuples:\n tuples.append((i, j))\n lat_array.append(float(i[:-1]))\n long_array.append(float(j[:-1]))\n cities_array.append(city)\n countries_array.append(country)\n return lat_array, long_array, cities_array, countries_array\n\n\ndef make_dataset_temperatures(filename, points):\n \"\"\"\n From netCDF4 file to CSV file\n \"\"\"\n ds = Dataset(filename)\n lats, lons, cities, countries = get_lat_lon()\n print('The number of rows is ' + str(len(lats) * points))\n lon = ds.variables['longitude']\n lat = ds.variables['latitude']\n time = ds.variables['date_number']\n lon_array = lon[:]\n lat_array = lat[:]\n time_array = time[:]\n temperature = ds.variables['temperature']\n dates = []\n for time in time_array[:]:\n year = int(time)\n rem = time - year\n base = datetime(year, 1, 1)\n dates.append((base + timedelta(seconds=(base.replace(year=base.year +\n 1) - base).total_seconds() * rem)).date())\n dateResult = []\n temperatureResult = []\n latitudeResult = []\n longitudeResult = []\n cityResult = []\n countryResult = []\n for latitude, longitude, city, country in zip(lats, lons, cities, countries\n ):\n i = np.abs(lon_array - longitude).argmin()\n j = np.abs(lat_array - latitude).argmin()\n for d in dates:\n dateResult.append(d)\n resultTemperature = temperature[:, j, i]\n for t in resultTemperature:\n temperatureResult.append(t)\n resultLatitues = np.full(shape=points, 
fill_value=latitude, dtype=\n np.float)\n for l in resultLatitues:\n latitudeResult.append(l)\n resultLongitudes = np.full(shape=points, fill_value=longitude,\n dtype=np.float)\n for l in resultLongitudes:\n longitudeResult.append(l)\n resultCities = np.full(shape=points, fill_value=city)\n for c in resultCities:\n cityResult.append(c)\n resultCountries = np.full(shape=points, fill_value=country)\n for c in resultCountries:\n countryResult.append(c)\n print('iteration no:' + str(i))\n df = pd.DataFrame()\n df['date'] = dateResult\n df['temperature'] = temperatureResult\n df['latitude'] = latitudeResult\n df['longitude'] = longitudeResult\n df['city'] = cityResult\n df['country'] = countryResult\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\Temperatures.csv', index=False)\n return df\n\n\ndef model():\n ds = Dataset('air.mon.mean.v501.nc')\n print(ds)\n time = ds.variables['time']\n print(time.units)\n time_array = time[:]\n for t in time_array[:]:\n print(num2date(t, units=time.units))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_temperatures_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for temperature values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n start) & (values['dt'] <= end)]\n x1 = filtered['dt']\n y1 = filtered['AverageTemperature']\n plt.plot(x1, y1, label='line 1')\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n '1973-01-01') & (values['dt'] <= '1974-01-01')]\n x2 = filtered['dt']\n y2 = filtered['AverageTemperature']\n plt.plot(x2, y2, label='line 2')\n plt.xlabel('x - axis - date')\n plt.ylabel('y - axis - temperature')\n plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef temperatures_by_city_till2013():\n \"\"\"\n Info for dataset, temperatures by city part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef temperatures_by_country_till2013():\n \"\"\"\n Info for dataset, temperatures by country part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef plot_co2_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for co2 values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['Year'] >=\n start) & (values['Year'] <= end)]\n x1 = filtered['Year']\n y1 = filtered['CO2']\n plt.plot(x1, y1, label='line 1')\n plt.xlabel('x - axis - year')\n plt.ylabel('y - axis - co2')\n plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef 
co2_by_country_till2019():\n \"\"\"\n Info for dataset, co2 by country part 1 - from 1751 to 2017\n \"\"\"\n co2_messy = pd.read_csv('CO2/emission data.csv')\n co2 = pd.melt(co2_messy, id_vars=['Country'], var_name='Year',\n value_name='CO2')\n df = pd.DataFrame()\n df['Country'] = co2['Country']\n df['Year'] = co2['Year']\n df['CO2'] = co2['CO2']\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\EmissionCO2.csv', index=False)\n\n\ndef get_lat_lon():\n \"\"\"\n Returns arrays for latitudes, longitudes, cities and countries\n from dataset, temperatures by country part 1, from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n Latitude = temperatures['Latitude']\n Longitude = temperatures['Longitude']\n City = temperatures['City']\n Country = temperatures['Country']\n lat_array = []\n long_array = []\n cities_array = []\n countries_array = []\n tuples = []\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\n if (i, j) not in tuples:\n tuples.append((i, j))\n lat_array.append(float(i[:-1]))\n long_array.append(float(j[:-1]))\n cities_array.append(city)\n countries_array.append(country)\n return lat_array, long_array, cities_array, countries_array\n\n\ndef make_dataset_temperatures(filename, points):\n \"\"\"\n From netCDF4 file to CSV file\n \"\"\"\n ds = Dataset(filename)\n lats, lons, cities, countries = get_lat_lon()\n print('The number of rows is ' + str(len(lats) * points))\n lon = ds.variables['longitude']\n lat = ds.variables['latitude']\n time = ds.variables['date_number']\n lon_array = lon[:]\n lat_array = lat[:]\n time_array = time[:]\n temperature = ds.variables['temperature']\n dates = []\n for time in time_array[:]:\n year = int(time)\n rem = time - year\n base = datetime(year, 1, 1)\n dates.append((base + timedelta(seconds=(base.replace(year=base.year +\n 1) - base).total_seconds() * rem)).date())\n dateResult = []\n temperatureResult = []\n latitudeResult = []\n 
longitudeResult = []\n cityResult = []\n countryResult = []\n for latitude, longitude, city, country in zip(lats, lons, cities, countries\n ):\n i = np.abs(lon_array - longitude).argmin()\n j = np.abs(lat_array - latitude).argmin()\n for d in dates:\n dateResult.append(d)\n resultTemperature = temperature[:, j, i]\n for t in resultTemperature:\n temperatureResult.append(t)\n resultLatitues = np.full(shape=points, fill_value=latitude, dtype=\n np.float)\n for l in resultLatitues:\n latitudeResult.append(l)\n resultLongitudes = np.full(shape=points, fill_value=longitude,\n dtype=np.float)\n for l in resultLongitudes:\n longitudeResult.append(l)\n resultCities = np.full(shape=points, fill_value=city)\n for c in resultCities:\n cityResult.append(c)\n resultCountries = np.full(shape=points, fill_value=country)\n for c in resultCountries:\n countryResult.append(c)\n print('iteration no:' + str(i))\n df = pd.DataFrame()\n df['date'] = dateResult\n df['temperature'] = temperatureResult\n df['latitude'] = latitudeResult\n df['longitude'] = longitudeResult\n df['city'] = cityResult\n df['country'] = countryResult\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\Temperatures.csv', index=False)\n return df\n\n\ndef model():\n ds = Dataset('air.mon.mean.v501.nc')\n print(ds)\n time = ds.variables['time']\n print(time.units)\n time_array = time[:]\n for t in time_array[:]:\n print(num2date(t, units=time.units))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef plot_temperatures_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for temperature values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n start) & (values['dt'] <= end)]\n x1 = filtered['dt']\n y1 = filtered['AverageTemperature']\n plt.plot(x1, y1, label='line 1')\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n '1973-01-01') & (values['dt'] <= '1974-01-01')]\n x2 = filtered['dt']\n y2 = filtered['AverageTemperature']\n plt.plot(x2, y2, label='line 2')\n plt.xlabel('x - axis - date')\n plt.ylabel('y - axis - temperature')\n plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef temperatures_by_city_till2013():\n \"\"\"\n Info for dataset, temperatures by city part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef temperatures_by_country_till2013():\n \"\"\"\n Info for dataset, temperatures by country part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef plot_co2_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for co2 values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['Year'] >=\n start) & (values['Year'] <= end)]\n x1 = filtered['Year']\n y1 = filtered['CO2']\n plt.plot(x1, y1, label='line 1')\n plt.xlabel('x - axis - year')\n plt.ylabel('y - axis - co2')\n plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef 
co2_by_country_till2019():\n \"\"\"\n Info for dataset, co2 by country part 1 - from 1751 to 2017\n \"\"\"\n co2_messy = pd.read_csv('CO2/emission data.csv')\n co2 = pd.melt(co2_messy, id_vars=['Country'], var_name='Year',\n value_name='CO2')\n df = pd.DataFrame()\n df['Country'] = co2['Country']\n df['Year'] = co2['Year']\n df['CO2'] = co2['CO2']\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\EmissionCO2.csv', index=False)\n\n\ndef get_lat_lon():\n \"\"\"\n Returns arrays for latitudes, longitudes, cities and countries\n from dataset, temperatures by country part 1, from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n Latitude = temperatures['Latitude']\n Longitude = temperatures['Longitude']\n City = temperatures['City']\n Country = temperatures['Country']\n lat_array = []\n long_array = []\n cities_array = []\n countries_array = []\n tuples = []\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\n if (i, j) not in tuples:\n tuples.append((i, j))\n lat_array.append(float(i[:-1]))\n long_array.append(float(j[:-1]))\n cities_array.append(city)\n countries_array.append(country)\n return lat_array, long_array, cities_array, countries_array\n\n\ndef make_dataset_temperatures(filename, points):\n \"\"\"\n From netCDF4 file to CSV file\n \"\"\"\n ds = Dataset(filename)\n lats, lons, cities, countries = get_lat_lon()\n print('The number of rows is ' + str(len(lats) * points))\n lon = ds.variables['longitude']\n lat = ds.variables['latitude']\n time = ds.variables['date_number']\n lon_array = lon[:]\n lat_array = lat[:]\n time_array = time[:]\n temperature = ds.variables['temperature']\n dates = []\n for time in time_array[:]:\n year = int(time)\n rem = time - year\n base = datetime(year, 1, 1)\n dates.append((base + timedelta(seconds=(base.replace(year=base.year +\n 1) - base).total_seconds() * rem)).date())\n dateResult = []\n temperatureResult = []\n latitudeResult = []\n 
longitudeResult = []\n cityResult = []\n countryResult = []\n for latitude, longitude, city, country in zip(lats, lons, cities, countries\n ):\n i = np.abs(lon_array - longitude).argmin()\n j = np.abs(lat_array - latitude).argmin()\n for d in dates:\n dateResult.append(d)\n resultTemperature = temperature[:, j, i]\n for t in resultTemperature:\n temperatureResult.append(t)\n resultLatitues = np.full(shape=points, fill_value=latitude, dtype=\n np.float)\n for l in resultLatitues:\n latitudeResult.append(l)\n resultLongitudes = np.full(shape=points, fill_value=longitude,\n dtype=np.float)\n for l in resultLongitudes:\n longitudeResult.append(l)\n resultCities = np.full(shape=points, fill_value=city)\n for c in resultCities:\n cityResult.append(c)\n resultCountries = np.full(shape=points, fill_value=country)\n for c in resultCountries:\n countryResult.append(c)\n print('iteration no:' + str(i))\n df = pd.DataFrame()\n df['date'] = dateResult\n df['temperature'] = temperatureResult\n df['latitude'] = latitudeResult\n df['longitude'] = longitudeResult\n df['city'] = cityResult\n df['country'] = countryResult\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\Temperatures.csv', index=False)\n return df\n\n\ndef model():\n ds = Dataset('air.mon.mean.v501.nc')\n print(ds)\n time = ds.variables['time']\n print(time.units)\n time_array = time[:]\n for t in time_array[:]:\n print(num2date(t, units=time.units))\n\n\nif __name__ == '__main__':\n print('Start')\n co2_by_country_till2019()\n df1 = make_dataset_temperatures('air.mon.mean.v501.nc', 1416)\n print(df1.head())\n df2 = make_dataset_temperatures('Complete_TAVG_Daily_LatLong1_2010.nc',\n 3652)\n print(df2.head())\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom netCDF4 import Dataset\nfrom cftime import num2date\nimport os\nimport numpy as np\nfrom datetime import datetime, timedelta, date\n\n\ndef plot_temperatures_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for temperature values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n start) & (values['dt'] <= end)]\n x1 = filtered['dt']\n y1 = filtered['AverageTemperature']\n plt.plot(x1, y1, label='line 1')\n filtered = values.loc[(values['Country'] == country) & (values['dt'] >=\n '1973-01-01') & (values['dt'] <= '1974-01-01')]\n x2 = filtered['dt']\n y2 = filtered['AverageTemperature']\n plt.plot(x2, y2, label='line 2')\n plt.xlabel('x - axis - date')\n plt.ylabel('y - axis - temperature')\n plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef temperatures_by_city_till2013():\n \"\"\"\n Info for dataset, temperatures by city part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef temperatures_by_country_till2013():\n \"\"\"\n Info for dataset, temperatures by country part 1 - from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv')\n print(len(temperatures))\n countries = temperatures['Country'].unique()\n print(len(countries))\n print(sorted(countries))\n\n\ndef plot_co2_by_country(values, country, start, end):\n \"\"\"\n Returns a plot for co2 values for a country\n from a start point to an end point\n \"\"\"\n filtered = values.loc[(values['Country'] == country) & (values['Year'] >=\n start) & (values['Year'] <= end)]\n x1 = filtered['Year']\n y1 = filtered['CO2']\n plt.plot(x1, y1, 
label='line 1')\n plt.xlabel('x - axis - year')\n plt.ylabel('y - axis - co2')\n plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)\n plt.show()\n\n\ndef co2_by_country_till2019():\n \"\"\"\n Info for dataset, co2 by country part 1 - from 1751 to 2017\n \"\"\"\n co2_messy = pd.read_csv('CO2/emission data.csv')\n co2 = pd.melt(co2_messy, id_vars=['Country'], var_name='Year',\n value_name='CO2')\n df = pd.DataFrame()\n df['Country'] = co2['Country']\n df['Year'] = co2['Year']\n df['CO2'] = co2['CO2']\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\EmissionCO2.csv', index=False)\n\n\ndef get_lat_lon():\n \"\"\"\n Returns arrays for latitudes, longitudes, cities and countries\n from dataset, temperatures by country part 1, from 1743 to 2013\n \"\"\"\n temperatures = pd.read_csv(\n 'GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv')\n Latitude = temperatures['Latitude']\n Longitude = temperatures['Longitude']\n City = temperatures['City']\n Country = temperatures['Country']\n lat_array = []\n long_array = []\n cities_array = []\n countries_array = []\n tuples = []\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\n if (i, j) not in tuples:\n tuples.append((i, j))\n lat_array.append(float(i[:-1]))\n long_array.append(float(j[:-1]))\n cities_array.append(city)\n countries_array.append(country)\n return lat_array, long_array, cities_array, countries_array\n\n\ndef make_dataset_temperatures(filename, points):\n \"\"\"\n From netCDF4 file to CSV file\n \"\"\"\n ds = Dataset(filename)\n lats, lons, cities, countries = get_lat_lon()\n print('The number of rows is ' + str(len(lats) * points))\n lon = ds.variables['longitude']\n lat = ds.variables['latitude']\n time = ds.variables['date_number']\n lon_array = lon[:]\n lat_array = lat[:]\n time_array = time[:]\n temperature = ds.variables['temperature']\n dates = []\n for time in time_array[:]:\n year = int(time)\n rem = time - year\n base = datetime(year, 1, 1)\n dates.append((base + 
timedelta(seconds=(base.replace(year=base.year +\n 1) - base).total_seconds() * rem)).date())\n dateResult = []\n temperatureResult = []\n latitudeResult = []\n longitudeResult = []\n cityResult = []\n countryResult = []\n for latitude, longitude, city, country in zip(lats, lons, cities, countries\n ):\n i = np.abs(lon_array - longitude).argmin()\n j = np.abs(lat_array - latitude).argmin()\n for d in dates:\n dateResult.append(d)\n resultTemperature = temperature[:, j, i]\n for t in resultTemperature:\n temperatureResult.append(t)\n resultLatitues = np.full(shape=points, fill_value=latitude, dtype=\n np.float)\n for l in resultLatitues:\n latitudeResult.append(l)\n resultLongitudes = np.full(shape=points, fill_value=longitude,\n dtype=np.float)\n for l in resultLongitudes:\n longitudeResult.append(l)\n resultCities = np.full(shape=points, fill_value=city)\n for c in resultCities:\n cityResult.append(c)\n resultCountries = np.full(shape=points, fill_value=country)\n for c in resultCountries:\n countryResult.append(c)\n print('iteration no:' + str(i))\n df = pd.DataFrame()\n df['date'] = dateResult\n df['temperature'] = temperatureResult\n df['latitude'] = latitudeResult\n df['longitude'] = longitudeResult\n df['city'] = cityResult\n df['country'] = countryResult\n df.to_csv('C:\\\\Users\\\\stoja\\\\Desktop\\\\Temperatures.csv', index=False)\n return df\n\n\ndef model():\n ds = Dataset('air.mon.mean.v501.nc')\n print(ds)\n time = ds.variables['time']\n print(time.units)\n time_array = time[:]\n for t in time_array[:]:\n print(num2date(t, units=time.units))\n\n\nif __name__ == '__main__':\n print('Start')\n co2_by_country_till2019()\n df1 = make_dataset_temperatures('air.mon.mean.v501.nc', 1416)\n print(df1.head())\n df2 = make_dataset_temperatures('Complete_TAVG_Daily_LatLong1_2010.nc',\n 3652)\n print(df2.head())\n",
"step-5": "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom netCDF4 import Dataset\r\nfrom cftime import num2date\r\nimport os\r\nimport numpy as np\r\nfrom datetime import datetime, timedelta, date\r\n\r\n\r\ndef plot_temperatures_by_country(values, country, start, end):\r\n \"\"\"\r\n Returns a plot for temperature values for a country\r\n from a start point to an end point\r\n \"\"\"\r\n\r\n filtered = values.loc[(values['Country'] == country) &\r\n (values['dt'] >= start) &\r\n (values['dt'] <= end)]\r\n\r\n # x axis values\r\n x1 = filtered['dt']\r\n # corresponding y axis values\r\n y1 = filtered['AverageTemperature']\r\n\r\n # plotting the points\r\n plt.plot(x1, y1, label = \"line 1\")\r\n\r\n filtered = values.loc[(values['Country'] == country) &\r\n (values['dt'] >= '1973-01-01') &\r\n (values['dt'] <= '1974-01-01')]\r\n\r\n # x axis values\r\n x2 = filtered['dt']\r\n # corresponding y axis values\r\n y2 = filtered['AverageTemperature']\r\n\r\n # plotting the points\r\n plt.plot(x2, y2, label=\"line 2\")\r\n\r\n # naming the x axis\r\n plt.xlabel('x - axis - date')\r\n # naming the y axis\r\n plt.ylabel('y - axis - temperature')\r\n\r\n plt.title('Temperatures from ' + start + ' to ' + end + ' for ' + country)\r\n\r\n # function to show the plot\r\n plt.show()\r\n\r\n\r\ndef temperatures_by_city_till2013():\r\n \"\"\"\r\n Info for dataset, temperatures by city part 1 - from 1743 to 2013\r\n \"\"\"\r\n\r\n # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv\")\r\n\r\n # 8 599 212 rows\r\n print(len(temperatures))\r\n\r\n countries = temperatures['Country'].unique()\r\n print(len(countries))\r\n print(sorted(countries))\r\n\r\n\r\ndef temperatures_by_country_till2013():\r\n \"\"\"\r\n Info for dataset, temperatures by country part 1 - from 1743 to 2013\r\n \"\"\"\r\n\r\n # Columns: dt, AverageTemperature, 
AverageTemperatureUncertainty, Country\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCountry.csv\")\r\n\r\n # 577 462 rows\r\n print(len(temperatures))\r\n\r\n countries = temperatures['Country'].unique()\r\n print(len(countries))\r\n print(sorted(countries))\r\n\r\n\r\ndef plot_co2_by_country(values, country, start, end):\r\n \"\"\"\r\n Returns a plot for co2 values for a country\r\n from a start point to an end point\r\n \"\"\"\r\n\r\n filtered = values.loc[(values['Country'] == country) &\r\n (values['Year'] >= start) &\r\n (values['Year'] <= end)]\r\n\r\n # x axis values\r\n x1 = filtered['Year']\r\n # corresponding y axis values\r\n y1 = filtered['CO2']\r\n\r\n # plotting the points\r\n plt.plot(x1, y1, label = \"line 1\")\r\n\r\n # naming the x axis\r\n plt.xlabel('x - axis - year')\r\n # naming the y axis\r\n plt.ylabel('y - axis - co2')\r\n\r\n # giving a title to my graph\r\n plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)\r\n\r\n # function to show the plot\r\n plt.show()\r\n\r\n\r\ndef co2_by_country_till2019():\r\n \"\"\"\r\n Info for dataset, co2 by country part 1 - from 1751 to 2017\r\n \"\"\"\r\n co2_messy = pd.read_csv(\"CO2/emission data.csv\")\r\n\r\n co2 = pd.melt(co2_messy, id_vars=[\"Country\"], var_name=\"Year\", value_name=\"CO2\")\r\n\r\n df = pd.DataFrame()\r\n df['Country'] = co2['Country']\r\n df['Year'] = co2['Year']\r\n df['CO2'] = co2['CO2']\r\n\r\n df.to_csv(r'C:\\Users\\stoja\\Desktop\\EmissionCO2.csv', index=False)\r\n\r\n\r\ndef get_lat_lon():\r\n \"\"\"\r\n Returns arrays for latitudes, longitudes, cities and countries\r\n from dataset, temperatures by country part 1, from 1743 to 2013\r\n \"\"\"\r\n\r\n # Columns: dt,AverageTemperature,AverageTemperatureUncertainty,City,Country,Latitude,Longitude\r\n temperatures = pd.read_csv(\"GlobalLandTemperatures/GlobalLandTemperaturesByCity.csv\")\r\n\r\n Latitude = temperatures['Latitude']\r\n Longitude = temperatures['Longitude']\r\n 
City = temperatures['City']\r\n Country = temperatures['Country']\r\n\r\n lat_array = []\r\n long_array = []\r\n cities_array = []\r\n countries_array = []\r\n tuples = []\r\n for i, j, city, country in zip(Latitude, Longitude, City, Country):\r\n if (i, j) not in tuples:\r\n tuples.append((i, j))\r\n lat_array.append(float(i[:-1]))\r\n long_array.append(float(j[:-1]))\r\n cities_array.append(city)\r\n countries_array.append(country)\r\n\r\n return lat_array, long_array, cities_array, countries_array\r\n\r\n\r\ndef make_dataset_temperatures(filename, points):\r\n \"\"\"\r\n From netCDF4 file to CSV file\r\n \"\"\"\r\n\r\n ds = Dataset(filename)\r\n\r\n lats, lons, cities, countries = get_lat_lon()\r\n\r\n # total lat,lon pairs: 1366\r\n print('The number of rows is ' + str(len(lats)*points))\r\n lon = ds.variables['longitude']\r\n lat = ds.variables['latitude']\r\n time = ds.variables['date_number']\r\n\r\n lon_array = lon[:]\r\n lat_array = lat[:]\r\n time_array = time[:]\r\n\r\n temperature = ds.variables['temperature']\r\n\r\n dates = []\r\n for time in time_array[:]:\r\n year = int(time)\r\n rem = time - year\r\n base = datetime(year, 1, 1)\r\n dates.append((base + timedelta(seconds=(base.replace(year=base.year + 1) - base).total_seconds() * rem)).date())\r\n\r\n # second approach\r\n # for t in time_array[:]:\r\n # dates.append(num2date(t, units=time.units))\r\n\r\n dateResult = []\r\n temperatureResult = []\r\n latitudeResult = []\r\n longitudeResult = []\r\n cityResult = []\r\n countryResult = []\r\n\r\n for latitude, longitude, city, country in zip(lats, lons, cities, countries):\r\n\r\n # We want to find data for latitude, longitude\r\n # We first need to find the indexes\r\n i = np.abs(lon_array - longitude).argmin()\r\n j = np.abs(lat_array - latitude).argmin()\r\n\r\n for d in dates:\r\n dateResult.append(d)\r\n\r\n resultTemperature = temperature[:, j, i]\r\n for t in resultTemperature:\r\n temperatureResult.append(t)\r\n\r\n resultLatitues = 
np.full(\r\n shape=points,\r\n fill_value=latitude,\r\n dtype=np.float\r\n )\r\n for l in resultLatitues:\r\n latitudeResult.append(l)\r\n\r\n resultLongitudes = np.full(\r\n shape=points,\r\n fill_value=longitude,\r\n dtype=np.float\r\n )\r\n for l in resultLongitudes:\r\n longitudeResult.append(l)\r\n\r\n resultCities = np.full(\r\n shape=points,\r\n fill_value=city\r\n )\r\n for c in resultCities:\r\n cityResult.append(c)\r\n\r\n resultCountries = np.full(\r\n shape=points,\r\n fill_value=country\r\n )\r\n for c in resultCountries:\r\n countryResult.append(c)\r\n\r\n print('iteration no:' + str(i))\r\n\r\n df = pd.DataFrame()\r\n df['date'] = dateResult\r\n df['temperature'] = temperatureResult\r\n df['latitude'] = latitudeResult\r\n df['longitude'] = longitudeResult\r\n df['city'] = cityResult\r\n df['country'] = countryResult\r\n\r\n df.to_csv(r'C:\\Users\\stoja\\Desktop\\Temperatures.csv', index=False)\r\n return df\r\n\r\n\r\ndef model():\r\n\r\n # Info for netCDF4 file\r\n # 1416\r\n ds = Dataset('air.mon.mean.v501.nc')\r\n print(ds)\r\n time = ds.variables['time']\r\n print(time.units)\r\n time_array = time[:]\r\n for t in time_array[:]:\r\n print(num2date(t, units=time.units))\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Start')\r\n\r\n # Making the CO2 dataset\r\n co2_by_country_till2019()\r\n\r\n # Making the temperatures dataset\r\n df1 = make_dataset_temperatures('air.mon.mean.v501.nc', 1416)\r\n print(df1.head())\r\n\r\n # Making the temperatures anomalies dataset\r\n df2 = make_dataset_temperatures('Complete_TAVG_Daily_LatLong1_2010.nc', 3652)\r\n print(df2.head())\r\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
from os import wait
import cv2
import numpy as np
import math
import sys
import types
import operator
## Declare the ORB feature detector and the brute-force matcher.
orb = cv2.cv2.ORB_create(
    nfeatures=5000,           # keep up to 5000 keypoints per image
    scaleFactor=1.2,          # pyramid decimation ratio between levels
    nlevels=8,                # number of pyramid levels
    edgeThreshold=31,         # border size where features are not detected
    firstLevel=0,
    WTA_K=2,                  # 2 points per BRIEF comparison -> binary descriptor
    scoreType=cv2.ORB_FAST_SCORE,  # rank keypoints by FAST score (faster than Harris)
    patchSize=31,
    fastThreshold=25,
    )
# Hamming norm matches ORB's binary descriptors.
bf = cv2.BFMatcher(cv2.NORM_HAMMING)
def getScale(NumFrame, t_gt, seq_num, poses_dir='/media/cordin/새 볼륨/rosbag/dataset/poses'):
    """Return the ground-truth translation scale between consecutive frames.

    Reads the KITTI-style ground-truth pose file for sequence ``seq_num``,
    extracts the translation (x, y, z) of frame ``NumFrame``, overwrites
    ``t_gt`` in place with it, and returns the Euclidean distance from the
    previous translation held in ``t_gt``.

    Args:
        NumFrame: Index of the current frame (line number in the pose file).
        t_gt: 3-element container (list or (3,1) ndarray) holding the
            previous ground-truth translation; mutated in place.
        seq_num: Sequence number used to build the pose file name.
        poses_dir: Directory containing the ``{seq:02d}.txt`` pose files.
            Defaults to the original hard-coded dataset location, so
            existing callers are unaffected.

    Returns:
        (scale, t_gt): Euclidean distance between the previous and current
        ground-truth translations, and the updated ``t_gt``.
    """
    x_prev = float(t_gt[0])
    y_prev = float(t_gt[1])
    z_prev = float(t_gt[2])
    # `with` guarantees the file is closed even if indexing/parsing raises;
    # the original open()/close() pair leaked the handle on error paths.
    with open('{0}/{1:02d}.txt'.format(poses_dir, seq_num)) as txt_file:
        line = txt_file.readlines()
    line_sp = line[NumFrame].split(' ')
    # Each pose line is a flattened 3x4 [R|t] matrix; the translation
    # components sit at columns 3, 7 and 11.
    x = float(line_sp[3])
    y = float(line_sp[7])
    z = float(line_sp[11])
    t_gt[0] = x
    t_gt[1] = y
    t_gt[2] = z
    scale = math.sqrt((x-x_prev)**2 + (y-y_prev)**2 + (z-z_prev)**2)
    return scale, t_gt
if __name__ == "__main__":
    # Monocular visual odometry on a KITTI sequence: ORB features +
    # brute-force matching, essential-matrix pose recovery, ground-truth
    # scale injection, and live trajectory drawing.
    MAX_FRAME = 1000
    SEQ_NUM = 2
    #Camera intrinsic parameter (KITTI grayscale camera)
    focal = 718.8560
    pp = (607.1928, 185.2157)
    # Anchor points for the on-screen text overlays.
    textOrg1 = (10,30)
    textOrg2 = (10,80)
    textOrg3 = (10,130)
    # Bootstrap the pose from the first two frames of the sequence.
    img_1_c = cv2.imread("/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png".format(SEQ_NUM))
    img_2_c = cv2.imread("/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png".format(SEQ_NUM))
    img_1 = cv2.cvtColor(img_1_c,cv2.COLOR_BGR2GRAY)
    img_2 = cv2.cvtColor(img_2_c,cv2.COLOR_BGR2GRAY)
    kp1, des1 = orb.detectAndCompute(img_1,None)
    kp2, des2 = orb.detectAndCompute(img_2,None)
    matches = bf.match(des1,des2)
    # Keep only the 1500 best (smallest-distance) matches.
    matches = sorted(matches, key = lambda x:x.distance)
    idx = matches[0:1500]
    pts1 = []
    pts2 = []
    for i in idx:
        pts1.append(kp1[i.queryIdx].pt)
        pts2.append(kp2[i.trainIdx].pt)
    pts1 = np.array(pts1)
    pts2 = np.array(pts2)
    # Initial global pose (R_f, t_f) from the first image pair.
    E, mask = cv2.findEssentialMat(pts1,pts2,focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)
    _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal = focal, pp = pp)
    # NOTE(review): R_f_seg/t_f_seg are assigned but never used below.
    R_f_seg = R_f
    t_f_seg = t_f
    # Ground-truth translation, updated in place by getScale each frame.
    t_gt = np.zeros((3,1),dtype=np.float64)
    prevImage = img_2
    kp_prev = kp2
    des_prev = des2
    # Blank canvas for drawing the estimated (red) vs ground-truth (green) paths.
    traj = np.zeros((1000,2000),dtype=np.uint8)
    traj = cv2.cvtColor(traj,cv2.COLOR_GRAY2BGR)
    rmse_total = 0
    for numFrame in range(2, MAX_FRAME):
        filename = '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'.format(SEQ_NUM,numFrame)
        currImage_c = cv2.imread(filename)
        currImage = cv2.cvtColor(currImage_c,cv2.COLOR_BGR2GRAY)
        # feature extraction
        kp_curr, des_curr = orb.detectAndCompute(currImage,None)
        # feature matching (previous frame vs current frame, best 1500 kept)
        matches = bf.match(des_prev,des_curr)
        matches = sorted(matches, key = lambda x:x.distance)
        idx = matches[0:1500]
        pts1 = []
        pts2 = []
        for i in idx:
            pts1.append(kp_prev[i.queryIdx].pt)
            pts2.append(kp_curr[i.trainIdx].pt)
        pts1 = np.array(pts1)
        pts2 = np.array(pts2)
        # calculate the relative rotation R and (unit-scale) translation t
        E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)
        _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal = focal, pp = pp)
        # get the absolute scale from ground truth (monocular VO cannot
        # recover scale by itself); also refreshes t_gt in place
        abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)
        # update the accumulated trajectory: translate then compose rotations
        t_f = t_f + abs_scale*R_f.dot(t)
        R_f = R.dot(R_f)
        # calculate the running RMSE between estimate and ground truth
        # (t_gt/t_f are (3,1) arrays, so map() iterates their 3 rows)
        error = map(operator.sub,t_gt,t_f)
        error_sum_square = sum(map(lambda x:x*x,error))
        rmse = math.sqrt(error_sum_square/3)
        rmse_total = rmse_total + rmse
        print("rmse = ",rmse_total/numFrame)
        prevImage = currImage
        kp_prev = kp_curr
        des_prev = des_curr
        # visualization: offsets (+1000, +100) shift world coords into canvas pixels
        x_gt = int(t_gt[0]) + 1000
        y_gt = int(t_gt[2]) + 100
        x = int(t_f[0]) + 1000
        y = int(t_f[2]) + 100
        cv2.circle(traj, (x,y), 1 , (0,0,255), 2)
        cv2.circle(traj, (x_gt,y_gt), 1 , (0,255,0), 2)
        # black rectangle wipes the previous frame's text before redrawing
        cv2.rectangle(traj, (10,10), (700,150), (0,0,0), -1)
        text1 = 'orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_f[0]),float(t_f[1]),float(t_f[2]))
        cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)
        text3 = 'gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_gt[0]),float(t_gt[1]),float(t_gt[2]))
        cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)
        feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)
        cv2.imshow("trajectory", traj)
        cv2.imshow("feat_img", feature_img)
        cv2.waitKey(1)
    # Persist the final trajectory canvas once the sequence is exhausted.
    cv2.imwrite("result_{0:02d}.png".format(SEQ_NUM),traj)
|
normal
|
{
"blob_id": "73e7e43e9cfb3c0884480809bc03ade687d641d6",
"index": 733,
"step-1": "<mask token>\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\nif __name__ == '__main__':\n MAX_FRAME = 1000\n SEQ_NUM = 2\n focal = 718.856\n pp = 607.1928, 185.2157\n textOrg1 = 10, 30\n textOrg2 = 10, 80\n textOrg3 = 10, 130\n img_1_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'\n .format(SEQ_NUM))\n img_2_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'\n .format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)\n kp1, des1 = orb.detectAndCompute(img_1, None)\n kp2, des2 = orb.detectAndCompute(img_2, None)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=\n cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)\n R_f_seg = R_f\n t_f_seg = t_f\n t_gt = np.zeros((3, 1), dtype=np.float64)\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n traj = np.zeros((1000, 2000), dtype=np.uint8)\n traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)\n rmse_total = 0\n for numFrame in range(2, MAX_FRAME):\n filename = (\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'\n .format(SEQ_NUM, 
numFrame))\n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)\n kp_curr, des_curr = orb.detectAndCompute(currImage, None)\n matches = bf.match(des_prev, des_curr)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,\n method=cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n t_f = t_f + abs_scale * R_f.dot(t)\n R_f = R.dot(R_f)\n error = map(operator.sub, t_gt, t_f)\n error_sum_square = sum(map(lambda x: x * x, error))\n rmse = math.sqrt(error_sum_square / 3)\n rmse_total = rmse_total + rmse\n print('rmse = ', rmse_total / numFrame)\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)\n cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)\n cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)\n text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_f[0]), float(t_f[1]), float(t_f[2])))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n cv2.imshow('trajectory', traj)\n cv2.imshow('feat_img', feature_img)\n cv2.waitKey(1)\n cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)\n",
"step-3": "<mask token>\norb = cv2.cv2.ORB_create(nfeatures=5000, scaleFactor=1.2, nlevels=8,\n edgeThreshold=31, firstLevel=0, WTA_K=2, scoreType=cv2.ORB_FAST_SCORE,\n patchSize=31, fastThreshold=25)\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\nif __name__ == '__main__':\n MAX_FRAME = 1000\n SEQ_NUM = 2\n focal = 718.856\n pp = 607.1928, 185.2157\n textOrg1 = 10, 30\n textOrg2 = 10, 80\n textOrg3 = 10, 130\n img_1_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'\n .format(SEQ_NUM))\n img_2_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'\n .format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)\n kp1, des1 = orb.detectAndCompute(img_1, None)\n kp2, des2 = orb.detectAndCompute(img_2, None)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=\n cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)\n R_f_seg = R_f\n t_f_seg = t_f\n t_gt = np.zeros((3, 1), dtype=np.float64)\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n traj = np.zeros((1000, 2000), dtype=np.uint8)\n 
traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)\n rmse_total = 0\n for numFrame in range(2, MAX_FRAME):\n filename = (\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'\n .format(SEQ_NUM, numFrame))\n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)\n kp_curr, des_curr = orb.detectAndCompute(currImage, None)\n matches = bf.match(des_prev, des_curr)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,\n method=cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n t_f = t_f + abs_scale * R_f.dot(t)\n R_f = R.dot(R_f)\n error = map(operator.sub, t_gt, t_f)\n error_sum_square = sum(map(lambda x: x * x, error))\n rmse = math.sqrt(error_sum_square / 3)\n rmse_total = rmse_total + rmse\n print('rmse = ', rmse_total / numFrame)\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)\n cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)\n cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)\n text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_f[0]), float(t_f[1]), float(t_f[2])))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n 
cv2.imshow('trajectory', traj)\n cv2.imshow('feat_img', feature_img)\n cv2.waitKey(1)\n cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)\n",
"step-4": "from os import wait\nimport cv2\nimport numpy as np\nimport math\nimport sys\nimport types\nimport operator\norb = cv2.cv2.ORB_create(nfeatures=5000, scaleFactor=1.2, nlevels=8,\n edgeThreshold=31, firstLevel=0, WTA_K=2, scoreType=cv2.ORB_FAST_SCORE,\n patchSize=31, fastThreshold=25)\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.\n format(seq_num))\n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n txt_file.close()\n scale = math.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2 + (z - z_prev) ** 2\n )\n return scale, t_gt\n\n\nif __name__ == '__main__':\n MAX_FRAME = 1000\n SEQ_NUM = 2\n focal = 718.856\n pp = 607.1928, 185.2157\n textOrg1 = 10, 30\n textOrg2 = 10, 80\n textOrg3 = 10, 130\n img_1_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png'\n .format(SEQ_NUM))\n img_2_c = cv2.imread(\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png'\n .format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c, cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c, cv2.COLOR_BGR2GRAY)\n kp1, des1 = orb.detectAndCompute(img_1, None)\n kp2, des2 = orb.detectAndCompute(img_2, None)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E, mask = cv2.findEssentialMat(pts1, pts2, focal=focal, pp=pp, method=\n cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal=focal, pp=pp)\n R_f_seg = R_f\n t_f_seg = t_f\n t_gt = np.zeros((3, 1), dtype=np.float64)\n prevImage 
= img_2\n kp_prev = kp2\n des_prev = des2\n traj = np.zeros((1000, 2000), dtype=np.uint8)\n traj = cv2.cvtColor(traj, cv2.COLOR_GRAY2BGR)\n rmse_total = 0\n for numFrame in range(2, MAX_FRAME):\n filename = (\n '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'\n .format(SEQ_NUM, numFrame))\n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c, cv2.COLOR_BGR2GRAY)\n kp_curr, des_curr = orb.detectAndCompute(currImage, None)\n matches = bf.match(des_prev, des_curr)\n matches = sorted(matches, key=lambda x: x.distance)\n idx = matches[0:1500]\n pts1 = []\n pts2 = []\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal=focal, pp=pp,\n method=cv2.RANSAC, prob=0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal=focal, pp=pp)\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n t_f = t_f + abs_scale * R_f.dot(t)\n R_f = R.dot(R_f)\n error = map(operator.sub, t_gt, t_f)\n error_sum_square = sum(map(lambda x: x * x, error))\n rmse = math.sqrt(error_sum_square / 3)\n rmse_total = rmse_total + rmse\n print('rmse = ', rmse_total / numFrame)\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n cv2.circle(traj, (x, y), 1, (0, 0, 255), 2)\n cv2.circle(traj, (x_gt, y_gt), 1, (0, 255, 0), 2)\n cv2.rectangle(traj, (10, 10), (700, 150), (0, 0, 0), -1)\n text1 = ('orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_f[0]), float(t_f[1]), float(t_f[2])))\n cv2.putText(traj, text1, textOrg1, cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 255, 255), 1, 8)\n text3 = ('gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.\n format(float(t_gt[0]), float(t_gt[1]), float(t_gt[2])))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN, 1, 
(255,\n 255, 255), 1, 8)\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n cv2.imshow('trajectory', traj)\n cv2.imshow('feat_img', feature_img)\n cv2.waitKey(1)\n cv2.imwrite('result_{0:02d}.png'.format(SEQ_NUM), traj)\n",
"step-5": "from os import wait\nimport cv2\nimport numpy as np\nimport math\nimport sys\nimport types\nimport operator\n\n## orb 및 bf matcher 선언\norb = cv2.cv2.ORB_create(\n nfeatures=5000,\n scaleFactor=1.2,\n nlevels=8,\n edgeThreshold=31,\n firstLevel=0,\n WTA_K=2,\n scoreType=cv2.ORB_FAST_SCORE,\n patchSize=31,\n fastThreshold=25,\n )\n\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n\ndef getScale(NumFrame, t_gt, seq_num):\n\n txt_file = open('/media/cordin/새 볼륨/rosbag/dataset/poses/{0:02d}.txt'.format(seq_num))\n \n x_prev = float(t_gt[0])\n y_prev = float(t_gt[1])\n z_prev = float(t_gt[2])\n\n line = txt_file.readlines()\n line_sp = line[NumFrame].split(' ')\n\n x = float(line_sp[3])\n y = float(line_sp[7])\n z = float(line_sp[11])\n\n t_gt[0] = x\n t_gt[1] = y\n t_gt[2] = z\n\n txt_file.close()\n\n scale = math.sqrt((x-x_prev)**2 + (y-y_prev)**2 + (z-z_prev)**2)\n return scale, t_gt\n\n\nif __name__ == \"__main__\":\n MAX_FRAME = 1000\n SEQ_NUM = 2\n\n #Camera intrinsic parameter\n focal = 718.8560\n pp = (607.1928, 185.2157)\n\n textOrg1 = (10,30)\n textOrg2 = (10,80)\n textOrg3 = (10,130)\n\n img_1_c = cv2.imread(\"/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000000.png\".format(SEQ_NUM))\n img_2_c = cv2.imread(\"/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/000001.png\".format(SEQ_NUM))\n img_1 = cv2.cvtColor(img_1_c,cv2.COLOR_BGR2GRAY)\n img_2 = cv2.cvtColor(img_2_c,cv2.COLOR_BGR2GRAY)\n\n kp1, des1 = orb.detectAndCompute(img_1,None)\n kp2, des2 = orb.detectAndCompute(img_2,None)\n\n matches = bf.match(des1,des2)\n matches = sorted(matches, key = lambda x:x.distance)\n\n idx = matches[0:1500]\n\n pts1 = []\n pts2 = []\n\n for i in idx:\n pts1.append(kp1[i.queryIdx].pt)\n pts2.append(kp2[i.trainIdx].pt)\n\n\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n\n E, mask = cv2.findEssentialMat(pts1,pts2,focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)\n _, R_f, t_f, _ = cv2.recoverPose(E, pts1, pts2, focal = focal, 
pp = pp)\n\n R_f_seg = R_f\n t_f_seg = t_f\n\n t_gt = np.zeros((3,1),dtype=np.float64)\n\n prevImage = img_2\n kp_prev = kp2\n des_prev = des2\n\n traj = np.zeros((1000,2000),dtype=np.uint8)\n traj = cv2.cvtColor(traj,cv2.COLOR_GRAY2BGR)\n\n rmse_total = 0\n \n for numFrame in range(2, MAX_FRAME):\n filename = '/media/cordin/새 볼륨/rosbag/dataset/sequences/{0:02d}/image_0/{1:06d}.png'.format(SEQ_NUM,numFrame)\n \n currImage_c = cv2.imread(filename)\n currImage = cv2.cvtColor(currImage_c,cv2.COLOR_BGR2GRAY)\n\n # feature extraction\n kp_curr, des_curr = orb.detectAndCompute(currImage,None)\n\n # feature matching\n matches = bf.match(des_prev,des_curr)\n matches = sorted(matches, key = lambda x:x.distance)\n idx = matches[0:1500]\n\n pts1 = []\n pts2 = []\n\n for i in idx:\n pts1.append(kp_prev[i.queryIdx].pt)\n pts2.append(kp_curr[i.trainIdx].pt)\n\n pts1 = np.array(pts1)\n pts2 = np.array(pts2)\n\n # caculate R, t\n E_mat, mask_n = cv2.findEssentialMat(pts2, pts1, focal = focal, pp = pp, method=cv2.RANSAC, prob = 0.999, threshold=1.0)\n _, R, t, _ = cv2.recoverPose(E_mat, pts2, pts1, focal = focal, pp = pp)\n\n # get scale\n abs_scale, t_gt = getScale(numFrame, t_gt, SEQ_NUM)\n \n # update trajectory\n t_f = t_f + abs_scale*R_f.dot(t)\n R_f = R.dot(R_f)\n\n # caculate Error\n error = map(operator.sub,t_gt,t_f)\n error_sum_square = sum(map(lambda x:x*x,error))\n rmse = math.sqrt(error_sum_square/3)\n rmse_total = rmse_total + rmse\n\n print(\"rmse = \",rmse_total/numFrame)\n\n prevImage = currImage\n kp_prev = kp_curr\n des_prev = des_curr\n\n # visualization\n x_gt = int(t_gt[0]) + 1000\n y_gt = int(t_gt[2]) + 100\n\n x = int(t_f[0]) + 1000\n y = int(t_f[2]) + 100\n\n cv2.circle(traj, (x,y), 1 , (0,0,255), 2)\n cv2.circle(traj, (x_gt,y_gt), 1 , (0,255,0), 2)\n \n\n cv2.rectangle(traj, (10,10), (700,150), (0,0,0), -1)\n text1 = 'orb Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_f[0]),float(t_f[1]),float(t_f[2]))\n cv2.putText(traj, text1, 
textOrg1, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)\n\n text3 = 'gt Coordinates: x = {0:02f}m y = {1:02f}m z = {2:02f}m'.format(float(t_gt[0]),float(t_gt[1]),float(t_gt[2]))\n cv2.putText(traj, text3, textOrg3, cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1,8)\n\n feature_img = cv2.drawKeypoints(currImage_c, kp_curr, None)\n\n cv2.imshow(\"trajectory\", traj)\n cv2.imshow(\"feat_img\", feature_img)\n\n cv2.waitKey(1)\n \n cv2.imwrite(\"result_{0:02d}.png\".format(SEQ_NUM),traj)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class GpsDataBlockIndex(object):
def __init__(self, position: int, size: int):
if position <= 0:
raise ValueError(f"An invalid position: `{position}'.")
if size <= 0:
raise ValueError(f"An invalid size: `{size}'.")
self._position = position
self._size = size
@property
def position(self) ->int:
return self._position
@property
def size(self) ->int:
return self._size
<|reserved_special_token_0|>
class Time(object):
def __init__(self, time: datetime.datetime):
if time.tzinfo is None:
raise ValueError(
"Expect an aware `datetime' object, but got naive one.")
self._time = time.astimezone(datetime.timezone.utc)
def as_local_time(self) ->datetime.datetime:
return self._time.astimezone()
def __repr__(self) ->str:
result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')
return re.sub('(\\+\\d{2})(\\d{2})$', '\\1:\\2', result)
def __lt__(self, other) ->bool:
return self._time < other._time
def __eq__(self, other) ->bool:
return self._time == other._time
class Latitude(object):
def __init__(self, degree: float):
if degree < -90 or 90 < degree:
raise ValueError("An invalid latitude degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.6F')
def __lt__(self, other) ->bool:
return self._degree < other._degree
def __eq__(self, other) ->bool:
return self._degree == other._degree
class Longitude(object):
def __init__(self, degree: float):
if degree < -180 or 180 < degree:
raise ValueError("An invalid longitude degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.6F')
def __lt__(self, other) ->bool:
return self._degree < other._degree
def __eq__(self, other) ->bool:
return self._degree == other._degree
class Speed(object):
def __init__(self, meter_per_second: float):
self._meter_per_second = meter_per_second
def __repr__(self) ->str:
return format(self._meter_per_second, '.2F')
class Azimuth(object):
def __init__(self, degree: float):
if degree < 0 or 360 <= degree:
raise ValueError(f"An invalid azimuth degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.2F')
class TrackPoint(object):
    """One dashcam sample: a GPS fix plus its accelerometer reading."""

    def __init__(self, time: Time, status: str, latitude: Optional[Latitude],
        longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,
        x_acceleration: int, y_acceleration: int, z_acceleration: int):
        # A void fix (status 'V' or missing) must carry no coordinates,
        # and a valid fix must carry both of them.
        no_fix = status == 'V' or status is None
        if no_fix != (latitude is None):
            raise ValueError(
                f'Inconsistent arguments: status = {status}, latitude = {latitude}'
                )
        if no_fix != (longitude is None):
            raise ValueError(
                f'Inconsistent arguments: status = {status}, longitude = {longitude}'
                )
        self._time = time
        self._status = status
        self._latitude = latitude
        self._longitude = longitude
        self._speed = speed
        self._azimuth = azimuth
        self._x_acceleration = x_acceleration
        self._y_acceleration = y_acceleration
        self._z_acceleration = z_acceleration

    @property
    def time(self) ->Time:
        return self._time

    @property
    def status(self) ->str:
        return self._status

    @property
    def latitude(self) ->Optional[Latitude]:
        return self._latitude

    @property
    def longitude(self) ->Optional[Longitude]:
        return self._longitude

    @property
    def speed(self) ->Speed:
        return self._speed

    @property
    def azimuth(self) ->Azimuth:
        return self._azimuth

    @property
    def x_acceleration(self) ->int:
        return self._x_acceleration

    @property
    def y_acceleration(self) ->int:
        return self._y_acceleration

    @property
    def z_acceleration(self) ->int:
        return self._z_acceleration

    @property
    def name(self) ->str:
        """A compact local-time stamp suitable for use as an identifier."""
        return self._time.as_local_time().strftime('%Y%m%d%H%M%S')

    def format_as_csv(self) ->str:
        """Render this point as one comma-separated row in local time."""
        if self._time is not None:
            stamp = self._time.as_local_time().strftime('%Y/%m/%d %H:%M:%S')
        else:
            stamp = ''
        cells = [stamp]
        cells.append(self._status if self._status is not None else '')
        cells.append(str(self._latitude) if self._latitude is not None else '')
        cells.append(str(self._longitude) if self._longitude is not None else
            '')
        cells.append(str(self._speed))
        cells.append(str(self._azimuth))
        cells.append(str(self._x_acceleration))
        cells.append(str(self._y_acceleration))
        cells.append(str(self._z_acceleration))
        return ','.join(cells)

    def __repr__(self) ->str:
        latitude = '' if self._latitude is None else str(self._latitude)
        longitude = '' if self._longitude is None else str(self._longitude)
        return f'{self._time},{latitude},{longitude}'

    def __lt__(self, other) ->bool:
        return self._time < other._time

    def __eq__(self, other) ->bool:
        return (self._time == other._time and self._latitude == other.
            _latitude and self._longitude == other._longitude)
class TrackSegment(object):
    """An ordered sequence of TrackPoint objects forming one GPX segment."""

    def __init__(self):
        # Points are kept in insertion order.
        self._track_points = []

    def append_track_point(self, track_point: TrackPoint) ->None:
        """Append *track_point* to the end of the segment."""
        self._track_points.append(track_point)

    def __len__(self) ->int:
        return len(self._track_points)

    def __iter__(self) ->Iterable[TrackPoint]:
        return iter(self._track_points)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Arguments(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def author_name(self) ->Optional[str]:
return self._author_name
<|reserved_special_token_0|>
@property
def copyright(self) ->Optional[str]:
return self._copyright
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def keywords(self) ->Optional[str]:
return self._keywords
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BrokenMp4FileError(RuntimeError):
def __init__(self, message: str):
super().__init__(message)
class GpsDataError(RuntimeError):
def __init__(self, message: str):
super().__init__(message)
class GpsDataBlockIndex(object):
def __init__(self, position: int, size: int):
if position <= 0:
raise ValueError(f"An invalid position: `{position}'.")
if size <= 0:
raise ValueError(f"An invalid size: `{size}'.")
self._position = position
self._size = size
@property
def position(self) ->int:
return self._position
@property
def size(self) ->int:
return self._size
<|reserved_special_token_0|>
class Time(object):
def __init__(self, time: datetime.datetime):
if time.tzinfo is None:
raise ValueError(
"Expect an aware `datetime' object, but got naive one.")
self._time = time.astimezone(datetime.timezone.utc)
def as_local_time(self) ->datetime.datetime:
return self._time.astimezone()
def __repr__(self) ->str:
result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')
return re.sub('(\\+\\d{2})(\\d{2})$', '\\1:\\2', result)
def __lt__(self, other) ->bool:
return self._time < other._time
def __eq__(self, other) ->bool:
return self._time == other._time
class Latitude(object):
def __init__(self, degree: float):
if degree < -90 or 90 < degree:
raise ValueError("An invalid latitude degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.6F')
def __lt__(self, other) ->bool:
return self._degree < other._degree
def __eq__(self, other) ->bool:
return self._degree == other._degree
class Longitude(object):
def __init__(self, degree: float):
if degree < -180 or 180 < degree:
raise ValueError("An invalid longitude degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.6F')
def __lt__(self, other) ->bool:
return self._degree < other._degree
def __eq__(self, other) ->bool:
return self._degree == other._degree
class Speed(object):
def __init__(self, meter_per_second: float):
self._meter_per_second = meter_per_second
def __repr__(self) ->str:
return format(self._meter_per_second, '.2F')
class Azimuth(object):
def __init__(self, degree: float):
if degree < 0 or 360 <= degree:
raise ValueError(f"An invalid azimuth degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.2F')
class TrackPoint(object):
def __init__(self, time: Time, status: str, latitude: Optional[Latitude
], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,
x_acceleration: int, y_acceleration: int, z_acceleration: int):
if (status == 'V' or status is None) != (latitude is None):
raise ValueError(
f'Inconsistent arguments: status = {status}, latitude = {latitude}'
)
if (status == 'V' or status is None) != (longitude is None):
raise ValueError(
f'Inconsistent arguments: status = {status}, longitude = {longitude}'
)
self._time = time
self._status = status
self._latitude = latitude
self._longitude = longitude
self._speed = speed
self._azimuth = azimuth
self._x_acceleration = x_acceleration
self._y_acceleration = y_acceleration
self._z_acceleration = z_acceleration
@property
def time(self) ->Time:
return self._time
@property
def status(self) ->str:
return self._status
@property
def latitude(self) ->Optional[Latitude]:
return self._latitude
@property
def longitude(self) ->Optional[Longitude]:
return self._longitude
@property
def speed(self) ->Speed:
return self._speed
@property
def azimuth(self) ->Azimuth:
return self._azimuth
@property
def x_acceleration(self) ->int:
return self._x_acceleration
@property
def y_acceleration(self) ->int:
return self._y_acceleration
@property
def z_acceleration(self) ->int:
return self._z_acceleration
@property
def name(self) ->str:
local_time = self._time.as_local_time()
return local_time.strftime('%Y%m%d%H%M%S')
def format_as_csv(self) ->str:
if self._time is not None:
local_time = self._time.as_local_time()
result = local_time.strftime('%Y/%m/%d %H:%M:%S')
else:
result = ''
status = self._status if self._status is not None else ''
result += f',{status}'
latitude = str(self._latitude) if self._latitude is not None else ''
result += f',{latitude}'
longitude = str(self._longitude) if self._longitude is not None else ''
result += f',{longitude}'
result += f',{self._speed}'
result += f',{self._azimuth}'
result += f',{self._x_acceleration}'
result += f',{self._y_acceleration}'
result += f',{self._z_acceleration}'
return result
def __repr__(self) ->str:
latitude = str(self._latitude) if self._latitude is not None else ''
longitude = str(self._longitude) if self._longitude is not None else ''
return f'{self._time},{latitude},{longitude}'
def __lt__(self, other) ->bool:
return self._time < other._time
def __eq__(self, other) ->bool:
return (self._time == other._time and self._latitude == other.
_latitude and self._longitude == other._longitude)
class TrackSegment(object):
def __init__(self):
self._track_points = []
def append_track_point(self, track_point: TrackPoint) ->None:
self._track_points.append(track_point)
def __len__(self) ->int:
return len(self._track_points)
def __iter__(self) ->Iterable[TrackPoint]:
return iter(self._track_points)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Arguments(object):
def __init__(self):
parser = argparse.ArgumentParser(prog='papago2gpx', description=
'Extract GPS data from MP4 video files created by PAPAGO! dashcams, and format them into a GPX file.'
)
parser.add_argument('input_paths', nargs='+', help=
'The path to an input file or directory.', metavar='INPUT_PATH')
parser.add_argument('--name', help=
'The name of the GPX file to output. Default to 16 deciaml digits representing the first GPS record time.'
, metavar='NAME')
parser.add_argument('--description', help=
'The description of the GPX file to output.', metavar='DESCRIPTION'
)
parser.add_argument('--author-name', help=
'The name of the author of the GPX file to output.', metavar=
'AUTHOR_NAME')
parser.add_argument('--author-email', help=
'The Email address of the author of the GPX file to output.',
metavar='AUTHOR_EMAIL')
parser.add_argument('--copyright', help=
"The copyright holder of the GPX file to output. Default to `AUTHOR_NAME'."
, metavar='COPYRIGHT')
parser.add_argument('--copyright-year', help=
'The copyright year of the GPX file to output. Default to the year the file is created.'
, metavar='COPYRIGHT_YEAR')
parser.add_argument('--copyright-license', help=
'A link to an external file containing license text.', metavar=
'LICENSE')
parser.add_argument('--keywords', help=
'Keywords associated with the GPX file to output.', metavar=
'KEYWORDS')
parser.add_argument('--track-name', help='The name of the track.',
metavar='TRACK_NAME')
parser.add_argument('--track-comment', help=
'The comment of the track.', metavar='TRACK_COMMENT')
parser.add_argument('--track-description', help=
'The description of the track.', metavar='TRACK_DESCRIPTION')
parser.add_argument('--track-type', default=_DEFAULT_TRACK_TYPE,
help=f"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.")
parser.add_argument('--uniq', choices=['first', 'last'], help=
'How to process different coordinates recorded at the same timestamp. Default to an error.'
)
parser.add_argument('--overwrite', action='store_true', help=
'Allow to overwrite an existing file.')
args = parser.parse_args()
self._input_paths = []
for input_path in args.input_paths:
input_path = pathlib.Path(input_path)
if not input_path.exists():
print(f'{input_path}: File does not exist.', file=sys.stderr)
sys.exit(1)
self._input_paths.append(input_path)
self._name = args.name
self._description = args.description
self._author_name = args.author_name
self._author_email = args.author_email
self._copyright = args.copyright
if self._copyright is None and self._author_name is not None:
self._copyright = self._author_name
self._copyright_year = args.copyright_year
if self._copyright_year is not None and self._copyright is None:
print("`--copyright-year' is specified, but `--copyright' is not.",
file=sys.stderr)
sys.exit(1)
if self._copyright_year is None and self._copyright is not None:
utc_now = datetime.datetime.now(datetime.timezone.utc)
local_aware_now = utc_now.astimezone()
self._copyright_year = local_aware_now.year
self._copyright_license = args.copyright_license
if self._copyright_license is not None and self._copyright is None:
print(
"`--copyright-license' is specified, but `--copyright' is not."
, file=sys.stderr)
sys.exit(1)
self._keywords = args.keywords
self._track_name = args.track_name
self._track_comment = args.track_comment
self._track_description = args.track_description
self._track_type = args.track_type
if self._track_type is None:
self._track_type = _DEFAULT_TRACK_TYPE
if self._track_type == '':
self._track_type = None
self._how_to_unique = args.uniq
self._overwrite = args.overwrite
@property
def input_paths(self) ->List[pathlib.Path]:
return self._input_paths
<|reserved_special_token_0|>
@property
def description(self) ->Optional[str]:
return self._description
@property
def author_name(self) ->Optional[str]:
return self._author_name
@property
def author_email(self) ->Optional[str]:
return self._author_email
@property
def copyright(self) ->Optional[str]:
return self._copyright
@property
def copyright_year(self) ->Optional[int]:
return self._copyright_year
@property
def copyright_license(self) ->Optional[str]:
return self._copyright_license
@property
def keywords(self) ->Optional[str]:
return self._keywords
@property
def track_name(self) ->Optional[str]:
return self._track_name
@property
def track_comment(self) ->Optional[str]:
return self._track_comment
@property
def track_description(self) ->Optional[str]:
return self._track_description
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BrokenMp4FileError(RuntimeError):
def __init__(self, message: str):
super().__init__(message)
class GpsDataError(RuntimeError):
def __init__(self, message: str):
super().__init__(message)
class GpsDataBlockIndex(object):
def __init__(self, position: int, size: int):
if position <= 0:
raise ValueError(f"An invalid position: `{position}'.")
if size <= 0:
raise ValueError(f"An invalid size: `{size}'.")
self._position = position
self._size = size
@property
def position(self) ->int:
return self._position
@property
def size(self) ->int:
return self._size
<|reserved_special_token_0|>
class Time(object):
def __init__(self, time: datetime.datetime):
if time.tzinfo is None:
raise ValueError(
"Expect an aware `datetime' object, but got naive one.")
self._time = time.astimezone(datetime.timezone.utc)
def as_local_time(self) ->datetime.datetime:
return self._time.astimezone()
def __repr__(self) ->str:
result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')
return re.sub('(\\+\\d{2})(\\d{2})$', '\\1:\\2', result)
def __lt__(self, other) ->bool:
return self._time < other._time
def __eq__(self, other) ->bool:
return self._time == other._time
class Latitude(object):
def __init__(self, degree: float):
if degree < -90 or 90 < degree:
raise ValueError("An invalid latitude degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.6F')
def __lt__(self, other) ->bool:
return self._degree < other._degree
def __eq__(self, other) ->bool:
return self._degree == other._degree
class Longitude(object):
def __init__(self, degree: float):
if degree < -180 or 180 < degree:
raise ValueError("An invalid longitude degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.6F')
def __lt__(self, other) ->bool:
return self._degree < other._degree
def __eq__(self, other) ->bool:
return self._degree == other._degree
class Speed(object):
def __init__(self, meter_per_second: float):
self._meter_per_second = meter_per_second
def __repr__(self) ->str:
return format(self._meter_per_second, '.2F')
class Azimuth(object):
def __init__(self, degree: float):
if degree < 0 or 360 <= degree:
raise ValueError(f"An invalid azimuth degree: `{degree}'.")
self._degree = degree
def __repr__(self) ->str:
return format(self._degree, '.2F')
class TrackPoint(object):
def __init__(self, time: Time, status: str, latitude: Optional[Latitude
], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,
x_acceleration: int, y_acceleration: int, z_acceleration: int):
if (status == 'V' or status is None) != (latitude is None):
raise ValueError(
f'Inconsistent arguments: status = {status}, latitude = {latitude}'
)
if (status == 'V' or status is None) != (longitude is None):
raise ValueError(
f'Inconsistent arguments: status = {status}, longitude = {longitude}'
)
self._time = time
self._status = status
self._latitude = latitude
self._longitude = longitude
self._speed = speed
self._azimuth = azimuth
self._x_acceleration = x_acceleration
self._y_acceleration = y_acceleration
self._z_acceleration = z_acceleration
@property
def time(self) ->Time:
return self._time
@property
def status(self) ->str:
return self._status
@property
def latitude(self) ->Optional[Latitude]:
return self._latitude
@property
def longitude(self) ->Optional[Longitude]:
return self._longitude
@property
def speed(self) ->Speed:
return self._speed
@property
def azimuth(self) ->Azimuth:
return self._azimuth
@property
def x_acceleration(self) ->int:
return self._x_acceleration
@property
def y_acceleration(self) ->int:
return self._y_acceleration
@property
def z_acceleration(self) ->int:
return self._z_acceleration
@property
def name(self) ->str:
local_time = self._time.as_local_time()
return local_time.strftime('%Y%m%d%H%M%S')
def format_as_csv(self) ->str:
if self._time is not None:
local_time = self._time.as_local_time()
result = local_time.strftime('%Y/%m/%d %H:%M:%S')
else:
result = ''
status = self._status if self._status is not None else ''
result += f',{status}'
latitude = str(self._latitude) if self._latitude is not None else ''
result += f',{latitude}'
longitude = str(self._longitude) if self._longitude is not None else ''
result += f',{longitude}'
result += f',{self._speed}'
result += f',{self._azimuth}'
result += f',{self._x_acceleration}'
result += f',{self._y_acceleration}'
result += f',{self._z_acceleration}'
return result
def __repr__(self) ->str:
latitude = str(self._latitude) if self._latitude is not None else ''
longitude = str(self._longitude) if self._longitude is not None else ''
return f'{self._time},{latitude},{longitude}'
def __lt__(self, other) ->bool:
return self._time < other._time
def __eq__(self, other) ->bool:
return (self._time == other._time and self._latitude == other.
_latitude and self._longitude == other._longitude)
class TrackSegment(object):
def __init__(self):
self._track_points = []
def append_track_point(self, track_point: TrackPoint) ->None:
self._track_points.append(track_point)
def __len__(self) ->int:
return len(self._track_points)
def __iter__(self) ->Iterable[TrackPoint]:
return iter(self._track_points)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import argparse
import datetime
import io
import math
import os
import pathlib
import re
import struct
import subprocess
import sys
import xml.sax.saxutils
from typing import Optional, List, Iterable
_DEFAULT_TRACK_TYPE = 'Dashcam track'
class Arguments(object):
    """Command-line arguments for papago2gpx, parsed and cross-validated."""
    def __init__(self):
        """Parse ``sys.argv``; on an invalid path or an inconsistent option
        combination, print a message to stderr and ``sys.exit(1)``."""
        parser = argparse.ArgumentParser(prog='papago2gpx', description=
            'Extract GPS data from MP4 video files created by PAPAGO! dashcams, and format them into a GPX file.'
            )
        parser.add_argument('input_paths', nargs='+', help=
            'The path to an input file or directory.', metavar='INPUT_PATH')
        parser.add_argument('--name', help=
            'The name of the GPX file to output. Default to 16 deciaml digits representing the first GPS record time.'
            , metavar='NAME')
        parser.add_argument('--description', help=
            'The description of the GPX file to output.', metavar='DESCRIPTION'
            )
        parser.add_argument('--author-name', help=
            'The name of the author of the GPX file to output.', metavar=
            'AUTHOR_NAME')
        parser.add_argument('--author-email', help=
            'The Email address of the author of the GPX file to output.',
            metavar='AUTHOR_EMAIL')
        parser.add_argument('--copyright', help=
            "The copyright holder of the GPX file to output. Default to `AUTHOR_NAME'."
            , metavar='COPYRIGHT')
        parser.add_argument('--copyright-year', help=
            'The copyright year of the GPX file to output. Default to the year the file is created.'
            , metavar='COPYRIGHT_YEAR')
        parser.add_argument('--copyright-license', help=
            'A link to an external file containing license text.', metavar=
            'LICENSE')
        parser.add_argument('--keywords', help=
            'Keywords associated with the GPX file to output.', metavar=
            'KEYWORDS')
        parser.add_argument('--track-name', help='The name of the track.',
            metavar='TRACK_NAME')
        parser.add_argument('--track-comment', help=
            'The comment of the track.', metavar='TRACK_COMMENT')
        parser.add_argument('--track-description', help=
            'The description of the track.', metavar='TRACK_DESCRIPTION')
        parser.add_argument('--track-type', default=_DEFAULT_TRACK_TYPE,
            help=f"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.")
        parser.add_argument('--uniq', choices=['first', 'last'], help=
            'How to process different coordinates recorded at the same timestamp. Default to an error.'
            )
        parser.add_argument('--overwrite', action='store_true', help=
            'Allow to overwrite an existing file.')
        args = parser.parse_args()
        # Validate every input path up front so a typo fails fast.
        self._input_paths = []
        for input_path in args.input_paths:
            input_path = pathlib.Path(input_path)
            if not input_path.exists():
                print(f'{input_path}: File does not exist.', file=sys.stderr)
                sys.exit(1)
            self._input_paths.append(input_path)
        self._name = args.name
        self._description = args.description
        self._author_name = args.author_name
        self._author_email = args.author_email
        self._copyright = args.copyright
        # The author doubles as the copyright holder unless overridden.
        if self._copyright is None and self._author_name is not None:
            self._copyright = self._author_name
        self._copyright_year = args.copyright_year
        # Year and license only make sense when a copyright holder exists.
        if self._copyright_year is not None and self._copyright is None:
            print("`--copyright-year' is specified, but `--copyright' is not.",
                file=sys.stderr)
            sys.exit(1)
        if self._copyright_year is None and self._copyright is not None:
            # Default the copyright year to "now" in the local timezone.
            utc_now = datetime.datetime.now(datetime.timezone.utc)
            local_aware_now = utc_now.astimezone()
            self._copyright_year = local_aware_now.year
        self._copyright_license = args.copyright_license
        if self._copyright_license is not None and self._copyright is None:
            print(
                "`--copyright-license' is specified, but `--copyright' is not."
                , file=sys.stderr)
            sys.exit(1)
        self._keywords = args.keywords
        self._track_name = args.track_name
        self._track_comment = args.track_comment
        self._track_description = args.track_description
        self._track_type = args.track_type
        if self._track_type is None:
            self._track_type = _DEFAULT_TRACK_TYPE
        # An explicit empty string suppresses the track type entirely.
        if self._track_type == '':
            self._track_type = None
        self._how_to_unique = args.uniq
        self._overwrite = args.overwrite
    @property
    def input_paths(self) ->List[pathlib.Path]:
        return self._input_paths
    @property
    def name(self) ->Optional[str]:
        return self._name
    @property
    def description(self) ->Optional[str]:
        return self._description
    @property
    def author_name(self) ->Optional[str]:
        return self._author_name
    @property
    def author_email(self) ->Optional[str]:
        return self._author_email
    @property
    def copyright(self) ->Optional[str]:
        return self._copyright
    @property
    def copyright_year(self) ->Optional[int]:
        return self._copyright_year
    @property
    def copyright_license(self) ->Optional[str]:
        return self._copyright_license
    @property
    def keywords(self) ->Optional[str]:
        return self._keywords
    @property
    def track_name(self) ->Optional[str]:
        return self._track_name
    @property
    def track_comment(self) ->Optional[str]:
        return self._track_comment
    @property
    def track_description(self) ->Optional[str]:
        return self._track_description
    @property
    def track_type(self) ->Optional[str]:
        return self._track_type
    @property
    def how_to_unique(self) ->str:
        return self._how_to_unique
    @property
    def overwrite(self) ->bool:
        return self._overwrite
class BrokenMp4FileError(RuntimeError):
    """Raised when an MP4 file violates the expected box structure."""

    def __init__(self, message: str):
        super().__init__(message)
class GpsDataError(RuntimeError):
    """Raised when the GPS payload is missing, truncated, or malformed."""

    def __init__(self, message: str):
        super().__init__(message)
class GpsDataBlockIndex(object):
    """The location of one GPS data block inside an MP4 file."""

    def __init__(self, position: int, size: int):
        # Both fields must be strictly positive; zeroes mark absent entries.
        if position <= 0:
            raise ValueError(f"An invalid position: `{position}'.")
        if size <= 0:
            raise ValueError(f"An invalid size: `{size}'.")
        self._position = position
        self._size = size

    @property
    def position(self) ->int:
        """The byte offset of the block from the start of the file."""
        return self._position

    @property
    def size(self) ->int:
        """The length of the block in bytes."""
        return self._size
def get_gps_data_block_indices(mp4_file: io.FileIO) ->List[GpsDataBlockIndex]:
    """Find the `moov`/`gps ` box in *mp4_file* and parse its index table.

    The payload of that box is a 32-bit magic number (always 257), a 32-bit
    entry count, then one big-endian (position, size) pair of 32-bit unsigned
    integers per GPS data block.  Entries recorded as zeroes are skipped with
    a warning on stderr.

    Raises BrokenMp4FileError when the box structure itself is malformed and
    GpsDataError when the GPS index is absent, truncated, or inconsistent.
    """
    target_box_path = ['moov', 'gps ']
    # Walk the box tree: descend into a box that matches the next component
    # of the target path, otherwise seek past it to its next sibling.
    while True:
        box_size = mp4_file.read(4)
        if len(box_size) == 0:
            # Clean EOF at a box boundary: the file has no GPS index at all.
            raise GpsDataError(
                f'{mp4_file.name}: Could not find any GPS data block index.')
        if len(box_size) < 4:
            error_position = format(mp4_file.tell() - len(box_size), '#010x')
            raise BrokenMp4FileError(
                f'{mp4_file.name}:{error_position}: Expect the size of a box, but got EOF.'
                )
        box_size = int.from_bytes(box_size, 'big')
        box_type = mp4_file.read(4)
        if len(box_type) < 4:
            error_position = format(mp4_file.tell() - len(box_type), '#010x')
            raise BrokenMp4FileError(
                f'{mp4_file.name}:{error_position}: Expect the type of a box, but got EOF.'
                )
        box_type = box_type.decode('UTF-8')
        if box_size == 0:
            # A size of 0 means the box extends to EOF; it has no sibling.
            box_size = None
            next_position = None
        elif box_size == 1:
            # A size of 1 means a 64-bit size field follows the box type.
            box_size = mp4_file.read(8)
            if len(box_size) < 8:
                error_position = format(mp4_file.tell() - len(box_size),
                    '#010x')
                raise BrokenMp4FileError(
                    f'{mp4_file.name}:{error_position}: Expect the size of a box, but got EOF.'
                    )
            box_size = int.from_bytes(box_size, 'big')
            next_position = mp4_file.tell() + box_size - 16
        else:
            next_position = mp4_file.tell() + box_size - 8
        if box_type == target_box_path[0]:
            target_box_path.pop(0)
            if len(target_box_path) == 0:
                # Reached `gps `: its payload starts at the current offset.
                break
        else:
            if next_position is None:
                raise GpsDataError(
                    f'{mp4_file.name}: Could not find any GPS data block index.'
                    )
            mp4_file.seek(next_position)
            if mp4_file.tell() != next_position:
                raise BrokenMp4FileError(
                    f'{mp4_file.name}: The size of a box is not equal to the actual one.'
                    )
    unknown = mp4_file.read(4)
    if len(unknown) < 4:
        error_position = format(mp4_file.tell() - len(unknown), '#010x')
        raise GpsDataError(
            f'{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer, but got EOF.'
            )
    unknown = int.from_bytes(unknown, 'big')
    if unknown != 257:
        error_position = format(mp4_file.tell() - 4, '#010x')
        raise GpsDataError(
            f"{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer with value `257', but got `{unknown}'."
            )
    gps_data_block_count = mp4_file.read(4)
    if len(gps_data_block_count) < 4:
        error_position = format(mp4_file.tell() - len(gps_data_block_count),
            '#010x')
        raise GpsDataError(
            f'{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer, but got EOF.'
            )
    gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')
    gps_data_block_indices = []
    for i in range(gps_data_block_count):
        position = mp4_file.read(4)
        if len(position) < 4:
            error_position = format(mp4_file.tell() - len(position), '#010x')
            raise GpsDataError(
                f'{mp4_file.name}:{error_position}: Expect the position of a GPS data block, but got EOF.'
                )
        position = int.from_bytes(position, 'big')
        if position < 0:
            # Unreachable for an unsigned read; kept as a defensive check.
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(
                f"{mp4_file.name}:{error_position}: Expect the position of a GPS data block, but got an invalid value `{position}'."
                )
        size = mp4_file.read(4)
        if len(size) < 4:
            error_position = format(mp4_file.tell() - len(size), '#010x')
            raise GpsDataError(
                f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'
                )
        size = int.from_bytes(size, 'big')
        if size < 0:
            # Unreachable for an unsigned read; kept as a defensive check.
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(
                f"{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got an invalid value `{size}'."
                )
        if position == 0 or size == 0:
            print(
                f'{mp4_file.name}: Warning: The index of GPS data blocks is not recorded.'
                , file=sys.stderr)
        else:
            gps_data_block_index = GpsDataBlockIndex(position, size)
            gps_data_block_indices.append(gps_data_block_index)
    if mp4_file.tell() != next_position:
        error_position = format(mp4_file.tell(), '#010x')
        # BUG FIX: this message used the undefined name `mp4_file_path',
        # which raised NameError instead of the intended GpsDataError.
        raise GpsDataError(
            f'{mp4_file.name}:{error_position}: Expect EOF, but find additional data.'
            )
    return gps_data_block_indices
def read_little_endian_single(mp4_file: io.FileIO) ->float:
    """Read four bytes from *mp4_file* as a little-endian IEEE 754 single.

    Raises GpsDataError when fewer than four bytes remain in the file.
    """
    data = mp4_file.read(4)
    if len(data) < 4:
        error_position = format(mp4_file.tell() - len(data), '#010x')
        raise GpsDataError(
            f'{mp4_file.name}:{error_position}: Expect a little-endian single-precision floating point number, but got EOF.'
            )
    # struct handles every IEEE 754 case (zero, denormals, inf, NaN); the
    # previous hand-rolled decoder always set the implicit mantissa bit, so
    # it decoded 0.0 as a tiny non-zero value and broke on denormals.
    return struct.unpack('<f', data)[0]
class Time(object):
    """A timestamp normalized to UTC; only aware datetimes are accepted."""

    def __init__(self, time: datetime.datetime):
        # Reject naive datetimes so every Time compares on the same axis.
        if time.tzinfo is None:
            raise ValueError(
                "Expect an aware `datetime' object, but got naive one.")
        self._time = time.astimezone(datetime.timezone.utc)

    def as_local_time(self) ->datetime.datetime:
        """Return this instant converted to the system's local timezone."""
        return self._time.astimezone()

    def __repr__(self) ->str:
        # Insert the colon GPX expects into strftime's "+HHMM" offset.
        stamp = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')
        return re.sub('(\\+\\d{2})(\\d{2})$', '\\1:\\2', stamp)

    def __lt__(self, other) ->bool:
        return self._time < other._time

    def __eq__(self, other) ->bool:
        return self._time == other._time
class Latitude(object):
    """A latitude in decimal degrees, restricted to [-90, 90]."""

    def __init__(self, degree: float):
        if degree < -90 or 90 < degree:
            # BUG FIX: the message was missing the `f' prefix, so `{degree}'
            # appeared literally instead of the offending value.
            raise ValueError(f"An invalid latitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) ->str:
        # Six decimal places gives roughly 0.1 m of positional precision.
        return format(self._degree, '.6F')

    def __lt__(self, other) ->bool:
        return self._degree < other._degree

    def __eq__(self, other) ->bool:
        return self._degree == other._degree
class Longitude(object):
    """A longitude in decimal degrees, restricted to [-180, 180]."""

    def __init__(self, degree: float):
        if degree < -180 or 180 < degree:
            # BUG FIX: the message was missing the `f' prefix, so `{degree}'
            # appeared literally instead of the offending value.
            raise ValueError(f"An invalid longitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) ->str:
        # Six decimal places gives roughly 0.1 m of positional precision.
        return format(self._degree, '.6F')

    def __lt__(self, other) ->bool:
        return self._degree < other._degree

    def __eq__(self, other) ->bool:
        return self._degree == other._degree
class Speed(object):
    """A speed in metres per second, rendered with two decimal places."""

    def __init__(self, meter_per_second: float):
        self._meter_per_second = meter_per_second

    def __repr__(self) ->str:
        return format(self._meter_per_second, '.2F')
class Azimuth(object):
    """A compass bearing in degrees, within the half-open range [0, 360)."""

    def __init__(self, degree: float):
        if degree < 0 or 360 <= degree:
            raise ValueError(f"An invalid azimuth degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        # Two decimal places, as emitted into the CSV output.
        return f'{self._degree:.2F}'
class TrackPoint(object):
    """One GPS sample: a timestamp plus optional fix data and sensor values.

    A point whose status is 'V' (or None) has no fix, so its latitude and
    longitude must both be None; a point with status 'A' must carry both.
    """

    def __init__(self, time: Time, status: str, latitude: Optional[Latitude
        ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,
        x_acceleration: int, y_acceleration: int, z_acceleration: int):
        # 'V'/None means "no fix"; coordinates must be absent exactly then.
        fixless = status == 'V' or status is None
        if fixless != (latitude is None):
            raise ValueError(
                f'Inconsistent arguments: status = {status}, latitude = {latitude}'
                )
        if fixless != (longitude is None):
            raise ValueError(
                f'Inconsistent arguments: status = {status}, longitude = {longitude}'
                )
        self._time = time
        self._status = status
        self._latitude = latitude
        self._longitude = longitude
        self._speed = speed
        self._azimuth = azimuth
        self._x_acceleration = x_acceleration
        self._y_acceleration = y_acceleration
        self._z_acceleration = z_acceleration

    @property
    def time(self) -> Time:
        return self._time

    @property
    def status(self) -> str:
        return self._status

    @property
    def latitude(self) -> Optional[Latitude]:
        return self._latitude

    @property
    def longitude(self) -> Optional[Longitude]:
        return self._longitude

    @property
    def speed(self) -> Speed:
        return self._speed

    @property
    def azimuth(self) -> Azimuth:
        return self._azimuth

    @property
    def x_acceleration(self) -> int:
        return self._x_acceleration

    @property
    def y_acceleration(self) -> int:
        return self._y_acceleration

    @property
    def z_acceleration(self) -> int:
        return self._z_acceleration

    @property
    def name(self) -> str:
        """A compact local-time stamp, e.g. '20200102030405'."""
        return self._time.as_local_time().strftime('%Y%m%d%H%M%S')

    def format_as_csv(self) -> str:
        """Render this point as one comma-separated CSV row."""
        if self._time is None:
            stamp = ''
        else:
            stamp = self._time.as_local_time().strftime('%Y/%m/%d %H:%M:%S')
        cells = [
            stamp,
            '' if self._status is None else self._status,
            '' if self._latitude is None else str(self._latitude),
            '' if self._longitude is None else str(self._longitude),
            str(self._speed),
            str(self._azimuth),
            str(self._x_acceleration),
            str(self._y_acceleration),
            str(self._z_acceleration),
        ]
        return ','.join(cells)

    def __repr__(self) -> str:
        cells = [
            str(self._time),
            '' if self._latitude is None else str(self._latitude),
            '' if self._longitude is None else str(self._longitude),
        ]
        return ','.join(cells)

    def __lt__(self, other) -> bool:
        return self._time < other._time

    def __eq__(self, other) -> bool:
        if self._time != other._time:
            return False
        return (self._latitude == other._latitude and
            self._longitude == other._longitude)
class TrackSegment(object):
    """A mutable, ordered sequence of track points (one GPX <trkseg>)."""

    def __init__(self):
        # Points are kept in insertion order.
        self._track_points = []

    def append_track_point(self, track_point: TrackPoint) -> None:
        """Append *track_point* at the end of the segment."""
        self._track_points.append(track_point)

    def __len__(self) -> int:
        return len(self._track_points)

    def __iter__(self) -> Iterable[TrackPoint]:
        yield from self._track_points
# 112-byte pattern observed at offsets 420..532 of the padding area of GPS
# data blocks.  Its meaning is unknown; parse_mp4_file accepts either this
# exact blob or all-zero bytes in that region.
_UNKNOWN_BYTES = (
    b'\x00!\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\xbc\xc7\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00<\xdb\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\x18\xb5\x18\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\xa0\xfe\x19\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00 \xf9\x1b\x00\x00\x00\x00\x00\x80\x01\x00\x00\x01\x00\x00\x00\xac\xb3\x1c\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00'
    )
def parse_mp4_file(mp4_file_path: pathlib.Path) ->List[TrackPoint]:
    """Extract one TrackPoint per `freeGPS ' data block of an MP4 file.

    Block layout (as enforced below): a big-endian 32-bit outer size, the
    8-byte signature ``freeGPS ``, a little-endian 32-bit inner size (always
    88), 32 zero bytes, the time/status/position/speed/azimuth/acceleration
    fields, then at least 532 bytes of padding.  Any deviation raises
    GpsDataError.
    """
    track_points = []
    with open(mp4_file_path, 'rb') as mp4_file:
        gps_data_block_indices = get_gps_data_block_indices(mp4_file)
        for gps_data_block_index in gps_data_block_indices:
            # Seek to the indexed block; seeking past EOF leaves tell() short.
            mp4_file.seek(gps_data_block_index.position)
            if mp4_file.tell() != gps_data_block_index.position:
                error_position = gps_data_block_index.position
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect a GPS data block, but got EOF.'
                    )
            # Outer block size (big-endian) must match the stored index.
            large_block_size = mp4_file.read(4)
            if len(large_block_size) < 4:
                error_position = format(mp4_file.tell() - len(
                    large_block_size), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'
                    )
            large_block_size = int.from_bytes(large_block_size, 'big')
            if large_block_size != gps_data_block_index.size:
                error_position = format(mp4_file.tell() - 4, '#010x')
                # NOTE(review): this message interpolates `mp4_file_path`
                # while every other message uses `mp4_file.name`.
                raise GpsDataError(
                    f'{mp4_file_path}:{error_position}: The size of a GPS data block is not equal to the one stored in the index.'
                    )
            large_block_end = mp4_file.tell() - 4 + large_block_size
            signature = mp4_file.read(8)
            if len(signature) < 8:
                error_position = format(mp4_file.tell() - len(signature),
                    '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the signature of a GPS data block, but got EOF.'
                    )
            signature = signature.decode('UTF-8')
            if signature != 'freeGPS ':
                error_position = format(mp4_file.tell() - 8, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect `freeGPS ' as the signature of a GPS data block, but got `{signature}'."
                    )
            # Inner record size (little-endian) is a fixed 88 bytes.
            small_block_size = mp4_file.read(4)
            if len(small_block_size) < 4:
                error_position = format(mp4_file.tell() - len(
                    small_block_size), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'
                    )
            small_block_size = int.from_bytes(small_block_size, 'little')
            if small_block_size != 88:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect `88' as the size of a GPS data block, but got `{small_block_size}'."
                    )
            small_block_end = mp4_file.tell() + small_block_size
            # 32 zero bytes precede the time fields.
            padding = mp4_file.read(32)
            if len(padding) < 32:
                error_position = format(mp4_file.tell() - len(padding), '#010x'
                    )
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'
                    )
            for j, b in enumerate(padding):
                if b != 0:
                    error_position = format(mp4_file.tell() - 32 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                        )
            # Time is stored as six little-endian 32-bit fields:
            # hour, minute, second, year (offset from 2000), month, day.
            hour = mp4_file.read(4)
            if len(hour) < 4:
                error_position = format(mp4_file.tell() - len(hour), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the hour of time, but got EOF.'
                    )
            hour = int.from_bytes(hour, 'little')
            if hour < 0 or 24 <= hour:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect the hour of time, but got an invalid value `{hour}'."
                    )
            minute = mp4_file.read(4)
            if len(minute) < 4:
                error_position = format(mp4_file.tell() - len(minute), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the minute of time, but got EOF.'
                    )
            minute = int.from_bytes(minute, 'little')
            if minute < 0 or 60 <= minute:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect the minute of time, but got an invalid value `{minute}'."
                    )
            second = mp4_file.read(4)
            if len(second) < 4:
                error_position = format(mp4_file.tell() - len(second), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the second of time, but got EOF.'
                    )
            second = int.from_bytes(second, 'little')
            if second < 0 or 60 <= second:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect the second of time, but got an invalid value `{second}'."
                    )
            year = mp4_file.read(4)
            if len(year) < 4:
                error_position = format(mp4_file.tell() - len(year), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the year of time, but got EOF.'
                    )
            year = int.from_bytes(year, 'little')
            # year == 0 means "no GPS time"; then all time fields must be 0.
            if year == 0:
                error_position = format(mp4_file.tell() - 4, '#010x')
                if hour != 0:
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: `year == 0' but `hour != 0'."
                        )
                if minute != 0:
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: `year == 0' but `minute != 0'."
                        )
                if second != 0:
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: `year == 0' but `second != 0'."
                        )
            else:
                year += 2000
            month = mp4_file.read(4)
            if len(month) < 4:
                error_position = format(mp4_file.tell() - len(month), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the month of time, but got EOF.'
                    )
            month = int.from_bytes(month, 'little')
            if month == 0:
                # NOTE(review): `error_position` here may be stale (set by an
                # earlier check) or even unbound when year != 0 — verify.
                if year != 0:
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: `year != 0' but `month == 0'."
                        )
                assert hour == 0
                assert minute == 0
                assert second == 0
            elif month < 1 or 12 < month:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect the month of time, but got an invalid value `{month}'."
                    )
            day = mp4_file.read(4)
            if len(day) < 4:
                error_position = format(mp4_file.tell() - len(day), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the day of time, but got EOF.'
                    )
            day = int.from_bytes(day, 'little')
            if day == 0:
                # NOTE(review): same stale/unbound `error_position` issue as
                # in the month == 0 branch above.
                if year != 0:
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: `year != 0' but `day == 0'."
                        )
                assert month == 0
                assert hour == 0
                assert minute == 0
                assert second == 0
            elif day < 1 or 31 < day:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect the day of time, but got an invalid value `{day}'."
                    )
            if year == 0:
                assert month == 0
                assert day == 0
                assert hour == 0
                assert minute == 0
                assert second == 0
                time = None
            else:
                # The recorded fields are interpreted in the machine's local
                # timezone.  NOTE(review): replace() keeps *today's* UTC
                # offset, so dates recorded under a different DST offset may
                # be skewed — confirm against camera output.
                time = datetime.datetime.now(datetime.timezone.utc)
                time = time.astimezone()
                time = time.replace(year=year, month=month, day=day, hour=
                    hour, minute=minute, second=second, microsecond=0)
                time = Time(time)
            if time is None:
                # No GPS time: the status/type bytes are zero padding.
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(mp4_file.tell() - len(padding),
                        '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect zero-padding, but got EOF.'
                        )
                padding = int.from_bytes(padding, 'little')
                if padding != 0:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect zero-padding, but got `{padding}'."
                        )
                status = None
                latitude_type = '0'
                longitude_type = '0'
            else:
                # Status is NMEA-style: 'A' = valid fix, 'V' = no fix.
                status = mp4_file.read(1)
                if len(status) < 1:
                    error_position = format(mp4_file.tell() - len(status),
                        '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect a status character, but got EOF.'
                        )
                status = status.decode('UTF-8')
                if status not in ('A', 'V'):
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect `A' or `V' as a status character, but got an invalid character `{status}'."
                        )
                # Hemisphere characters: 'N'/'S' and 'E'/'W' on a fix,
                # literal '0' otherwise.
                latitude_type = mp4_file.read(1)
                if len(latitude_type) < 1:
                    error_position = format(mp4_file.tell() - len(
                        latitude_type), '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect a latitude type, but got EOF.'
                        )
                latitude_type = latitude_type.decode('UTF-8')
                if status == 'A':
                    if latitude_type not in ('N', 'S'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `N' or `S' as a latitude type, but got an invalid character `{latitude_type}'."
                            )
                else:
                    assert status == 'V'
                    if latitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `0' as a latitude type, but got an invalid character `{latitude_type}'."
                            )
                longitude_type = mp4_file.read(1)
                if len(longitude_type) < 1:
                    error_position = format(mp4_file.tell() - len(
                        longitude_type), '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect a longitude type, but got EOF.'
                        )
                longitude_type = longitude_type.decode('UTF-8')
                if status == 'A':
                    if longitude_type not in ('E', 'W'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `E' or `W' as a longitude type, but got an invalid character `{longitude_type}'."
                            )
                else:
                    assert status == 'V'
                    if longitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `0' as a longitude type, but got an invalid character `{longitude_type}'."
                            )
                padding = mp4_file.read(1)
                if len(padding) < 1:
                    error_position = format(mp4_file.tell() - len(padding),
                        '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'
                        )
                if padding[0] != 0:
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    byte = format(padding[0], '#04x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                        )
            # Latitude is a single-precision float in DMM format
            # (degrees * 100 + minutes); zero padding when there is no fix.
            if status == 'A':
                latitude_dmm = read_little_endian_single(mp4_file)
                latitude_degree = math.floor(latitude_dmm / 100)
                if latitude_degree < 0 or 90 < latitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'."
                        )
                latitude_minute = latitude_dmm - latitude_degree * 100
                if latitude_minute < 0 or 60 <= latitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'."
                        )
                latitude_degree += latitude_minute / 60
                latitude = Latitude(latitude_degree)
            else:
                assert status == 'V' or status is None
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(mp4_file.tell() - len(padding),
                        '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'
                        )
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(mp4_file.tell() - 4 + j,
                            '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                            )
                latitude = None
            # Longitude follows the same DMM-or-padding scheme.
            if status == 'A':
                longitude_dmm = read_little_endian_single(mp4_file)
                longitude_degree = math.floor(longitude_dmm / 100)
                if longitude_degree < 0 or 180 < longitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect a longitude in DMM format, but got an invalid value `{longitude_dmm}'."
                        )
                longitude_minute = longitude_dmm - longitude_degree * 100
                if longitude_minute < 0 or 60 <= longitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect a longitude in DMM format, but got an invalid value `{longitude_dmm}'."
                        )
                longitude_degree += longitude_minute / 60
                longitude = Longitude(longitude_degree)
            else:
                assert status == 'V' or status is None
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(mp4_file.tell() - len(padding),
                        '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'
                        )
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(mp4_file.tell() - 4 + j,
                            '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                            )
                longitude = None
            speed = read_little_endian_single(mp4_file)
            # 1852 / 3600 converts knots (nautical miles per hour) to m/s.
            speed *= 1852 / 3600
            speed = Speed(speed)
            azimuth = read_little_endian_single(mp4_file)
            if azimuth < 0 or 360 <= azimuth:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(
                    f"{mp4_file.name}:{error_position}: Expect azimuth degree, but got an invalid value `{azimuth}'."
                    )
            azimuth = Azimuth(azimuth)
            # Three signed little-endian 32-bit accelerometer samples.
            x_acceleration = mp4_file.read(4)
            if len(x_acceleration) < 4:
                error_position = format(mp4_file.tell() - len(
                    x_acceleration), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect X-axis acceleration, but got EOF.'
                    )
            x_acceleration = int.from_bytes(x_acceleration, 'little',
                signed=True)
            y_acceleration = mp4_file.read(4)
            if len(y_acceleration) < 4:
                error_position = format(mp4_file.tell() - len(
                    y_acceleration), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect Y-axis acceleration, but got EOF.'
                    )
            y_acceleration = int.from_bytes(y_acceleration, 'little',
                signed=True)
            z_acceleration = mp4_file.read(4)
            if len(z_acceleration) < 4:
                error_position = format(mp4_file.tell() - len(
                    z_acceleration), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect Z-axis acceleration, but got EOF.'
                    )
            z_acceleration = int.from_bytes(z_acceleration, 'little',
                signed=True)
            if mp4_file.tell() != small_block_end:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect the end of a GPS data block, but got additional data.'
                    )
            # The rest of the outer block is padding: 420 zero bytes, then
            # 112 bytes that are either all zero or _UNKNOWN_BYTES, then
            # zeros up to the end of the block.
            padding_size = large_block_end - small_block_end
            if padding_size < 532:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect more than or equal to 532-byte padding, but got only {padding_size}-byte padding.'
                    )
            padding = mp4_file.read(padding_size)
            if len(padding) < padding_size:
                error_position = format(mp4_file.tell() - len(padding), '#010x'
                    )
                raise GpsDataError(
                    f'{mp4_file.name}:{error_position}: Expect {padding_size}-byte padding, but got EOF.'
                    )
            for j, b in enumerate(padding[:420]):
                if b != 0:
                    error_position = format(small_block_end + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                        )
            if padding[420:532] != _UNKNOWN_BYTES:
                for j, b in enumerate(padding[420:532]):
                    if b != 0:
                        error_position = format(small_block_end + 420 + j,
                            '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                            )
            for j, b in enumerate(padding[532:]):
                if b != 0:
                    error_position = format(small_block_end + 532 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'."
                        )
            track_point = TrackPoint(time, status, latitude, longitude,
                speed, azimuth, x_acceleration, y_acceleration, z_acceleration)
            track_points.append(track_point)
    return track_points
def read_input_paths(input_paths: List[pathlib.Path]) ->List[TrackPoint]:
    """Gather track points from each input path.

    A directory is walked recursively and every file with an `.mp4' or
    `.MP4' suffix inside it is parsed in sorted path order; a plain file
    is parsed directly.
    """
    track_points = []
    for input_path in input_paths:
        if not input_path.is_dir():
            track_points.extend(parse_mp4_file(input_path))
            continue
        file_paths = sorted(
            pathlib.Path(dirpath) / filename
            for dirpath, _dirnames, filenames in os.walk(input_path)
            for filename in filenames
            if pathlib.PurePath(filename).suffix in ('.mp4', '.MP4'))
        for file_path in file_paths:
            track_points.extend(parse_mp4_file(file_path))
    return track_points
def write_csv_file(args: Arguments, track_points: List[TrackPoint]
    ) ->pathlib.Path:
    """Write every track point as one CSV row to `<name>.csv'.

    Exits the process when `--name' was not given, or when the target file
    exists and `--overwrite' was not given.
    """
    if args.name is None:
        print("`--name' is required to output a CSV file.", file=sys.stderr)
        sys.exit(1)
    csv_file_path = pathlib.Path(f'{args.name}.csv')
    if csv_file_path.exists() and not args.overwrite:
        print(f'{csv_file_path}: File already exists.', file=sys.stderr)
        sys.exit(1)
    with open(csv_file_path, 'w') as csv_file:
        for track_point in track_points:
            csv_file.write(track_point.format_as_csv() + '\n')
    return csv_file_path
def create_track_segments(args: Arguments, track_points: List[TrackPoint]
    ) ->List[TrackSegment]:
    """Filter, sort, and deduplicate track points into track segments.

    Points without a GPS fix (status != 'A') are dropped.  Points sharing
    a timestamp are collapsed to one according to `args.how_to_unique`.
    All surviving points end up in a single segment.
    """
    new_track_points = []
    for track_point in track_points:
        if track_point.status != 'A':
            # A point without a fix must not carry coordinates.
            assert track_point.latitude is None
            assert track_point.longitude is None
            continue
        assert track_point.latitude is not None
        assert track_point.longitude is not None
        new_track_points.append(track_point)
    track_points = new_track_points
    track_points.sort()
    if len(track_points) == 0:
        return []
    # Collapse runs of points sharing a timestamp; `representative_track_point`
    # is the point currently chosen to stand for the run in progress.
    unique_track_points = []
    it = iter(track_points)
    representative_track_point = next(it)
    while True:
        track_point = next(it, None)
        if track_point is None:
            # End of input: commit the final run's representative.
            unique_track_points.append(representative_track_point)
            break
        if track_point.time != representative_track_point.time:
            # New timestamp: commit the previous run's representative.
            unique_track_points.append(representative_track_point)
            representative_track_point = track_point
            continue
        if (track_point.latitude == representative_track_point.latitude and
            track_point.longitude == representative_track_point.longitude):
            # Exact duplicate (same time and coordinates): always drop.
            continue
        if args.how_to_unique == 'first':
            continue
        elif args.how_to_unique == 'last':
            representative_track_point = track_point
        else:
            # Same timestamp, different coordinates, and no `--uniq' policy.
            raise RuntimeError(
                "There exist track points with the same timestamp but different coordinates. Use `--uniq' option."
                )
    # All surviving points currently go into one single segment.
    track_segments = []
    track_segments.append(TrackSegment())
    for track_point in unique_track_points:
        track_segments[0].append_track_point(track_point)
    return track_segments
def as_xml_attribute(data: str) ->str:
    """Return *data* quoted and escaped for use as an XML attribute value."""
    return xml.sax.saxutils.quoteattr(data)
def as_xml_data(data: str) ->str:
    """Return *data* with XML special characters escaped for element text."""
    return xml.sax.saxutils.escape(data)
def get_local_time_in_iso8601() ->str:
    """Return the current local time as an ISO 8601 string with UTC offset."""
    now = datetime.datetime.now(datetime.timezone.utc).astimezone()
    stamp = now.strftime('%Y-%m-%dT%H:%M:%S%z')
    # strftime('%z') yields '+HHMM'; insert the colon ISO 8601 expects.
    return re.sub('([+-]\\d{2})(\\d{2})$', '\\1:\\2', stamp)
def write_gpx_file(args: Arguments, track_segments: List[TrackSegment]
    ) ->pathlib.Path:
    """Write the track segments to `<name>.gpx' and validate the result.

    The file name comes from `--name', or from the timestamp of the
    earliest track point.  After writing, the file is checked against the
    GPX 1.1 schema with the external `xmllint' tool; validation problems
    are reported on stderr but do not abort.
    """
    all_track_points = []
    for track_segment in track_segments:
        for track_point in track_segment:
            all_track_points.append(track_point)
    name = args.name
    if name is None:
        if len(all_track_points) == 0:
            raise ValueError(
                "`--name' is not specified, and there is no track point.")
        all_track_points.sort()
        # Default name: local-time stamp of the earliest point.
        name = all_track_points[0].name
    gpx_file_path = pathlib.Path(f'{name}.gpx')
    # Compute the bounding box for the <bounds> element.
    # NOTE(review): assumes every point has non-None coordinates;
    # create_track_segments guarantees that for its output — confirm for
    # any other caller.
    bounds = None
    if len(all_track_points) > 0:
        latitudes = list(t.latitude for t in all_track_points)
        latitudes.sort()
        longitudes = list(t.longitude for t in all_track_points)
        longitudes.sort()
        bounds = latitudes[0], longitudes[0], latitudes[-1], longitudes[-1]
    if gpx_file_path.exists():
        if not args.overwrite:
            print(f'{gpx_file_path}: Error: File already exists.', file=sys
                .stderr)
            sys.exit(1)
    with open(gpx_file_path, 'w') as gpx_file:
        print('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>',
            file=gpx_file)
        print(
            '<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1" creator="papago2gpx">'
            , file=gpx_file)
        # <metadata>: name, description, author, copyright, time, keywords,
        # and bounds, in the order the GPX 1.1 schema requires.
        print('  <metadata>', file=gpx_file)
        print(f'    <name>{as_xml_data(name)}</name>', file=gpx_file)
        if args.description is not None:
            description = as_xml_data(args.description)
            print(f'    <desc>{description}</desc>', file=gpx_file)
        if args.author_name is not None or args.author_email is not None:
            print('    <author>', file=gpx_file)
            if args.author_name is not None:
                author_name = as_xml_data(args.author_name)
                print(f'      <name>{author_name}</name>', file=gpx_file)
            if args.author_email is not None:
                # GPX splits an address into id and domain attributes.
                author_email_parts = args.author_email.split('@', 1)
                if len(author_email_parts) != 2:
                    raise RuntimeError(
                        f'An invalid E-mail address: {args.author_email}')
                author_email_id = as_xml_attribute(author_email_parts[0])
                author_email_domain = as_xml_attribute(author_email_parts[1])
                print(
                    f'      <email id={author_email_id} domain={author_email_domain}/>'
                    , file=gpx_file)
            print('    </author>', file=gpx_file)
        if args.copyright is not None:
            copyright = as_xml_attribute(args.copyright)
            print(f'    <copyright author={copyright}', end='', file=gpx_file)
            copyright_year = args.copyright_year
            copyright_license = args.copyright_license
            # <copyright> is self-closing unless year/license children exist.
            if copyright_year is not None or copyright_license is not None:
                print('>', file=gpx_file)
                if copyright_year is not None:
                    copyright_year = as_xml_data(str(copyright_year))
                    print(f'      <year>{copyright_year}</year>', file=gpx_file
                        )
                if copyright_license is not None:
                    copyright_license = as_xml_data(copyright_license)
                    print(f'      <license>{copyright_license}</license>',
                        file=gpx_file)
                print('    </copyright>', file=gpx_file)
            else:
                print('/>', file=gpx_file)
        print(f'    <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)
        if args.keywords is not None:
            keywords = as_xml_data(args.keywords)
            print(f'    <keywords>{keywords}</keywords>', file=gpx_file)
        if bounds is not None:
            print(
                f'    <bounds minlat="{bounds[0]}" minlon="{bounds[1]}" maxlat="{bounds[2]}" maxlon="{bounds[3]}"/>'
                , file=gpx_file)
        print('  </metadata>', file=gpx_file)
        # <trk>: optional descriptive children, then the track segments.
        print('  <trk>', file=gpx_file)
        if args.track_name is not None:
            track_name = as_xml_data(args.track_name)
            print(f'    <name>{track_name}</name>', file=gpx_file)
        if args.track_comment is not None:
            track_comment = as_xml_data(args.track_comment)
            print(f'    <cmt>{track_comment}</cmt>', file=gpx_file)
        if args.track_description is not None:
            track_description = as_xml_data(args.track_description)
            print(f'    <desc>{track_description}</desc>', file=gpx_file)
        if args.track_type is not None:
            track_type = as_xml_data(args.track_type)
            print(f'    <type>{track_type}</type>', file=gpx_file)
        for track_segment in track_segments:
            print('    <trkseg>', file=gpx_file)
            for track_point in track_segment:
                print(
                    f'      <trkpt lat="{track_point.latitude}" lon="{track_point.longitude}">'
                    , file=gpx_file)
                print(f'        <time>{track_point.time}</time>', file=gpx_file
                    )
                print('      </trkpt>', file=gpx_file)
            print('    </trkseg>', file=gpx_file)
        print('  </trk>', file=gpx_file)
        print('</gpx>', file=gpx_file)
    # Best-effort schema validation.  NOTE(review): requires `xmllint' on
    # PATH and `gpx.xsd' in the current working directory — confirm both
    # are deployed with this script.
    proc = subprocess.run(['xmllint', '--schema', 'gpx.xsd', str(
        gpx_file_path)], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, encoding='UTF-8')
    if proc.returncode != 0:
        print(
            f"""Failed to validate the GPX file `{gpx_file_path}'.
  command: {proc.args}
  stdout: {proc.stdout}
  stderr: {proc.stderr}
  returncode: {proc.returncode}"""
            , file=sys.stderr)
    return gpx_file_path
if __name__ == '__main__':
    # Entry point: parse the command line, extract GPS track points from the
    # input MP4 files, then emit both a CSV and a GPX file.
    args = Arguments()
    track_points = read_input_paths(args.input_paths)
    csv_file_path = write_csv_file(args, track_points)
    print(f"Succeeded! The result is output to `{csv_file_path}'.")
    # Only points with a fix (status 'A') survive into track segments.
    track_segments = create_track_segments(args, track_points)
    if args.name is None and len(track_segments) == 0:
        # Without either a name or a point to derive one from, the GPX file
        # cannot be named.
        print("`--name' is not specified, and there is no track segment.",
            file=sys.stderr)
        sys.exit(1)
    if len(track_segments) == 0:
        print('WARNING: There is no track segment.', file=sys.stderr)
    gpx_file_path = write_gpx_file(args, track_segments)
    print(f"Succeeded! The result is output to `{gpx_file_path}'.")
    sys.exit(0)
<|reserved_special_token_1|>
#!/usr/bin/env python3
import argparse
import datetime
import io
import math
import os
import pathlib
import re
import struct
import subprocess
import sys
import xml.sax.saxutils
from typing import (Optional, List, Iterable)
# Default value for the GPX track <type> element (see `--track-type').
_DEFAULT_TRACK_TYPE = 'Dashcam track'
class Arguments(object):
    """Parsed command-line arguments for papago2gpx.

    Parsing and validation happen eagerly in ``__init__`` (including
    ``sys.exit`` on invalid combinations); the properties below are
    read-only views of the result.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(
            prog='papago2gpx', description='Extract GPS data from MP4 video\
files created by PAPAGO! dashcams, and format them into a GPX file.')
        parser.add_argument('input_paths', nargs='+',
            help='The path to an input file or directory.',
            metavar='INPUT_PATH')
        parser.add_argument('--name', help='The name of the GPX file to\
output. Default to 16 deciaml digits representing the first GPS record time.',
            metavar='NAME')
        parser.add_argument('--description', help='The description of the GPX\
file to output.', metavar='DESCRIPTION')
        parser.add_argument('--author-name', help='The name of the author of\
the GPX file to output.', metavar='AUTHOR_NAME')
        parser.add_argument('--author-email', help='The Email address of the\
author of the GPX file to output.', metavar='AUTHOR_EMAIL')
        parser.add_argument('--copyright', help="The copyright holder of the\
GPX file to output. Default to `AUTHOR_NAME'.", metavar='COPYRIGHT')
        parser.add_argument('--copyright-year', help="The copyright year of\
the GPX file to output. Default to the year the file is created.",
            metavar='COPYRIGHT_YEAR')
        parser.add_argument('--copyright-license', help='A link to an external\
file containing license text.', metavar='LICENSE')
        parser.add_argument('--keywords', help='Keywords associated with the\
GPX file to output.', metavar='KEYWORDS')
        parser.add_argument('--track-name', help='The name of the track.',
            metavar='TRACK_NAME')
        parser.add_argument(
            '--track-comment', help='The comment of the track.',
            metavar='TRACK_COMMENT')
        parser.add_argument('--track-description', help="The description of\
the track.", metavar='TRACK_DESCRIPTION')
        parser.add_argument(
            '--track-type', default=_DEFAULT_TRACK_TYPE,
            help=f"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.")
        parser.add_argument('--uniq', choices=['first', 'last'],
            help='How to process different coordinates\
recorded at the same timestamp. Default to an error.')
        parser.add_argument('--overwrite', action='store_true',
            help='Allow to overwrite an existing file.')
        args = parser.parse_args()
        # Validate input paths eagerly so failures happen before any parsing.
        self._input_paths = []
        for input_path in args.input_paths:
            input_path = pathlib.Path(input_path)
            if not input_path.exists():
                print(f"{input_path}: File does not exist.", file=sys.stderr)
                sys.exit(1)
            self._input_paths.append(input_path)
        self._name = args.name
        self._description = args.description
        self._author_name = args.author_name
        self._author_email = args.author_email
        self._copyright = args.copyright
        # The copyright holder defaults to the author name.
        if self._copyright is None and self._author_name is not None:
            self._copyright = self._author_name
        # NOTE(review): argparse leaves this as a string, although the
        # `copyright_year' property is annotated Optional[int] — confirm.
        self._copyright_year = args.copyright_year
        if self._copyright_year is not None and self._copyright is None:
            print("`--copyright-year' is specified, but `--copyright' is not.",
                file=sys.stderr)
            sys.exit(1)
        if self._copyright_year is None and self._copyright is not None:
            # Default the copyright year to the current local year.
            utc_now = datetime.datetime.now(datetime.timezone.utc)
            local_aware_now = utc_now.astimezone()
            self._copyright_year = local_aware_now.year
        self._copyright_license = args.copyright_license
        if self._copyright_license is not None and self._copyright is None:
            print("`--copyright-license' is specified, but `--copyright' is\
not.", file=sys.stderr)
            sys.exit(1)
        self._keywords = args.keywords
        self._track_name = args.track_name
        self._track_comment = args.track_comment
        self._track_description = args.track_description
        self._track_type = args.track_type
        if self._track_type is None:
            self._track_type = _DEFAULT_TRACK_TYPE
        # An explicitly empty `--track-type' suppresses the <type> element.
        if self._track_type == '':
            self._track_type = None
        self._how_to_unique = args.uniq
        self._overwrite = args.overwrite

    @property
    def input_paths(self) -> List[pathlib.Path]:
        return self._input_paths

    @property
    def name(self) -> Optional[str]:
        return self._name

    @property
    def description(self) -> Optional[str]:
        return self._description

    @property
    def author_name(self) -> Optional[str]:
        return self._author_name

    @property
    def author_email(self) -> Optional[str]:
        return self._author_email

    @property
    def copyright(self) -> Optional[str]:
        return self._copyright

    @property
    def copyright_year(self) -> Optional[int]:
        return self._copyright_year

    @property
    def copyright_license(self) -> Optional[str]:
        return self._copyright_license

    @property
    def keywords(self) -> Optional[str]:
        return self._keywords

    @property
    def track_name(self) -> Optional[str]:
        return self._track_name

    @property
    def track_comment(self) -> Optional[str]:
        return self._track_comment

    @property
    def track_description(self) -> Optional[str]:
        return self._track_description

    @property
    def track_type(self) -> Optional[str]:
        return self._track_type

    @property
    def how_to_unique(self) -> str:
        return self._how_to_unique

    @property
    def overwrite(self) -> bool:
        return self._overwrite
class BrokenMp4FileError(RuntimeError):
    """Raised when an MP4 file violates the expected box structure.

    The redundant ``__init__`` wrapper was removed: ``RuntimeError``
    already stores and renders the message passed to it.
    """
class GpsDataError(RuntimeError):
    """Raised when GPS data inside an MP4 file deviates from the expected
    layout.

    The redundant ``__init__`` wrapper was removed: ``RuntimeError``
    already stores and renders the message passed to it.
    """
class GpsDataBlockIndex(object):
    """The file offset and size of one `freeGPS ' data block in an MP4 file."""

    def __init__(self, position: int, size: int):
        # Both fields must be strictly positive.
        for label, value in (('position', position), ('size', size)):
            if value <= 0:
                raise ValueError(f"An invalid {label}: `{value}'.")
        self._position = position
        self._size = size

    @property
    def position(self) -> int:
        return self._position

    @property
    def size(self) -> int:
        return self._size
def get_gps_data_block_indices(mp4_file: io.FileIO) -> List[GpsDataBlockIndex]:
    """Locate the `moov'/`gps ' box and read the GPS data block index.

    Walks the top-level MP4 box structure, descends into `moov', and finds
    the vendor-specific `gps ' box whose payload is a table of
    (position, size) pairs.

    Args:
        mp4_file: A binary file object positioned at the start of the file.

    Returns:
        The list of index entries; entries whose position or size is zero
        are skipped with a warning on stderr.

    Raises:
        BrokenMp4FileError: If the box structure is truncated/inconsistent.
        GpsDataError: If no GPS index exists or its payload is malformed.
    """
    target_box_path = ['moov', 'gps ']
    while True:
        box_size = mp4_file.read(4)
        if len(box_size) == 0:
            # Clean EOF at a box boundary: the target box does not exist.
            raise GpsDataError(
                f'{mp4_file.name}: Could not find any GPS data block index.')
        if len(box_size) < 4:
            error_position = format(mp4_file.tell() - len(box_size), '#010x')
            raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\
Expect the size of a box, but got EOF.')
        box_size = int.from_bytes(box_size, 'big')
        box_type = mp4_file.read(4)
        if len(box_type) < 4:
            error_position = format(mp4_file.tell() - len(box_type), '#010x')
            raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\
Expect the type of a box, but got EOF.')
        box_type = box_type.decode('UTF-8')
        if box_size == 0:
            # Size 0: the box extends to the end of the file.
            box_size = None
            next_position = None
        elif box_size == 1:
            # Size 1: the real size follows as a 64-bit big-endian integer.
            box_size = mp4_file.read(8)
            if len(box_size) < 8:
                error_position = format(mp4_file.tell() - len(box_size),
                                        '#010x')
                raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\
Expect the size of a box, but got EOF.')
            box_size = int.from_bytes(box_size, 'big')
            next_position = mp4_file.tell() + box_size - 16
        else:
            next_position = mp4_file.tell() + box_size - 8
        if box_type == target_box_path[0]:
            # Matching box: descend into it instead of skipping it.
            target_box_path.pop(0)
            if len(target_box_path) == 0:
                break
        else:
            if next_position is None:
                raise GpsDataError(f'{mp4_file.name}: Could not find any GPS'
                                   ' data block index.')
            mp4_file.seek(next_position)
            if mp4_file.tell() != next_position:
                raise BrokenMp4FileError(f'{mp4_file.name}: The size of a box\
is not equal to the actual one.')
    # The payload starts with a 32-bit field of unknown meaning that is
    # always 257 in supported files.
    unknown = mp4_file.read(4)
    if len(unknown) < 4:
        error_position = format(mp4_file.tell() - len(unknown), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'
                           ' big-endian 32-bit unsigned integer, but got EOF.')
    unknown = int.from_bytes(unknown, 'big')
    if unknown != 257:
        error_position = format(mp4_file.tell() - 4, '#010x')
        raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect a\
big-endian 32-bit unsigned integer with value `257', but got `{unknown}'.")
    gps_data_block_count = mp4_file.read(4)
    if len(gps_data_block_count) < 4:
        error_position = format(mp4_file.tell() - len(gps_data_block_count),
                                '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'
                           ' big-endian 32-bit unsigned integer, but got EOF.')
    gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')
    gps_data_block_indices = []
    for _ in range(gps_data_block_count):
        position = mp4_file.read(4)
        if len(position) < 4:
            error_position = format(mp4_file.tell() - len(position), '#010x')
            raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'
                               ' position of a GPS data block, but got EOF.')
        position = int.from_bytes(position, 'big')
        if position < 0:
            # NOTE(review): unreachable — `int.from_bytes' without
            # `signed=True' never returns a negative value.
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect the\
position of a GPS data block, but got an invalid value `{position}'.")
        size = mp4_file.read(4)
        if len(size) < 4:
            error_position = format(mp4_file.tell() - len(size), '#010x')
            raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'
                               ' size of a GPS data block, but got EOF.')
        size = int.from_bytes(size, 'big')
        if size < 0:
            # NOTE(review): unreachable for the same reason as above.
            error_position = format(mp4_file.tell() - 4, '#010x')
            raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect the\
size of a GPS data block, but got an invalid value `{size}'.")
        if position == 0 or size == 0:
            # All-zero entries occur when the camera recorded no index data.
            print(f'{mp4_file.name}: Warning: The index of GPS data blocks is\
not recorded.', file=sys.stderr)
        else:
            gps_data_block_index = GpsDataBlockIndex(position, size)
            gps_data_block_indices.append(gps_data_block_index)
    if mp4_file.tell() != next_position:
        error_position = format(mp4_file.tell(), '#010x')
        # Bug fix: this message used the undefined name `mp4_file_path',
        # which raised NameError instead of the intended GpsDataError.
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect EOF, but'
                           ' find additional data.')
    return gps_data_block_indices
def read_little_endian_single(mp4_file: io.FileIO) -> float:
    """Read a little-endian IEEE 754 single-precision float from *mp4_file*.

    Raises:
        GpsDataError: If fewer than four bytes remain.
    """
    import struct  # Local import keeps this fix self-contained.

    data = mp4_file.read(4)
    if len(data) < 4:
        error_position = format(mp4_file.tell() - len(data), '#010x')
        raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a\
little-endian single-precision floating point number, but got EOF.')
    # Bug fix: the previous hand-rolled decoder always ORed in the implicit
    # leading mantissa bit, so 0.0 decoded to ±2**-127 and subnormals,
    # infinities and NaNs were mishandled.  `struct' decodes all cases.
    return struct.unpack('<f', data)[0]
class Time(object):
    """A point in time, normalized to UTC; construction requires an aware
    `datetime'."""

    def __init__(self, time: datetime.datetime):
        if time.tzinfo is None:
            raise ValueError(
                "Expect an aware `datetime' object, but got naive one.")
        utc = datetime.timezone.utc
        self._time = time.astimezone(utc)

    def as_local_time(self) -> datetime.datetime:
        """Return this instant converted to the local time zone."""
        return self._time.astimezone()

    def __repr__(self) -> str:
        # ISO 8601 with a colon inserted into the UTC offset (+0000 -> +00:00).
        stamp = self._time.strftime("%Y-%m-%dT%H:%M:%S%z")
        return re.sub('(\\+\\d{2})(\\d{2})$', '\\1:\\2', stamp)

    def __lt__(self, other) -> bool:
        return self._time < other._time

    def __eq__(self, other) -> bool:
        return self._time == other._time
class Latitude(object):
    """A latitude in decimal degrees, restricted to [-90, 90]."""

    def __init__(self, degree: float):
        if degree < -90 or 90 < degree:
            # Bug fix: the message was a plain string missing its `f'
            # prefix, so it printed the literal text `{degree}'.
            raise ValueError(f"An invalid latitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        return format(self._degree, '.6F')

    def __lt__(self, other) -> bool:
        return self._degree < other._degree

    def __eq__(self, other) -> bool:
        return self._degree == other._degree
class Longitude(object):
    """A longitude in decimal degrees, restricted to [-180, 180]."""

    def __init__(self, degree: float):
        if degree < -180 or 180 < degree:
            # Bug fix: the message was a plain string missing its `f'
            # prefix, so it printed the literal text `{degree}'.
            raise ValueError(f"An invalid longitude degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        return format(self._degree, '.6F')

    def __lt__(self, other) -> bool:
        return self._degree < other._degree

    def __eq__(self, other) -> bool:
        return self._degree == other._degree
class Speed(object):
    """A speed in meters per second; rendered with two decimal places."""

    def __init__(self, meter_per_second: float):
        self._meter_per_second = meter_per_second

    def __repr__(self) -> str:
        return f'{self._meter_per_second:.2F}'
class Azimuth(object):
    """An azimuth (bearing) in decimal degrees within [0, 360)."""

    def __init__(self, degree: float):
        if degree < 0 or 360 <= degree:
            raise ValueError(f"An invalid azimuth degree: `{degree}'.")
        self._degree = degree

    def __repr__(self) -> str:
        return f'{self._degree:.2F}'
class TrackPoint(object):
    """One GPS sample: timestamp, fix status, coordinates, speed, azimuth,
    and raw 3-axis accelerometer readings."""

    def __init__(self, time: Time, status: str, latitude: Optional[Latitude],
                 longitude: Optional[Longitude], speed: Speed,
                 azimuth: Azimuth, x_acceleration: int, y_acceleration: int,
                 z_acceleration: int):
        # A point without a fix (`V' or unknown status) must not carry
        # coordinates, and a point with a fix must carry both.
        fixless = status == 'V' or status is None
        if fixless != (latitude is None):
            raise ValueError('Inconsistent arguments:'
                             f' status = {status}, latitude = {latitude}')
        if fixless != (longitude is None):
            raise ValueError('Inconsistent arguments:'
                             f' status = {status}, longitude = {longitude}')
        self._time = time
        self._status = status
        self._latitude = latitude
        self._longitude = longitude
        self._speed = speed
        self._azimuth = azimuth
        self._x_acceleration = x_acceleration
        self._y_acceleration = y_acceleration
        self._z_acceleration = z_acceleration

    @property
    def time(self) -> Time:
        return self._time

    @property
    def status(self) -> str:
        return self._status

    @property
    def latitude(self) -> Optional[Latitude]:
        return self._latitude

    @property
    def longitude(self) -> Optional[Longitude]:
        return self._longitude

    @property
    def speed(self) -> Speed:
        return self._speed

    @property
    def azimuth(self) -> Azimuth:
        return self._azimuth

    @property
    def x_acceleration(self) -> int:
        return self._x_acceleration

    @property
    def y_acceleration(self) -> int:
        return self._y_acceleration

    @property
    def z_acceleration(self) -> int:
        return self._z_acceleration

    @property
    def name(self) -> str:
        """A compact local-time stamp, usable as an output file name."""
        return self._time.as_local_time().strftime('%Y%m%d%H%M%S')

    def format_as_csv(self) -> str:
        """Render this point as one CSV row of nine fields."""
        if self._time is not None:
            stamp = self._time.as_local_time().strftime('%Y/%m/%d %H:%M:%S')
        else:
            stamp = ''
        fields = [
            stamp,
            self._status if self._status is not None else '',
            str(self._latitude) if self._latitude is not None else '',
            str(self._longitude) if self._longitude is not None else '',
            str(self._speed),
            str(self._azimuth),
            str(self._x_acceleration),
            str(self._y_acceleration),
            str(self._z_acceleration),
        ]
        return ','.join(fields)

    def __repr__(self) -> str:
        lat = str(self._latitude) if self._latitude is not None else ''
        lon = str(self._longitude) if self._longitude is not None else ''
        return f'{self._time},{lat},{lon}'

    def __lt__(self, other) -> bool:
        # Ordering is purely chronological.
        return self._time < other._time

    def __eq__(self, other) -> bool:
        return (self._time == other._time
                and self._latitude == other._latitude
                and self._longitude == other._longitude)
class TrackSegment(object):
    """An ordered sequence of track points, emitted as one GPX `<trkseg>'."""

    def __init__(self):
        self._points = []

    def append_track_point(self, track_point: TrackPoint) -> None:
        """Append *track_point* at the end of the segment."""
        self._points.append(track_point)

    def __len__(self) -> int:
        return len(self._points)

    def __iter__(self) -> Iterable[TrackPoint]:
        return iter(self._points)
# A 112-byte pattern that sometimes replaces part of the zero padding that
# follows each GPS record (see `parse_mp4_file'); its meaning is unknown,
# so the parser merely tolerates it when it appears.
# NOTE(review): semantics unconfirmed -- treated as opaque data.
_UNKNOWN_BYTES\
    = b'\x00\x21\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\xBC\xC7\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x3C\xDB\x17\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x18\xB5\x18\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\xA0\xFE\x19\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00\
\x20\xF9\x1B\x00\x00\x00\x00\x00\x80\x01\x00\x00\x01\x00\x00\x00\
\xAC\xB3\x1C\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00\x00\x00'
def parse_mp4_file(mp4_file_path: pathlib.Path) -> List[TrackPoint]:
    """Extract every track point recorded in the MP4 file at *mp4_file_path*.

    Each GPS data block is an 88-byte fixed-layout record (time, fix
    status, latitude, longitude, speed, azimuth, and 3-axis acceleration)
    wrapped in a `freeGPS ' box and followed by zero padding.

    Raises:
        BrokenMp4FileError: If the MP4 box structure is malformed.
        GpsDataError: If a GPS record violates the expected layout.
    """
    track_points = []
    with open(mp4_file_path, 'rb') as mp4_file:
        gps_data_block_indices = get_gps_data_block_indices(mp4_file)
        for gps_data_block_index in gps_data_block_indices:
            # --- Outer block header: 32-bit big-endian size that must
            # match the size recorded in the index.
            mp4_file.seek(gps_data_block_index.position)
            if mp4_file.tell() != gps_data_block_index.position:
                error_position = gps_data_block_index.position
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' a GPS data block, but got EOF.')
            large_block_size = mp4_file.read(4)
            if len(large_block_size) < 4:
                error_position = format(
                    mp4_file.tell() - len(large_block_size), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the size of a GPS data block, but got EOF.')
            large_block_size = int.from_bytes(large_block_size, 'big')
            if large_block_size != gps_data_block_index.size:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f'{mp4_file_path}:{error_position}: The\
size of a GPS data block is not equal to the one stored in the index.')
            large_block_end = mp4_file.tell() - 4 + large_block_size
            # --- Signature and inner 88-byte record header (the inner
            # size is little-endian, unlike the box sizes).
            signature = mp4_file.read(8)
            if len(signature) < 8:
                error_position = format(mp4_file.tell() - len(signature),
                                        '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the signature of a GPS data block, but got EOF.')
            signature = signature.decode('UTF-8')
            if signature != 'freeGPS ':
                error_position = format(mp4_file.tell() - 8, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
`freeGPS ' as the signature of a GPS data block, but got `{signature}'.")
            small_block_size = mp4_file.read(4)
            if len(small_block_size) < 4:
                error_position = format(
                    mp4_file.tell() - len(small_block_size), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the size of a GPS data block, but got EOF.')
            small_block_size = int.from_bytes(small_block_size, 'little')
            if small_block_size != 88:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
`88' as the size of a GPS data block, but got `{small_block_size}'.")
            small_block_end = mp4_file.tell() + small_block_size
            # --- 32 bytes of mandatory zero padding.
            padding = mp4_file.read(32)
            if len(padding) < 32:
                error_position = format(mp4_file.tell() - len(padding),
                                        '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' zero padding, but got EOF.')
            for j, b in enumerate(padding):
                if b != 0:
                    error_position = format(mp4_file.tell() - 32 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            # --- Timestamp: six 32-bit little-endian fields.  All-zero
            # fields mean "no timestamp recorded".
            hour = mp4_file.read(4)
            if len(hour) < 4:
                error_position = format(mp4_file.tell() - len(hour), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the hour of time, but got EOF.')
            hour = int.from_bytes(hour, 'little')
            if hour < 0 or 24 <= hour:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the hour of time, but got an invalid value `{hour}'.")
            minute = mp4_file.read(4)
            if len(minute) < 4:
                error_position = format(mp4_file.tell() - len(minute), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the minute of time, but got EOF.')
            minute = int.from_bytes(minute, 'little')
            if minute < 0 or 60 <= minute:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the minute of time, but got an invalid value `{minute}'.")
            second = mp4_file.read(4)
            if len(second) < 4:
                error_position = format(mp4_file.tell() - len(second), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the second of time, but got EOF.')
            second = int.from_bytes(second, 'little')
            if second < 0 or 60 <= second:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the second of time, but got an invalid value `{second}'.")
            year = mp4_file.read(4)
            if len(year) < 4:
                error_position = format(mp4_file.tell() - len(year), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the year of time, but got EOF.')
            year = int.from_bytes(year, 'little')
            if year == 0:
                # Year 0 marks a missing timestamp, so every smaller unit
                # must be 0 too.
                error_position = format(mp4_file.tell() - 4, '#010x')
                if hour != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `hour != 0'.")
                if minute != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `minute != 0'.")
                if second != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year == 0' but `second != 0'.")
            else:
                # The year is stored as an offset from 2000.
                year += 2000
            month = mp4_file.read(4)
            if len(month) < 4:
                error_position = format(mp4_file.tell() - len(month), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the month of time, but got EOF.')
            month = int.from_bytes(month, 'little')
            if month == 0:
                # NOTE(review): on this path `error_position' may be unbound
                # (it is only assigned on earlier error paths), which would
                # surface as a NameError instead of this GpsDataError.
                if year != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year != 0' but `month == 0'.")
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
            elif month < 1 or 12 < month:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the month of time, but got an invalid value `{month}'.")
            day = mp4_file.read(4)
            if len(day) < 4:
                error_position = format(mp4_file.tell() - len(day), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' the day of time, but got EOF.')
            day = int.from_bytes(day, 'little')
            if day == 0:
                # NOTE(review): same possibly-unbound `error_position' issue
                # as in the month check above.
                if year != 0:
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:"
                                       " `year != 0' but `day == 0'.")
                assert(month == 0)
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
            elif day < 1 or 31 < day:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
the day of time, but got an invalid value `{day}'.")
            if year == 0:
                assert(month == 0)
                assert(day == 0)
                assert(hour == 0)
                assert(minute == 0)
                assert(second == 0)
                time = None
            else:
                # The recorded fields are interpreted as wall-clock time in
                # this machine's current local time zone.
                # NOTE(review): assumes the camera clock used the same local
                # zone (and same DST offset) -- confirm.
                time = datetime.datetime.now(datetime.timezone.utc)
                time = time.astimezone()
                time = time.replace(
                    year=year, month=month, day=day, hour=hour, minute=minute,
                    second=second, microsecond=0)
                time = Time(time)
            # --- Fix status (`A' = active, `V' = void) and the N/S and
            # E/W hemisphere characters; all zero/absent when there is no
            # timestamp.
            if time is None:
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(mp4_file.tell() - len(padding),
                                            '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero-padding, but got EOF.')
                padding = int.from_bytes(padding, 'little')
                if padding != 0:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(
                        f"{mp4_file.name}:{error_position}: Expect"
                        f" zero-padding, but got `{padding}'.")
                status = None
                latitude_type = '0'
                longitude_type = '0'
            else:
                status = mp4_file.read(1)
                if len(status) < 1:
                    error_position = format(mp4_file.tell() - len(status),
                                            '#010x')
                    raise GpsDataError(
                        f'{mp4_file.name}:{error_position}: Expect a status'
                        ' character, but got EOF.')
                status = status.decode('UTF-8')
                if status not in ('A', 'V'):
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect `A' or `V' as a status character, but got an invalid character\
`{status}'.")
                latitude_type = mp4_file.read(1)
                if len(latitude_type) < 1:
                    error_position = format(
                        mp4_file.tell() - len(latitude_type), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:\
Expect a latitude type, but got EOF.')
                latitude_type = latitude_type.decode('UTF-8')
                if status == 'A':
                    if latitude_type not in ('N', 'S'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `N' or\
`S' as a latitude type, but got an invalid character `{latitude_type}'.")
                else:
                    assert(status == 'V')
                    if latitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect `0' as a latitude type, but got an invalid character\
`{latitude_type}'.")
                longitude_type = mp4_file.read(1)
                if len(longitude_type) < 1:
                    error_position = format(
                        mp4_file.tell() - len(longitude_type), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:\
Expect a longitude type, but got EOF.')
                longitude_type = longitude_type.decode('UTF-8')
                if status == 'A':
                    if longitude_type not in ('E', 'W'):
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(
                            f"{mp4_file.name}:{error_position}: Expect `E' or\
`W' as a longitude type, but got an invalid character `{longitude_type}'.")
                else:
                    assert(status == 'V')
                    if longitude_type != '0':
                        error_position = format(mp4_file.tell() - 1, '#010x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect `0' as a longitude type, but got an invalid character\
`{longitude_type}'.")
                padding = mp4_file.read(1)
                if len(padding) < 1:
                    error_position = format(mp4_file.tell() - len(padding),
                                            '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                if padding[0] != 0:
                    error_position = format(mp4_file.tell() - 1, '#010x')
                    byte = format(padding[0], '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            # --- Latitude: stored as a single-precision float in DMM
            # format (degrees * 100 + minutes); zero padding when no fix.
            if status == 'A':
                latitude_dmm = read_little_endian_single(mp4_file)
                latitude_degree = math.floor(latitude_dmm / 100)
                if latitude_degree < 0 or 90 < latitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.")
                latitude_minute = latitude_dmm - latitude_degree * 100
                if latitude_minute < 0 or 60 <= latitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.")
                latitude_degree += latitude_minute / 60
                latitude = Latitude(latitude_degree)
            else:
                assert(status == 'V' or status is None)
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(
                        mp4_file.tell() - len(padding), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(
                            mp4_file.tell() - 4 + j, '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
                latitude = None
            # --- Longitude: same DMM encoding as the latitude.
            if status == 'A':
                longitude_dmm = read_little_endian_single(mp4_file)
                longitude_degree = math.floor(longitude_dmm / 100)
                if longitude_degree < 0 or 180 < longitude_degree:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a longitude in DMM format, but got an invalid value\
`{longitude_dmm}'.")
                longitude_minute = longitude_dmm - longitude_degree * 100
                if longitude_minute < 0 or 60 <= longitude_minute:
                    error_position = format(mp4_file.tell() - 4, '#010x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect a longitude in DMM format, but got an invalid value\
`{longitude_dmm}'.")
                longitude_degree += longitude_minute / 60
                longitude = Longitude(longitude_degree)
            else:
                assert(status == 'V' or status is None)
                padding = mp4_file.read(4)
                if len(padding) < 4:
                    error_position = format(
                        mp4_file.tell() - len(padding), '#010x')
                    raise GpsDataError(f'{mp4_file.name}:{error_position}:'
                                       ' Expect zero padding, but got EOF.')
                for j, b in enumerate(padding):
                    if b != 0:
                        error_position = format(
                            mp4_file.tell() - 4 + j, '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
                longitude = None
            # --- Speed, azimuth, and raw accelerometer axes.
            speed = read_little_endian_single(mp4_file)
            # Presume that speed is recorded in knots.
            speed *= (1852 / 3600)
            speed = Speed(speed)
            azimuth = read_little_endian_single(mp4_file)
            if azimuth < 0 or 360 <= azimuth:
                error_position = format(mp4_file.tell() - 4, '#010x')
                raise GpsDataError(f"{mp4_file.name}:{error_position}: Expect\
azimuth degree, but got an invalid value `{azimuth}'.")
            azimuth = Azimuth(azimuth)
            x_acceleration = mp4_file.read(4)
            if len(x_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(x_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' X-axis acceleration, but got EOF.')
            x_acceleration = int.from_bytes(
                x_acceleration, 'little', signed=True)
            y_acceleration = mp4_file.read(4)
            if len(y_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(y_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' Y-axis acceleration, but got EOF.')
            y_acceleration = int.from_bytes(
                y_acceleration, 'little', signed=True)
            z_acceleration = mp4_file.read(4)
            if len(z_acceleration) < 4:
                error_position = format(
                    mp4_file.tell() - len(z_acceleration), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'
                                   ' Z-axis acceleration, but got EOF.')
            z_acceleration = int.from_bytes(
                z_acceleration, 'little', signed=True)
            # --- Trailing padding up to the end of the outer block; bytes
            # 420..532 may instead hold the opaque `_UNKNOWN_BYTES' pattern.
            if mp4_file.tell() != small_block_end:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
the end of a GPS data block, but got additional data.')
            padding_size = large_block_end - small_block_end
            if padding_size < 532:
                error_position = format(mp4_file.tell(), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
more than or equal to 532-byte padding, but got only {padding_size}-byte\
padding.')
            padding = mp4_file.read(padding_size)
            if len(padding) < padding_size:
                error_position = format(
                    mp4_file.tell() - len(padding), '#010x')
                raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\
{padding_size}-byte padding, but got EOF.')
            for j, b in enumerate(padding[:420]):
                if b != 0:
                    error_position = format(small_block_end + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            # `_UNKNOWN_BYTES` may appear in the zero padding. However,
            # what this means is unknown. Therefore, just skip it if it
            # appears.
            if padding[420:532] != _UNKNOWN_BYTES:
                for j, b in enumerate(padding[420:532]):
                    if b != 0:
                        error_position = format(small_block_end + 420 + j,
                                                '#010x')
                        byte = format(b, '#04x')
                        raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            for j, b in enumerate(padding[532:]):
                if b != 0:
                    error_position = format(small_block_end + 532 + j, '#010x')
                    byte = format(b, '#04x')
                    raise GpsDataError(f"{mp4_file.name}:{error_position}:\
Expect zero padding, but got an invalid byte `{byte}'.")
            track_point = TrackPoint(
                time, status, latitude, longitude, speed, azimuth,
                x_acceleration, y_acceleration, z_acceleration)
            track_points.append(track_point)
    return track_points
def read_input_paths(input_paths: List[pathlib.Path]) -> List[TrackPoint]:
    """Parse every input MP4 file and concatenate the resulting points.

    A directory entry is walked recursively; files whose suffix is
    `.mp4' or `.MP4' are parsed in sorted path order.
    """
    track_points = []
    for input_path in input_paths:
        if not input_path.is_dir():
            track_points.extend(parse_mp4_file(input_path))
            continue
        file_paths = []
        for dirpath, _dirnames, filenames in os.walk(input_path):
            base = pathlib.Path(dirpath)
            file_paths.extend(
                base / filename for filename in filenames
                if (base / filename).suffix in ('.mp4', '.MP4'))
        for file_path in sorted(file_paths):
            track_points.extend(parse_mp4_file(file_path))
    return track_points
def write_csv_file(args: Arguments,
                   track_points: List[TrackPoint]) -> pathlib.Path:
    """Write *track_points* to `<name>.csv' and return that path.

    Exits the process with status 1 when `--name' was not given, or when
    the file already exists and `--overwrite' was not requested.
    """
    if args.name is None:
        print("`--name' is required to output a CSV file.", file=sys.stderr)
        sys.exit(1)
    csv_file_path = pathlib.Path(f'{args.name}.csv')
    if csv_file_path.exists() and not args.overwrite:
        print(f"{csv_file_path}: File already exists.", file=sys.stderr)
        sys.exit(1)
    with open(csv_file_path, 'w') as csv_file:
        csv_file.writelines(
            track_point.format_as_csv() + '\n' for track_point in track_points)
    return csv_file_path
def create_track_segments(
    args: Arguments, track_points: List[TrackPoint]) -> List[TrackSegment]:
    """Filter, sort, and de-duplicate *track_points* into track segments.

    Only points with GPS status `A' (active fix) are kept.  Points sharing
    a timestamp are collapsed: identical duplicates are always dropped,
    and conflicting ones are resolved per `args.how_to_unique' (`first' or
    `last').  All surviving points form a single `TrackSegment'.

    Raises:
        RuntimeError: If same-timestamp points with different coordinates
            exist and no `--uniq' policy was given.
    """
    # Drop points without a fix; they carry no coordinates.
    new_track_points = []
    for track_point in track_points:
        if track_point.status != 'A':
            assert(track_point.latitude is None)
            assert(track_point.longitude is None)
            continue
        assert(track_point.latitude is not None)
        assert(track_point.longitude is not None)
        new_track_points.append(track_point)
    track_points = new_track_points
    track_points.sort()
    if len(track_points) == 0:
        return []
    # Walk the chronologically sorted points keeping one representative
    # per timestamp; the representative is flushed when the time changes.
    unique_track_points = []
    it = iter(track_points)
    representative_track_point = next(it)
    while True:
        track_point = next(it, None)
        if track_point is None:
            unique_track_points.append(representative_track_point)
            break
        if track_point.time != representative_track_point.time:
            unique_track_points.append(representative_track_point)
            representative_track_point = track_point
            continue
        # Same timestamp and same coordinates: an exact duplicate.
        if track_point.latitude == representative_track_point.latitude\
            and track_point.longitude == representative_track_point.longitude:
            continue
        # Same timestamp, different coordinates: apply the `--uniq' policy.
        if args.how_to_unique == 'first':
            continue
        elif args.how_to_unique == 'last':
            representative_track_point = track_point
        else:
            raise RuntimeError("There exist track points with the same\
timestamp but different coordinates. Use `--uniq' option.")
    # All points go into one segment.
    track_segments = []
    track_segments.append(TrackSegment())
    for track_point in unique_track_points:
        track_segments[0].append_track_point(track_point)
    return track_segments
def as_xml_attribute(data: str) -> str:
    """Return *data* quoted and escaped as an XML attribute value."""
    quoted = xml.sax.saxutils.quoteattr(data)
    return quoted
def as_xml_data(data: str) -> str:
    """Return *data* with XML-special characters escaped for element text."""
    escaped = xml.sax.saxutils.escape(data)
    return escaped
def get_local_time_in_iso8601() -> str:
    """Return the current local time as `YYYY-MM-DDThh:mm:ss±hh:mm'.

    `isoformat(timespec='seconds')' already renders the offset with a
    colon, yielding the same string the strftime/regex pair produced.
    """
    local_now = datetime.datetime.now(datetime.timezone.utc).astimezone()
    return local_now.isoformat(timespec='seconds')
def write_gpx_file(args: Arguments,
                   track_segments: List[TrackSegment]) -> pathlib.Path:
    """Write *track_segments* as a GPX 1.1 file and return its path.

    The file is named `<name>.gpx', where the name comes from `--name' or,
    failing that, from the timestamp of the earliest track point.  After
    writing, the file is validated with the external `xmllint' tool
    against a local `gpx.xsd' schema; validation failure only prints a
    warning.  Exits with status 1 when the file already exists and
    `--overwrite' was not requested.

    Raises:
        ValueError: If no name is given and there are no track points.
        RuntimeError: If `--email' is not of the form `id@domain'.
    """
    all_track_points = []
    for track_segment in track_segments:
        for track_point in track_segment:
            all_track_points.append(track_point)
    name = args.name
    if name is None:
        if len(all_track_points) == 0:
            raise ValueError(
                "`--name' is not specified, and there is no track point.")
        all_track_points.sort()
        name = all_track_points[0].name
    gpx_file_path = pathlib.Path(f'{name}.gpx')
    # Compute the bounding box for the <bounds> metadata element.
    # NOTE(review): assumes every point has coordinates (guaranteed when
    # segments come from `create_track_segments', which keeps only
    # status-`A' points); a None latitude would break the sort.
    bounds = None
    if len(all_track_points) > 0:
        latitudes = list(t.latitude for t in all_track_points)
        latitudes.sort()
        longitudes = list(t.longitude for t in all_track_points)
        longitudes.sort()
        bounds = (latitudes[0], longitudes[0], latitudes[-1], longitudes[-1])
    if gpx_file_path.exists():
        if not args.overwrite:
            print(f'{gpx_file_path}: Error: File already exists.',
                  file=sys.stderr)
            sys.exit(1)
    with open(gpx_file_path, 'w') as gpx_file:
        # --- <metadata>: name, description, author, copyright, time,
        # keywords, and bounds, each emitted only when provided.
        print('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>',
              file=gpx_file)
        print('<gpx xmlns="http://www.topografix.com/GPX/1/1" version="1.1"'
              ' creator="papago2gpx">', file=gpx_file)
        print('  <metadata>', file=gpx_file)
        print(f'    <name>{as_xml_data(name)}</name>', file=gpx_file)
        if args.description is not None:
            description = as_xml_data(args.description)
            print(f'    <desc>{description}</desc>', file=gpx_file)
        if args.author_name is not None or args.author_email is not None:
            print('    <author>', file=gpx_file)
            if args.author_name is not None:
                author_name = as_xml_data(args.author_name)
                print(f'      <name>{author_name}</name>', file=gpx_file)
            if args.author_email is not None:
                # GPX stores the address split into id and domain.
                author_email_parts = args.author_email.split('@', 1)
                if len(author_email_parts) != 2:
                    raise RuntimeError(
                        f'An invalid E-mail address: {args.author_email}')
                author_email_id = as_xml_attribute(author_email_parts[0])
                author_email_domain = as_xml_attribute(author_email_parts[1])
                print(f'      <email id={author_email_id}\
domain={author_email_domain}/>', file=gpx_file)
            print('    </author>', file=gpx_file)
        if args.copyright is not None:
            # NOTE(review): this local deliberately shadows the builtin
            # `copyright'.
            copyright = as_xml_attribute(args.copyright)
            print(f'    <copyright author={copyright}', end='', file=gpx_file)
            copyright_year = args.copyright_year
            copyright_license = args.copyright_license
            if copyright_year is not None or copyright_license is not None:
                print('>', file=gpx_file)
                if copyright_year is not None:
                    copyright_year = as_xml_data(str(copyright_year))
                    print(f'      <year>{copyright_year}</year>',
                          file=gpx_file)
                if copyright_license is not None:
                    copyright_license = as_xml_data(copyright_license)
                    print(f'      <license>{copyright_license}</license>',
                          file=gpx_file)
                print('    </copyright>', file=gpx_file)
            else:
                # No children: close <copyright> as an empty element.
                print('/>', file=gpx_file)
        print(f'    <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)
        if args.keywords is not None:
            keywords = as_xml_data(args.keywords)
            print(f'    <keywords>{keywords}</keywords>', file=gpx_file)
        if bounds is not None:
            print(f'    <bounds minlat="{bounds[0]}" minlon="{bounds[1]}"\
maxlat="{bounds[2]}" maxlon="{bounds[3]}"/>', file=gpx_file)
        print('  </metadata>', file=gpx_file)
        # --- <trk>: optional descriptive elements, then one <trkseg> per
        # track segment with a <trkpt> per point.
        print('  <trk>', file=gpx_file)
        if args.track_name is not None:
            track_name = as_xml_data(args.track_name)
            print(f'    <name>{track_name}</name>', file=gpx_file)
        if args.track_comment is not None:
            track_comment = as_xml_data(args.track_comment)
            print(f'    <cmt>{track_comment}</cmt>', file=gpx_file)
        if args.track_description is not None:
            track_description = as_xml_data(args.track_description)
            print(f'    <desc>{track_description}</desc>', file=gpx_file)
        if args.track_type is not None:
            track_type = as_xml_data(args.track_type)
            print(f'    <type>{track_type}</type>', file=gpx_file)
        for track_segment in track_segments:
            print('    <trkseg>', file=gpx_file)
            for track_point in track_segment:
                print(f'      <trkpt lat="{track_point.latitude}"\
lon="{track_point.longitude}">', file=gpx_file)
                print(f'        <time>{track_point.time}</time>',
                      file=gpx_file)
                print('      </trkpt>', file=gpx_file)
            print('    </trkseg>', file=gpx_file)
        print('  </trk>', file=gpx_file)
        print('</gpx>', file=gpx_file)
    # Validate the output with xmllint; requires `xmllint' on PATH and a
    # local `gpx.xsd' schema file.  Failure is reported but not fatal.
    proc = subprocess.run(
        ['xmllint', '--schema', 'gpx.xsd', str(gpx_file_path)],
        stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE, encoding='UTF-8')
    if proc.returncode != 0:
        print(f"""Failed to validate the GPX file `{gpx_file_path}'.
  command: {proc.args}
  stdout: {proc.stdout}
  stderr: {proc.stderr}
  returncode: {proc.returncode}""", file=sys.stderr)
    return gpx_file_path
if __name__ == '__main__':
    # Parse the command line, then emit both a CSV dump and a GPX track.
    args = Arguments()
    track_points = read_input_paths(args.input_paths)
    csv_file_path = write_csv_file(args, track_points)
    print(f"Succeeded! The result is output to `{csv_file_path}'.")
    track_segments = create_track_segments(args, track_points)
    if args.name is None and len(track_segments) == 0:
        # Without `--name' the GPX file name is derived from the first
        # track point, so an empty track cannot be named.
        print("`--name' is not specified, and there is no track segment.",
              file=sys.stderr)
        sys.exit(1)
    if len(track_segments) == 0:
        print('WARNING: There is no track segment.', file=sys.stderr)
    gpx_file_path = write_gpx_file(args, track_segments)
    print(f"Succeeded! The result is output to `{gpx_file_path}'.")
    sys.exit(0)
|
flexible
|
{
"blob_id": "fbb1254c7166fa2aa9cd8a0b9c6525dbe5b652a0",
"index": 2625,
"step-1": "<mask token>\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\n<mask token>\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n 
def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) ->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) ->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n 
local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) ->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def __iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Arguments(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def author_name(self) ->Optional[str]:\n return self._author_name\n <mask token>\n\n @property\n def copyright(self) ->Optional[str]:\n return self._copyright\n <mask token>\n <mask token>\n\n @property\n def keywords(self) ->Optional[str]:\n return self._keywords\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BrokenMp4FileError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\n<mask token>\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n 
def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) ->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) 
->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) ->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def 
__iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Arguments(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(prog='papago2gpx', description=\n 'Extract GPS data from MP4 video files created by PAPAGO! dashcams, and format them into a GPX file.'\n )\n parser.add_argument('input_paths', nargs='+', help=\n 'The path to an input file or directory.', metavar='INPUT_PATH')\n parser.add_argument('--name', help=\n 'The name of the GPX file to output. Default to 16 deciaml digits representing the first GPS record time.'\n , metavar='NAME')\n parser.add_argument('--description', help=\n 'The description of the GPX file to output.', metavar='DESCRIPTION'\n )\n parser.add_argument('--author-name', help=\n 'The name of the author of the GPX file to output.', metavar=\n 'AUTHOR_NAME')\n parser.add_argument('--author-email', help=\n 'The Email address of the author of the GPX file to output.',\n metavar='AUTHOR_EMAIL')\n parser.add_argument('--copyright', help=\n \"The copyright holder of the GPX file to output. Default to `AUTHOR_NAME'.\"\n , metavar='COPYRIGHT')\n parser.add_argument('--copyright-year', help=\n 'The copyright year of the GPX file to output. Default to the year the file is created.'\n , metavar='COPYRIGHT_YEAR')\n parser.add_argument('--copyright-license', help=\n 'A link to an external file containing license text.', metavar=\n 'LICENSE')\n parser.add_argument('--keywords', help=\n 'Keywords associated with the GPX file to output.', metavar=\n 'KEYWORDS')\n parser.add_argument('--track-name', help='The name of the track.',\n metavar='TRACK_NAME')\n parser.add_argument('--track-comment', help=\n 'The comment of the track.', metavar='TRACK_COMMENT')\n parser.add_argument('--track-description', help=\n 'The description of the track.', metavar='TRACK_DESCRIPTION')\n parser.add_argument('--track-type', default=_DEFAULT_TRACK_TYPE,\n help=f\"The type of the track. 
Default to `{_DEFAULT_TRACK_TYPE}'.\")\n parser.add_argument('--uniq', choices=['first', 'last'], help=\n 'How to process different coordinates recorded at the same timestamp. Default to an error.'\n )\n parser.add_argument('--overwrite', action='store_true', help=\n 'Allow to overwrite an existing file.')\n args = parser.parse_args()\n self._input_paths = []\n for input_path in args.input_paths:\n input_path = pathlib.Path(input_path)\n if not input_path.exists():\n print(f'{input_path}: File does not exist.', file=sys.stderr)\n sys.exit(1)\n self._input_paths.append(input_path)\n self._name = args.name\n self._description = args.description\n self._author_name = args.author_name\n self._author_email = args.author_email\n self._copyright = args.copyright\n if self._copyright is None and self._author_name is not None:\n self._copyright = self._author_name\n self._copyright_year = args.copyright_year\n if self._copyright_year is not None and self._copyright is None:\n print(\"`--copyright-year' is specified, but `--copyright' is not.\",\n file=sys.stderr)\n sys.exit(1)\n if self._copyright_year is None and self._copyright is not None:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n self._copyright_year = local_aware_now.year\n self._copyright_license = args.copyright_license\n if self._copyright_license is not None and self._copyright is None:\n print(\n \"`--copyright-license' is specified, but `--copyright' is not.\"\n , file=sys.stderr)\n sys.exit(1)\n self._keywords = args.keywords\n self._track_name = args.track_name\n self._track_comment = args.track_comment\n self._track_description = args.track_description\n self._track_type = args.track_type\n if self._track_type is None:\n self._track_type = _DEFAULT_TRACK_TYPE\n if self._track_type == '':\n self._track_type = None\n self._how_to_unique = args.uniq\n self._overwrite = args.overwrite\n\n @property\n def input_paths(self) ->List[pathlib.Path]:\n return 
self._input_paths\n <mask token>\n\n @property\n def description(self) ->Optional[str]:\n return self._description\n\n @property\n def author_name(self) ->Optional[str]:\n return self._author_name\n\n @property\n def author_email(self) ->Optional[str]:\n return self._author_email\n\n @property\n def copyright(self) ->Optional[str]:\n return self._copyright\n\n @property\n def copyright_year(self) ->Optional[int]:\n return self._copyright_year\n\n @property\n def copyright_license(self) ->Optional[str]:\n return self._copyright_license\n\n @property\n def keywords(self) ->Optional[str]:\n return self._keywords\n\n @property\n def track_name(self) ->Optional[str]:\n return self._track_name\n\n @property\n def track_comment(self) ->Optional[str]:\n return self._track_comment\n\n @property\n def track_description(self) ->Optional[str]:\n return self._track_description\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BrokenMp4FileError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\n<mask token>\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return 
re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, 
longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) ->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) ->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n 
return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) ->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def __iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n<mask token>\n",
"step-4": "import re\nimport datetime\nimport math\nimport pathlib\nimport os\nimport io\nimport argparse\nimport subprocess\nimport xml.sax.saxutils\nfrom typing import Optional, List, Iterable\nimport sys\n_DEFAULT_TRACK_TYPE = 'Dashcam track'\n\n\nclass Arguments(object):\n\n def __init__(self):\n parser = argparse.ArgumentParser(prog='papago2gpx', description=\n 'Extract GPS data from MP4 video files created by PAPAGO! dashcams, and format them into a GPX file.'\n )\n parser.add_argument('input_paths', nargs='+', help=\n 'The path to an input file or directory.', metavar='INPUT_PATH')\n parser.add_argument('--name', help=\n 'The name of the GPX file to output. Default to 16 deciaml digits representing the first GPS record time.'\n , metavar='NAME')\n parser.add_argument('--description', help=\n 'The description of the GPX file to output.', metavar='DESCRIPTION'\n )\n parser.add_argument('--author-name', help=\n 'The name of the author of the GPX file to output.', metavar=\n 'AUTHOR_NAME')\n parser.add_argument('--author-email', help=\n 'The Email address of the author of the GPX file to output.',\n metavar='AUTHOR_EMAIL')\n parser.add_argument('--copyright', help=\n \"The copyright holder of the GPX file to output. Default to `AUTHOR_NAME'.\"\n , metavar='COPYRIGHT')\n parser.add_argument('--copyright-year', help=\n 'The copyright year of the GPX file to output. 
Default to the year the file is created.'\n , metavar='COPYRIGHT_YEAR')\n parser.add_argument('--copyright-license', help=\n 'A link to an external file containing license text.', metavar=\n 'LICENSE')\n parser.add_argument('--keywords', help=\n 'Keywords associated with the GPX file to output.', metavar=\n 'KEYWORDS')\n parser.add_argument('--track-name', help='The name of the track.',\n metavar='TRACK_NAME')\n parser.add_argument('--track-comment', help=\n 'The comment of the track.', metavar='TRACK_COMMENT')\n parser.add_argument('--track-description', help=\n 'The description of the track.', metavar='TRACK_DESCRIPTION')\n parser.add_argument('--track-type', default=_DEFAULT_TRACK_TYPE,\n help=f\"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.\")\n parser.add_argument('--uniq', choices=['first', 'last'], help=\n 'How to process different coordinates recorded at the same timestamp. Default to an error.'\n )\n parser.add_argument('--overwrite', action='store_true', help=\n 'Allow to overwrite an existing file.')\n args = parser.parse_args()\n self._input_paths = []\n for input_path in args.input_paths:\n input_path = pathlib.Path(input_path)\n if not input_path.exists():\n print(f'{input_path}: File does not exist.', file=sys.stderr)\n sys.exit(1)\n self._input_paths.append(input_path)\n self._name = args.name\n self._description = args.description\n self._author_name = args.author_name\n self._author_email = args.author_email\n self._copyright = args.copyright\n if self._copyright is None and self._author_name is not None:\n self._copyright = self._author_name\n self._copyright_year = args.copyright_year\n if self._copyright_year is not None and self._copyright is None:\n print(\"`--copyright-year' is specified, but `--copyright' is not.\",\n file=sys.stderr)\n sys.exit(1)\n if self._copyright_year is None and self._copyright is not None:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n 
self._copyright_year = local_aware_now.year\n self._copyright_license = args.copyright_license\n if self._copyright_license is not None and self._copyright is None:\n print(\n \"`--copyright-license' is specified, but `--copyright' is not.\"\n , file=sys.stderr)\n sys.exit(1)\n self._keywords = args.keywords\n self._track_name = args.track_name\n self._track_comment = args.track_comment\n self._track_description = args.track_description\n self._track_type = args.track_type\n if self._track_type is None:\n self._track_type = _DEFAULT_TRACK_TYPE\n if self._track_type == '':\n self._track_type = None\n self._how_to_unique = args.uniq\n self._overwrite = args.overwrite\n\n @property\n def input_paths(self) ->List[pathlib.Path]:\n return self._input_paths\n\n @property\n def name(self) ->Optional[str]:\n return self._name\n\n @property\n def description(self) ->Optional[str]:\n return self._description\n\n @property\n def author_name(self) ->Optional[str]:\n return self._author_name\n\n @property\n def author_email(self) ->Optional[str]:\n return self._author_email\n\n @property\n def copyright(self) ->Optional[str]:\n return self._copyright\n\n @property\n def copyright_year(self) ->Optional[int]:\n return self._copyright_year\n\n @property\n def copyright_license(self) ->Optional[str]:\n return self._copyright_license\n\n @property\n def keywords(self) ->Optional[str]:\n return self._keywords\n\n @property\n def track_name(self) ->Optional[str]:\n return self._track_name\n\n @property\n def track_comment(self) ->Optional[str]:\n return self._track_comment\n\n @property\n def track_description(self) ->Optional[str]:\n return self._track_description\n\n @property\n def track_type(self) ->Optional[str]:\n return self._track_type\n\n @property\n def how_to_unique(self) ->str:\n return self._how_to_unique\n\n @property\n def overwrite(self) ->bool:\n return self._overwrite\n\n\nclass BrokenMp4FileError(RuntimeError):\n\n def __init__(self, message: str):\n 
super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) ->int:\n return self._position\n\n @property\n def size(self) ->int:\n return self._size\n\n\ndef get_gps_data_block_indices(mp4_file: io.FileIO) ->List[GpsDataBlockIndex]:\n target_box_path = ['moov', 'gps ']\n while True:\n box_size = mp4_file.read(4)\n if len(box_size) == 0:\n raise GpsDataError(\n f'{mp4_file.name}: Could not find any GPS data block index.')\n if len(box_size) < 4:\n error_position = format(mp4_file.tell() - len(box_size), '#010x')\n raise BrokenMp4FileError(\n f'{mp4_file.name}:{error_position}: Expect the size of a box, but got EOF.'\n )\n box_size = int.from_bytes(box_size, 'big')\n box_type = mp4_file.read(4)\n if len(box_type) < 4:\n error_position = format(mp4_file.tell() - len(box_type), '#010x')\n raise BrokenMp4FileError(\n f'{mp4_file.name}:{error_position}: Expect the type of a box, but got EOF.'\n )\n box_type = box_type.decode('UTF-8')\n if box_size == 0:\n box_size = None\n next_position = None\n elif box_size == 1:\n box_size = mp4_file.read(8)\n if len(box_size) < 8:\n error_position = format(mp4_file.tell() - len(box_size),\n '#010x')\n raise BrokenMp4FileError(\n f'{mp4_file.name}:{error_position}: Expect the size of a box, but got EOF.'\n )\n box_size = int.from_bytes(box_size, 'big')\n next_position = mp4_file.tell() + box_size - 16\n else:\n next_position = mp4_file.tell() + box_size - 8\n if box_type == target_box_path[0]:\n target_box_path.pop(0)\n if len(target_box_path) == 0:\n break\n else:\n if next_position is None:\n raise GpsDataError(\n f'{mp4_file.name}: Could not find 
any GPS data block index.'\n )\n mp4_file.seek(next_position)\n if mp4_file.tell() != next_position:\n raise BrokenMp4FileError(\n f'{mp4_file.name}: The size of a box is not equal to the actual one.'\n )\n unknown = mp4_file.read(4)\n if len(unknown) < 4:\n error_position = format(mp4_file.tell() - len(unknown), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer, but got EOF.'\n )\n unknown = int.from_bytes(unknown, 'big')\n if unknown != 257:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer with value `257', but got `{unknown}'.\"\n )\n gps_data_block_count = mp4_file.read(4)\n if len(gps_data_block_count) < 4:\n error_position = format(mp4_file.tell() - len(gps_data_block_count),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a big-endian 32-bit unsigned integer, but got EOF.'\n )\n gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')\n gps_data_block_indices = []\n for i in range(gps_data_block_count):\n position = mp4_file.read(4)\n if len(position) < 4:\n error_position = format(mp4_file.tell() - len(position), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the position of a GPS data block, but got EOF.'\n )\n position = int.from_bytes(position, 'big')\n if position < 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the position of a GPS data block, but got an invalid value `{position}'.\"\n )\n size = mp4_file.read(4)\n if len(size) < 4:\n error_position = format(mp4_file.tell() - len(size), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'\n )\n size = int.from_bytes(size, 'big')\n if size < 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise 
GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got an invalid value `{size}'.\"\n )\n if position == 0 or size == 0:\n print(\n f'{mp4_file.name}: Warning: The index of GPS data blocks is not recorded.'\n , file=sys.stderr)\n else:\n gps_data_block_index = GpsDataBlockIndex(position, size)\n gps_data_block_indices.append(gps_data_block_index)\n if mp4_file.tell() != next_position:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(\n f'{mp4_file_path}:{error_position}: Expect EOF, but find additional data.'\n )\n return gps_data_block_indices\n\n\ndef read_little_endian_single(mp4_file: io.FileIO) ->float:\n data = mp4_file.read(4)\n if len(data) < 4:\n error_position = format(mp4_file.tell() - len(data), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a little-endian single-precision floating point number, but got EOF.'\n )\n data = int.from_bytes(data, 'little')\n sign = (data & 2147483648) >> 31\n exponent = ((data & 2139095040) >> 23) - 127\n mantissa = data & 8388607 | 8388608\n sign = '+' if sign == 0 else '-'\n exponent = str(exponent - 23)\n mantissa_hex = format(mantissa, '08x')\n return float.fromhex(f'{sign}0x{mantissa_hex}p{exponent}')\n\n\nclass Time(object):\n\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) ->datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) ->str:\n result = self._time.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n\n def __init__(self, degree: float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An 
invalid latitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) ->bool:\n return self._degree < other._degree\n\n def __eq__(self, other) ->bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) ->str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n self._degree = degree\n\n def __repr__(self) ->str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude\n ], longitude: Optional[Longitude], speed: Speed, azimuth: Azimuth,\n x_acceleration: int, y_acceleration: int, z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, latitude = {latitude}'\n )\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError(\n f'Inconsistent arguments: status = {status}, longitude = {longitude}'\n )\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) 
->Time:\n return self._time\n\n @property\n def status(self) ->str:\n return self._status\n\n @property\n def latitude(self) ->Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) ->Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) ->Speed:\n return self._speed\n\n @property\n def azimuth(self) ->Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) ->int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) ->int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) ->int:\n return self._z_acceleration\n\n @property\n def name(self) ->str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) ->str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) ->str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) ->bool:\n return self._time < other._time\n\n def __eq__(self, other) ->bool:\n return (self._time == other._time and self._latitude == other.\n _latitude and self._longitude == other._longitude)\n\n\nclass TrackSegment(object):\n\n def __init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) 
->None:\n self._track_points.append(track_point)\n\n def __len__(self) ->int:\n return len(self._track_points)\n\n def __iter__(self) ->Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n_UNKNOWN_BYTES = (\n b'\\x00!\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\xbc\\xc7\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00<\\xdb\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x18\\xb5\\x18\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\xa0\\xfe\\x19\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00 \\xf9\\x1b\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\xac\\xb3\\x1c\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00'\n )\n\n\ndef parse_mp4_file(mp4_file_path: pathlib.Path) ->List[TrackPoint]:\n track_points = []\n with open(mp4_file_path, 'rb') as mp4_file:\n gps_data_block_indices = get_gps_data_block_indices(mp4_file)\n for gps_data_block_index in gps_data_block_indices:\n mp4_file.seek(gps_data_block_index.position)\n if mp4_file.tell() != gps_data_block_index.position:\n error_position = gps_data_block_index.position\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a GPS data block, but got EOF.'\n )\n large_block_size = mp4_file.read(4)\n if len(large_block_size) < 4:\n error_position = format(mp4_file.tell() - len(\n large_block_size), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'\n )\n large_block_size = int.from_bytes(large_block_size, 'big')\n if large_block_size != gps_data_block_index.size:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f'{mp4_file_path}:{error_position}: The size of a GPS data block is not equal to the one stored in the index.'\n )\n large_block_end = mp4_file.tell() - 4 + large_block_size\n signature = mp4_file.read(8)\n if len(signature) < 8:\n error_position = 
format(mp4_file.tell() - len(signature),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the signature of a GPS data block, but got EOF.'\n )\n signature = signature.decode('UTF-8')\n if signature != 'freeGPS ':\n error_position = format(mp4_file.tell() - 8, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `freeGPS ' as the signature of a GPS data block, but got `{signature}'.\"\n )\n small_block_size = mp4_file.read(4)\n if len(small_block_size) < 4:\n error_position = format(mp4_file.tell() - len(\n small_block_size), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the size of a GPS data block, but got EOF.'\n )\n small_block_size = int.from_bytes(small_block_size, 'little')\n if small_block_size != 88:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `88' as the size of a GPS data block, but got `{small_block_size}'.\"\n )\n small_block_end = mp4_file.tell() + small_block_size\n padding = mp4_file.read(32)\n if len(padding) < 32:\n error_position = format(mp4_file.tell() - len(padding), '#010x'\n )\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 32 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n hour = mp4_file.read(4)\n if len(hour) < 4:\n error_position = format(mp4_file.tell() - len(hour), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the hour of time, but got EOF.'\n )\n hour = int.from_bytes(hour, 'little')\n if hour < 0 or 24 <= hour:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the hour of time, but got an invalid value `{hour}'.\"\n 
)\n minute = mp4_file.read(4)\n if len(minute) < 4:\n error_position = format(mp4_file.tell() - len(minute), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the minute of time, but got EOF.'\n )\n minute = int.from_bytes(minute, 'little')\n if minute < 0 or 60 <= minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the minute of time, but got an invalid value `{minute}'.\"\n )\n second = mp4_file.read(4)\n if len(second) < 4:\n error_position = format(mp4_file.tell() - len(second), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the second of time, but got EOF.'\n )\n second = int.from_bytes(second, 'little')\n if second < 0 or 60 <= second:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the second of time, but got an invalid value `{second}'.\"\n )\n year = mp4_file.read(4)\n if len(year) < 4:\n error_position = format(mp4_file.tell() - len(year), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the year of time, but got EOF.'\n )\n year = int.from_bytes(year, 'little')\n if year == 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n if hour != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year == 0' but `hour != 0'.\"\n )\n if minute != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year == 0' but `minute != 0'.\"\n )\n if second != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year == 0' but `second != 0'.\"\n )\n else:\n year += 2000\n month = mp4_file.read(4)\n if len(month) < 4:\n error_position = format(mp4_file.tell() - len(month), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the month of time, but got EOF.'\n )\n month = int.from_bytes(month, 'little')\n if month == 0:\n if year != 0:\n raise GpsDataError(\n 
f\"{mp4_file.name}:{error_position}: `year != 0' but `month == 0'.\"\n )\n assert hour == 0\n assert minute == 0\n assert second == 0\n elif month < 1 or 12 < month:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the month of time, but got an invalid value `{month}'.\"\n )\n day = mp4_file.read(4)\n if len(day) < 4:\n error_position = format(mp4_file.tell() - len(day), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the day of time, but got EOF.'\n )\n day = int.from_bytes(day, 'little')\n if day == 0:\n if year != 0:\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: `year != 0' but `day == 0'.\"\n )\n assert month == 0\n assert hour == 0\n assert minute == 0\n assert second == 0\n elif day < 1 or 31 < day:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect the day of time, but got an invalid value `{day}'.\"\n )\n if year == 0:\n assert month == 0\n assert day == 0\n assert hour == 0\n assert minute == 0\n assert second == 0\n time = None\n else:\n time = datetime.datetime.now(datetime.timezone.utc)\n time = time.astimezone()\n time = time.replace(year=year, month=month, day=day, hour=\n hour, minute=minute, second=second, microsecond=0)\n time = Time(time)\n if time is None:\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero-padding, but got EOF.'\n )\n padding = int.from_bytes(padding, 'little')\n if padding != 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero-padding, but got `{padding}'.\"\n )\n status = None\n latitude_type = '0'\n longitude_type = '0'\n else:\n status = mp4_file.read(1)\n if len(status) < 1:\n error_position = format(mp4_file.tell() - 
len(status),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a status character, but got EOF.'\n )\n status = status.decode('UTF-8')\n if status not in ('A', 'V'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `A' or `V' as a status character, but got an invalid character `{status}'.\"\n )\n latitude_type = mp4_file.read(1)\n if len(latitude_type) < 1:\n error_position = format(mp4_file.tell() - len(\n latitude_type), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a latitude type, but got EOF.'\n )\n latitude_type = latitude_type.decode('UTF-8')\n if status == 'A':\n if latitude_type not in ('N', 'S'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `N' or `S' as a latitude type, but got an invalid character `{latitude_type}'.\"\n )\n else:\n assert status == 'V'\n if latitude_type != '0':\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `0' as a latitude type, but got an invalid character `{latitude_type}'.\"\n )\n longitude_type = mp4_file.read(1)\n if len(longitude_type) < 1:\n error_position = format(mp4_file.tell() - len(\n longitude_type), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a longitude type, but got EOF.'\n )\n longitude_type = longitude_type.decode('UTF-8')\n if status == 'A':\n if longitude_type not in ('E', 'W'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `E' or `W' as a longitude type, but got an invalid character `{longitude_type}'.\"\n )\n else:\n assert status == 'V'\n if longitude_type != '0':\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `0' as a longitude type, but got 
an invalid character `{longitude_type}'.\"\n )\n padding = mp4_file.read(1)\n if len(padding) < 1:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n if padding[0] != 0:\n error_position = format(mp4_file.tell() - 1, '#010x')\n byte = format(padding[0], '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n if status == 'A':\n latitude_dmm = read_little_endian_single(mp4_file)\n latitude_degree = math.floor(latitude_dmm / 100)\n if latitude_degree < 0 or 90 < latitude_degree:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\"\n )\n latitude_minute = latitude_dmm - latitude_degree * 100\n if latitude_minute < 0 or 60 <= latitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\"\n )\n latitude_degree += latitude_minute / 60\n latitude = Latitude(latitude_degree)\n else:\n assert status == 'V' or status is None\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 4 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n latitude = None\n if status == 'A':\n longitude_dmm = read_little_endian_single(mp4_file)\n longitude_degree = math.floor(longitude_dmm / 100)\n if longitude_degree < 0 or 180 < longitude_degree:\n 
error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a longitude in DMM format, but got an invalid value `{longitude_dmm}'.\"\n )\n longitude_minute = longitude_dmm - longitude_degree * 100\n if longitude_minute < 0 or 60 <= longitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect a longitude in DMM format, but got an invalid value `{longitude_dmm}'.\"\n )\n longitude_degree += longitude_minute / 60\n longitude = Longitude(longitude_degree)\n else:\n assert status == 'V' or status is None\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect zero padding, but got EOF.'\n )\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 4 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n longitude = None\n speed = read_little_endian_single(mp4_file)\n speed *= 1852 / 3600\n speed = Speed(speed)\n azimuth = read_little_endian_single(mp4_file)\n if azimuth < 0 or 360 <= azimuth:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect azimuth degree, but got an invalid value `{azimuth}'.\"\n )\n azimuth = Azimuth(azimuth)\n x_acceleration = mp4_file.read(4)\n if len(x_acceleration) < 4:\n error_position = format(mp4_file.tell() - len(\n x_acceleration), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect X-axis acceleration, but got EOF.'\n )\n x_acceleration = int.from_bytes(x_acceleration, 'little',\n signed=True)\n y_acceleration = mp4_file.read(4)\n if len(y_acceleration) < 4:\n error_position = format(mp4_file.tell() - len(\n y_acceleration), 
'#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect Y-axis acceleration, but got EOF.'\n )\n y_acceleration = int.from_bytes(y_acceleration, 'little',\n signed=True)\n z_acceleration = mp4_file.read(4)\n if len(z_acceleration) < 4:\n error_position = format(mp4_file.tell() - len(\n z_acceleration), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect Z-axis acceleration, but got EOF.'\n )\n z_acceleration = int.from_bytes(z_acceleration, 'little',\n signed=True)\n if mp4_file.tell() != small_block_end:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect the end of a GPS data block, but got additional data.'\n )\n padding_size = large_block_end - small_block_end\n if padding_size < 532:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect more than or equal to 532-byte padding, but got only {padding_size}-byte padding.'\n )\n padding = mp4_file.read(padding_size)\n if len(padding) < padding_size:\n error_position = format(mp4_file.tell() - len(padding), '#010x'\n )\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect {padding_size}-byte padding, but got EOF.'\n )\n for j, b in enumerate(padding[:420]):\n if b != 0:\n error_position = format(small_block_end + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n if padding[420:532] != _UNKNOWN_BYTES:\n for j, b in enumerate(padding[420:532]):\n if b != 0:\n error_position = format(small_block_end + 420 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n for j, b in enumerate(padding[532:]):\n if b != 0:\n error_position = format(small_block_end + 532 + j, '#010x')\n byte = format(b, '#04x')\n raise 
GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect zero padding, but got an invalid byte `{byte}'.\"\n )\n track_point = TrackPoint(time, status, latitude, longitude,\n speed, azimuth, x_acceleration, y_acceleration, z_acceleration)\n track_points.append(track_point)\n return track_points\n\n\ndef read_input_paths(input_paths: List[pathlib.Path]) ->List[TrackPoint]:\n track_points = []\n for input_path in input_paths:\n if input_path.is_dir():\n file_paths = []\n for dirpath, dirnames, filenames in os.walk(input_path):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n file_path = dirpath / filename\n if file_path.suffix not in ('.mp4', '.MP4'):\n continue\n file_paths.append(file_path)\n file_paths.sort()\n for file_path in file_paths:\n track_points.extend(parse_mp4_file(file_path))\n else:\n track_points.extend(parse_mp4_file(input_path))\n return track_points\n\n\ndef write_csv_file(args: Arguments, track_points: List[TrackPoint]\n ) ->pathlib.Path:\n if args.name is None:\n print(\"`--name' is required to output a CSV file.\", file=sys.stderr)\n sys.exit(1)\n csv_file_path = pathlib.Path(f'{args.name}.csv')\n if csv_file_path.exists():\n if not args.overwrite:\n print(f'{csv_file_path}: File already exists.', file=sys.stderr)\n sys.exit(1)\n with open(csv_file_path, 'w') as csv_file:\n for track_point in track_points:\n print(track_point.format_as_csv(), file=csv_file)\n return csv_file_path\n\n\ndef create_track_segments(args: Arguments, track_points: List[TrackPoint]\n ) ->List[TrackSegment]:\n new_track_points = []\n for track_point in track_points:\n if track_point.status != 'A':\n assert track_point.latitude is None\n assert track_point.longitude is None\n continue\n assert track_point.latitude is not None\n assert track_point.longitude is not None\n new_track_points.append(track_point)\n track_points = new_track_points\n track_points.sort()\n if len(track_points) == 0:\n return []\n unique_track_points = []\n it = 
iter(track_points)\n representative_track_point = next(it)\n while True:\n track_point = next(it, None)\n if track_point is None:\n unique_track_points.append(representative_track_point)\n break\n if track_point.time != representative_track_point.time:\n unique_track_points.append(representative_track_point)\n representative_track_point = track_point\n continue\n if (track_point.latitude == representative_track_point.latitude and\n track_point.longitude == representative_track_point.longitude):\n continue\n if args.how_to_unique == 'first':\n continue\n elif args.how_to_unique == 'last':\n representative_track_point = track_point\n else:\n raise RuntimeError(\n \"There exist track points with the same timestamp but different coordinates. Use `--uniq' option.\"\n )\n track_segments = []\n track_segments.append(TrackSegment())\n for track_point in unique_track_points:\n track_segments[0].append_track_point(track_point)\n return track_segments\n\n\ndef as_xml_attribute(data: str) ->str:\n return xml.sax.saxutils.quoteattr(data)\n\n\ndef as_xml_data(data: str) ->str:\n return xml.sax.saxutils.escape(data)\n\n\ndef get_local_time_in_iso8601() ->str:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n local_time_in_iso8601 = local_aware_now.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('([+-]\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', local_time_in_iso8601)\n\n\ndef write_gpx_file(args: Arguments, track_segments: List[TrackSegment]\n ) ->pathlib.Path:\n all_track_points = []\n for track_segment in track_segments:\n for track_point in track_segment:\n all_track_points.append(track_point)\n name = args.name\n if name is None:\n if len(all_track_points) == 0:\n raise ValueError(\n \"`--name' is not specified, and there is no track point.\")\n all_track_points.sort()\n name = all_track_points[0].name\n gpx_file_path = pathlib.Path(f'{name}.gpx')\n bounds = None\n if len(all_track_points) > 0:\n latitudes = list(t.latitude for t in 
all_track_points)\n latitudes.sort()\n longitudes = list(t.longitude for t in all_track_points)\n longitudes.sort()\n bounds = latitudes[0], longitudes[0], latitudes[-1], longitudes[-1]\n if gpx_file_path.exists():\n if not args.overwrite:\n print(f'{gpx_file_path}: Error: File already exists.', file=sys\n .stderr)\n sys.exit(1)\n with open(gpx_file_path, 'w') as gpx_file:\n print('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>',\n file=gpx_file)\n print(\n '<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" version=\"1.1\" creator=\"papago2gpx\">'\n , file=gpx_file)\n print(' <metadata>', file=gpx_file)\n print(f' <name>{as_xml_data(name)}</name>', file=gpx_file)\n if args.description is not None:\n description = as_xml_data(args.description)\n print(f' <desc>{description}</desc>', file=gpx_file)\n if args.author_name is not None or args.author_email is not None:\n print(' <author>', file=gpx_file)\n if args.author_name is not None:\n author_name = as_xml_data(args.author_name)\n print(f' <name>{author_name}</name>', file=gpx_file)\n if args.author_email is not None:\n author_email_parts = args.author_email.split('@', 1)\n if len(author_email_parts) != 2:\n raise RuntimeError(\n f'An invalid E-mail address: {args.author_email}')\n author_email_id = as_xml_attribute(author_email_parts[0])\n author_email_domain = as_xml_attribute(author_email_parts[1])\n print(\n f' <email id={author_email_id} domain={author_email_domain}/>'\n , file=gpx_file)\n print(' </author>', file=gpx_file)\n if args.copyright is not None:\n copyright = as_xml_attribute(args.copyright)\n print(f' <copyright author={copyright}', end='', file=gpx_file)\n copyright_year = args.copyright_year\n copyright_license = args.copyright_license\n if copyright_year is not None or copyright_license is not None:\n print('>', file=gpx_file)\n if copyright_year is not None:\n copyright_year = as_xml_data(str(copyright_year))\n print(f' <year>{copyright_year}</year>', file=gpx_file\n )\n if 
copyright_license is not None:\n copyright_license = as_xml_data(copyright_license)\n print(f' <license>{copyright_license}</license>',\n file=gpx_file)\n print(' </copyright>', file=gpx_file)\n else:\n print('/>', file=gpx_file)\n print(f' <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)\n if args.keywords is not None:\n keywords = as_xml_data(args.keywords)\n print(f' <keywords>{keywords}</keywords>', file=gpx_file)\n if bounds is not None:\n print(\n f' <bounds minlat=\"{bounds[0]}\" minlon=\"{bounds[1]}\" maxlat=\"{bounds[2]}\" maxlon=\"{bounds[3]}\"/>'\n , file=gpx_file)\n print(' </metadata>', file=gpx_file)\n print(' <trk>', file=gpx_file)\n if args.track_name is not None:\n track_name = as_xml_data(args.track_name)\n print(f' <name>{track_name}</name>', file=gpx_file)\n if args.track_comment is not None:\n track_comment = as_xml_data(args.track_comment)\n print(f' <cmt>{track_comment}</cmt>', file=gpx_file)\n if args.track_description is not None:\n track_description = as_xml_data(args.track_description)\n print(f' <desc>{track_description}</desc>', file=gpx_file)\n if args.track_type is not None:\n track_type = as_xml_data(args.track_type)\n print(f' <type>{track_type}</type>', file=gpx_file)\n for track_segment in track_segments:\n print(' <trkseg>', file=gpx_file)\n for track_point in track_segment:\n print(\n f' <trkpt lat=\"{track_point.latitude}\" lon=\"{track_point.longitude}\">'\n , file=gpx_file)\n print(f' <time>{track_point.time}</time>', file=gpx_file\n )\n print(' </trkpt>', file=gpx_file)\n print(' </trkseg>', file=gpx_file)\n print(' </trk>', file=gpx_file)\n print('</gpx>', file=gpx_file)\n proc = subprocess.run(['xmllint', '--schema', 'gpx.xsd', str(\n gpx_file_path)], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='UTF-8')\n if proc.returncode != 0:\n print(\n f\"\"\"Failed to validate the GPX file `{gpx_file_path}'.\ncommand: {proc.args}\nstdout: {proc.stdout}\nstderr: 
{proc.stderr}\nreturncode: {proc.returncode}\"\"\"\n , file=sys.stderr)\n return gpx_file_path\n\n\nif __name__ == '__main__':\n args = Arguments()\n track_points = read_input_paths(args.input_paths)\n csv_file_path = write_csv_file(args, track_points)\n print(f\"Succeeded! The result is output to `{csv_file_path}'.\")\n track_segments = create_track_segments(args, track_points)\n if args.name is None and len(track_segments) == 0:\n print(\"`--name' is not specified, and there is no track segment.\",\n file=sys.stderr)\n sys.exit(1)\n if len(track_segments) == 0:\n print('WARNING: There is no track segment.', file=sys.stderr)\n gpx_file_path = write_gpx_file(args, track_segments)\n print(f\"Succeeded! The result is output to `{gpx_file_path}'.\")\n sys.exit(0)\n",
"step-5": "#!/usr/bin/env python3\n\nimport re\nimport datetime\nimport math\nimport pathlib\nimport os\nimport io\nimport argparse\nimport subprocess\nimport xml.sax.saxutils\nfrom typing import (Optional, List, Iterable)\nimport sys\n\n\n_DEFAULT_TRACK_TYPE = 'Dashcam track'\n\n\nclass Arguments(object):\n def __init__(self):\n parser = argparse.ArgumentParser(\n prog='papago2gpx', description='Extract GPS data from MP4 video\\\n files created by PAPAGO! dashcams, and format them into a GPX file.')\n parser.add_argument('input_paths', nargs='+',\n help='The path to an input file or directory.',\n metavar='INPUT_PATH')\n parser.add_argument('--name', help='The name of the GPX file to\\\n output. Default to 16 deciaml digits representing the first GPS record time.',\n metavar='NAME')\n parser.add_argument('--description', help='The description of the GPX\\\n file to output.', metavar='DESCRIPTION')\n parser.add_argument('--author-name', help='The name of the author of\\\n the GPX file to output.', metavar='AUTHOR_NAME')\n parser.add_argument('--author-email', help='The Email address of the\\\n author of the GPX file to output.', metavar='AUTHOR_EMAIL')\n parser.add_argument('--copyright', help=\"The copyright holder of the\\\n GPX file to output. Default to `AUTHOR_NAME'.\", metavar='COPYRIGHT')\n parser.add_argument('--copyright-year', help=\"The copyright year of\\\n the GPX file to output. 
Default to the year the file is created.\",\n metavar='COPYRIGHT_YEAR')\n parser.add_argument('--copyright-license', help='A link to an external\\\n file containing license text.', metavar='LICENSE')\n parser.add_argument('--keywords', help='Keywords associated with the\\\n GPX file to output.', metavar='KEYWORDS')\n parser.add_argument('--track-name', help='The name of the track.',\n metavar='TRACK_NAME')\n parser.add_argument(\n '--track-comment', help='The comment of the track.',\n metavar='TRACK_COMMENT')\n parser.add_argument('--track-description', help=\"The description of\\\n the track.\", metavar='TRACK_DESCRIPTION')\n parser.add_argument(\n '--track-type', default=_DEFAULT_TRACK_TYPE,\n help=f\"The type of the track. Default to `{_DEFAULT_TRACK_TYPE}'.\")\n parser.add_argument('--uniq', choices=['first', 'last'],\n help='How to process different coordinates\\\n recorded at the same timestamp. Default to an error.')\n parser.add_argument('--overwrite', action='store_true',\n help='Allow to overwrite an existing file.')\n\n args = parser.parse_args()\n\n self._input_paths = []\n for input_path in args.input_paths:\n input_path = pathlib.Path(input_path)\n if not input_path.exists():\n print(f\"{input_path}: File does not exist.\", file=sys.stderr)\n sys.exit(1)\n self._input_paths.append(input_path)\n\n self._name = args.name\n\n self._description = args.description\n\n self._author_name = args.author_name\n\n self._author_email = args.author_email\n\n self._copyright = args.copyright\n if self._copyright is None and self._author_name is not None:\n self._copyright = self._author_name\n\n self._copyright_year = args.copyright_year\n if self._copyright_year is not None and self._copyright is None:\n print(\"`--copyright-year' is specified, but `--copyright' is not.\",\n file=sys.stderr)\n sys.exit(1)\n if self._copyright_year is None and self._copyright is not None:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = 
utc_now.astimezone()\n self._copyright_year = local_aware_now.year\n\n self._copyright_license = args.copyright_license\n if self._copyright_license is not None and self._copyright is None:\n print(\"`--copyright-license' is specified, but `--copyright' is\\\n not.\", file=sys.stderr)\n sys.exit(1)\n\n self._keywords = args.keywords\n\n self._track_name = args.track_name\n\n self._track_comment = args.track_comment\n\n self._track_description = args.track_description\n\n self._track_type = args.track_type\n if self._track_type is None:\n self._track_type = _DEFAULT_TRACK_TYPE\n if self._track_type == '':\n self._track_type = None\n\n self._how_to_unique = args.uniq\n\n self._overwrite = args.overwrite\n\n @property\n def input_paths(self) -> List[pathlib.Path]:\n return self._input_paths\n\n @property\n def name(self) -> Optional[str]:\n return self._name\n\n @property\n def description(self) -> Optional[str]:\n return self._description\n\n @property\n def author_name(self) -> Optional[str]:\n return self._author_name\n\n @property\n def author_email(self) -> Optional[str]:\n return self._author_email\n\n @property\n def copyright(self) -> Optional[str]:\n return self._copyright\n\n @property\n def copyright_year(self) -> Optional[int]:\n return self._copyright_year\n\n @property\n def copyright_license(self) -> Optional[str]:\n return self._copyright_license\n\n @property\n def keywords(self) -> Optional[str]:\n return self._keywords\n\n @property\n def track_name(self) -> Optional[str]:\n return self._track_name\n\n @property\n def track_comment(self) -> Optional[str]:\n return self._track_comment\n\n @property\n def track_description(self) -> Optional[str]:\n return self._track_description\n\n @property\n def track_type(self) -> Optional[str]:\n return self._track_type\n\n @property\n def how_to_unique(self) -> str:\n return self._how_to_unique\n\n @property\n def overwrite(self) -> bool:\n return self._overwrite\n\n\nclass BrokenMp4FileError(RuntimeError):\n 
def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataError(RuntimeError):\n def __init__(self, message: str):\n super().__init__(message)\n\n\nclass GpsDataBlockIndex(object):\n def __init__(self, position: int, size: int):\n if position <= 0:\n raise ValueError(f\"An invalid position: `{position}'.\")\n if size <= 0:\n raise ValueError(f\"An invalid size: `{size}'.\")\n self._position = position\n self._size = size\n\n @property\n def position(self) -> int:\n return self._position\n\n @property\n def size(self) -> int:\n return self._size\n\n\ndef get_gps_data_block_indices(mp4_file: io.FileIO) -> List[GpsDataBlockIndex]:\n target_box_path = ['moov', 'gps ']\n while True:\n box_size = mp4_file.read(4)\n if len(box_size) == 0:\n raise GpsDataError(\n f'{mp4_file.name}: Could not find any GPS data block index.')\n if len(box_size) < 4:\n error_position = format(mp4_file.tell() - len(box_size), '#010x')\n raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\\\n Expect the size of a box, but got EOF.')\n box_size = int.from_bytes(box_size, 'big')\n\n box_type = mp4_file.read(4)\n if len(box_type) < 4:\n error_position = format(mp4_file.tell() - len(box_type), '#010x')\n raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\\\n Expect the type of a box, but got EOF.')\n box_type = box_type.decode('UTF-8')\n\n if box_size == 0:\n box_size = None\n next_position = None\n elif box_size == 1:\n box_size = mp4_file.read(8)\n if len(box_size) < 8:\n error_position = format(mp4_file.tell() - len(box_size),\n '#010x')\n raise BrokenMp4FileError(f'{mp4_file.name}:{error_position}:\\\n Expect the size of a box, but got EOF.')\n box_size = int.from_bytes(box_size, 'big')\n next_position = mp4_file.tell() + box_size - 16\n else:\n next_position = mp4_file.tell() + box_size - 8\n\n if box_type == target_box_path[0]:\n target_box_path.pop(0)\n if len(target_box_path) == 0:\n break\n else:\n if next_position is None:\n raise 
GpsDataError(f'{mp4_file.name}: Could not find any GPS'\n ' data block index.')\n mp4_file.seek(next_position)\n if mp4_file.tell() != next_position:\n raise BrokenMp4FileError(f'{mp4_file.name}: The size of a box\\\n is not equal to the actual one.')\n\n unknown = mp4_file.read(4)\n if len(unknown) < 4:\n error_position = format(mp4_file.tell() - len(unknown), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'\n ' big-endian 32-bit unsigned integer, but got EOF.')\n unknown = int.from_bytes(unknown, 'big')\n if unknown != 257:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect a\\\n big-endian 32-bit unsigned integer with value `257', but got `{unknown}'.\")\n\n gps_data_block_count = mp4_file.read(4)\n if len(gps_data_block_count) < 4:\n error_position = format(mp4_file.tell() - len(gps_data_block_count),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a'\n ' big-endian 32-bit unsigned integer, but got EOF.')\n gps_data_block_count = int.from_bytes(gps_data_block_count, 'big')\n\n gps_data_block_indices = []\n for i in range(gps_data_block_count):\n position = mp4_file.read(4)\n if len(position) < 4:\n error_position = format(mp4_file.tell() - len(position), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'\n ' position of a GPS data block, but got EOF.')\n position = int.from_bytes(position, 'big')\n if position < 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect the\\\n position of a GPS data block, but got an invalid value `{position}'.\")\n\n size = mp4_file.read(4)\n if len(size) < 4:\n error_position = format(mp4_file.tell() - len(size), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect the'\n ' size of a GPS data block, but got EOF.')\n size = int.from_bytes(size, 'big')\n if size < 0:\n error_position = 
format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect the\\\n size of a GPS data block, but got an invalid value `{size}'.\")\n\n if position == 0 or size == 0:\n print(f'{mp4_file.name}: Warning: The index of GPS data blocks is\\\n not recorded.', file=sys.stderr)\n else:\n gps_data_block_index = GpsDataBlockIndex(position, size)\n gps_data_block_indices.append(gps_data_block_index)\n\n if mp4_file.tell() != next_position:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(f'{mp4_file_path}:{error_position}: Expect EOF, but'\n ' find additional data.')\n\n return gps_data_block_indices\n\n\ndef read_little_endian_single(mp4_file: io.FileIO) -> float:\n data = mp4_file.read(4)\n if len(data) < 4:\n error_position = format(mp4_file.tell() - len(data), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect a\\\n little-endian single-precision floating point number, but got EOF.')\n data = int.from_bytes(data, 'little')\n\n sign = (data & 0x80000000) >> 31\n exponent = ((data & 0x7F800000) >> 23) - 127\n mantissa = (data & 0x007FFFFF) | 0x00800000\n\n sign = '+' if sign == 0 else '-'\n exponent = str(exponent - 23)\n mantissa_hex = format(mantissa, '08x')\n return float.fromhex(f'{sign}0x{mantissa_hex}p{exponent}')\n\n\nclass Time(object):\n def __init__(self, time: datetime.datetime):\n if time.tzinfo is None:\n raise ValueError(\n \"Expect an aware `datetime' object, but got naive one.\")\n\n self._time = time.astimezone(datetime.timezone.utc)\n\n def as_local_time(self) -> datetime.datetime:\n return self._time.astimezone()\n\n def __repr__(self) -> str:\n result = self._time.strftime(\"%Y-%m-%dT%H:%M:%S%z\")\n return re.sub('(\\\\+\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', result)\n\n def __lt__(self, other) -> bool:\n return self._time < other._time\n\n def __eq__(self, other) -> bool:\n return self._time == other._time\n\n\nclass Latitude(object):\n def __init__(self, degree: 
float):\n if degree < -90 or 90 < degree:\n raise ValueError(\"An invalid latitude degree: `{degree}'.\")\n\n self._degree = degree\n\n def __repr__(self) -> str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) -> bool:\n return self._degree < other._degree\n\n def __eq__(self, other) -> bool:\n return self._degree == other._degree\n\n\nclass Longitude(object):\n def __init__(self, degree: float):\n if degree < -180 or 180 < degree:\n raise ValueError(\"An invalid longitude degree: `{degree}'.\")\n\n self._degree = degree\n\n def __repr__(self) -> str:\n return format(self._degree, '.6F')\n\n def __lt__(self, other) -> bool:\n return self._degree < other._degree\n\n def __eq__(self, other) -> bool:\n return self._degree == other._degree\n\n\nclass Speed(object):\n def __init__(self, meter_per_second: float):\n self._meter_per_second = meter_per_second\n\n def __repr__(self) -> str:\n return format(self._meter_per_second, '.2F')\n\n\nclass Azimuth(object):\n def __init__(self, degree: float):\n if degree < 0 or 360 <= degree:\n raise ValueError(f\"An invalid azimuth degree: `{degree}'.\")\n\n self._degree = degree\n\n def __repr__(self) -> str:\n return format(self._degree, '.2F')\n\n\nclass TrackPoint(object):\n def __init__(self, time: Time, status: str, latitude: Optional[Latitude],\n longitude: Optional[Longitude], speed: Speed,\n azimuth: Azimuth, x_acceleration: int, y_acceleration: int,\n z_acceleration: int):\n if (status == 'V' or status is None) != (latitude is None):\n raise ValueError('Inconsistent arguments:'\n f' status = {status}, latitude = {latitude}')\n if (status == 'V' or status is None) != (longitude is None):\n raise ValueError('Inconsistent arguments:'\n f' status = {status}, longitude = {longitude}')\n\n self._time = time\n self._status = status\n self._latitude = latitude\n self._longitude = longitude\n self._speed = speed\n self._azimuth = azimuth\n self._x_acceleration = x_acceleration\n self._y_acceleration = 
y_acceleration\n self._z_acceleration = z_acceleration\n\n @property\n def time(self) -> Time:\n return self._time\n\n @property\n def status(self) -> str:\n return self._status\n\n @property\n def latitude(self) -> Optional[Latitude]:\n return self._latitude\n\n @property\n def longitude(self) -> Optional[Longitude]:\n return self._longitude\n\n @property\n def speed(self) -> Speed:\n return self._speed\n\n @property\n def azimuth(self) -> Azimuth:\n return self._azimuth\n\n @property\n def x_acceleration(self) -> int:\n return self._x_acceleration\n\n @property\n def y_acceleration(self) -> int:\n return self._y_acceleration\n\n @property\n def z_acceleration(self) -> int:\n return self._z_acceleration\n\n @property\n def name(self) -> str:\n local_time = self._time.as_local_time()\n return local_time.strftime('%Y%m%d%H%M%S')\n\n def format_as_csv(self) -> str:\n if self._time is not None:\n local_time = self._time.as_local_time()\n result = local_time.strftime('%Y/%m/%d %H:%M:%S')\n else:\n result = ''\n status = self._status if self._status is not None else ''\n result += f',{status}'\n latitude = str(self._latitude) if self._latitude is not None else ''\n result += f',{latitude}'\n longitude = str(self._longitude) if self._longitude is not None else ''\n result += f',{longitude}'\n result += f',{self._speed}'\n result += f',{self._azimuth}'\n result += f',{self._x_acceleration}'\n result += f',{self._y_acceleration}'\n result += f',{self._z_acceleration}'\n return result\n\n def __repr__(self) -> str:\n latitude = str(self._latitude) if self._latitude is not None else ''\n longitude = str(self._longitude) if self._longitude is not None else ''\n return f'{self._time},{latitude},{longitude}'\n\n def __lt__(self, other) -> bool:\n return self._time < other._time\n\n def __eq__(self, other) -> bool:\n return self._time == other._time and self._latitude == other._latitude\\\n and self._longitude == other._longitude\n\n\nclass TrackSegment(object):\n def 
__init__(self):\n self._track_points = []\n\n def append_track_point(self, track_point: TrackPoint) -> None:\n self._track_points.append(track_point)\n\n def __len__(self) -> int:\n return len(self._track_points)\n\n def __iter__(self) -> Iterable[TrackPoint]:\n return iter(self._track_points)\n\n\n_UNKNOWN_BYTES\\\n = b'\\x00\\x21\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\xBC\\xC7\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x3C\\xDB\\x17\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x18\\xB5\\x18\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\xA0\\xFE\\x19\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x20\\xF9\\x1B\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x01\\x00\\x00\\x00\\\n\\xAC\\xB3\\x1C\\x00\\x00\\x00\\x00\\x00\\x80\\x01\\x00\\x00\\x00\\x00\\x00\\x00'\n\n\ndef parse_mp4_file(mp4_file_path: pathlib.Path) -> List[TrackPoint]:\n track_points = []\n\n with open(mp4_file_path, 'rb') as mp4_file:\n gps_data_block_indices = get_gps_data_block_indices(mp4_file)\n\n for gps_data_block_index in gps_data_block_indices:\n mp4_file.seek(gps_data_block_index.position)\n if mp4_file.tell() != gps_data_block_index.position:\n error_position = gps_data_block_index.position\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' a GPS data block, but got EOF.')\n\n large_block_size = mp4_file.read(4)\n if len(large_block_size) < 4:\n error_position = format(\n mp4_file.tell() - len(large_block_size), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the size of a GPS data block, but got EOF.')\n large_block_size = int.from_bytes(large_block_size, 'big')\n if large_block_size != gps_data_block_index.size:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f'{mp4_file_path}:{error_position}: The\\\n size of a GPS data block is not equal to the one stored in the index.')\n\n 
large_block_end = mp4_file.tell() - 4 + large_block_size\n\n signature = mp4_file.read(8)\n if len(signature) < 8:\n error_position = format(mp4_file.tell() - len(signature),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the signature of a GPS data block, but got EOF.')\n signature = signature.decode('UTF-8')\n if signature != 'freeGPS ':\n error_position = format(mp4_file.tell() - 8, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n `freeGPS ' as the signature of a GPS data block, but got `{signature}'.\")\n\n small_block_size = mp4_file.read(4)\n if len(small_block_size) < 4:\n error_position = format(\n mp4_file.tell() - len(small_block_size), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the size of a GPS data block, but got EOF.')\n small_block_size = int.from_bytes(small_block_size, 'little')\n if small_block_size != 88:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n `88' as the size of a GPS data block, but got `{small_block_size}'.\")\n\n small_block_end = mp4_file.tell() + small_block_size\n\n padding = mp4_file.read(32)\n if len(padding) < 32:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' zero padding, but got EOF.')\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(mp4_file.tell() - 32 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n\n hour = mp4_file.read(4)\n if len(hour) < 4:\n error_position = format(mp4_file.tell() - len(hour), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the hour of time, but got EOF.')\n hour = int.from_bytes(hour, 'little')\n if hour < 0 or 24 <= hour:\n error_position = format(mp4_file.tell() - 4, 
'#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the hour of time, but got an invalid value `{hour}'.\")\n\n minute = mp4_file.read(4)\n if len(minute) < 4:\n error_position = format(mp4_file.tell() - len(minute), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the minute of time, but got EOF.')\n minute = int.from_bytes(minute, 'little')\n if minute < 0 or 60 <= minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the minute of time, but got an invalid value `{minute}'.\")\n\n second = mp4_file.read(4)\n if len(second) < 4:\n error_position = format(mp4_file.tell() - len(second), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the second of time, but got EOF.')\n second = int.from_bytes(second, 'little')\n if second < 0 or 60 <= second:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the second of time, but got an invalid value `{second}'.\")\n\n year = mp4_file.read(4)\n if len(year) < 4:\n error_position = format(mp4_file.tell() - len(year), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the year of time, but got EOF.')\n year = int.from_bytes(year, 'little')\n if year == 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n if hour != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year == 0' but `hour != 0'.\")\n if minute != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year == 0' but `minute != 0'.\")\n if second != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year == 0' but `second != 0'.\")\n else:\n year += 2000\n\n month = mp4_file.read(4)\n if len(month) < 4:\n error_position = format(mp4_file.tell() - len(month), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the month of 
time, but got EOF.')\n month = int.from_bytes(month, 'little')\n if month == 0:\n if year != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year != 0' but `month == 0'.\")\n assert(hour == 0)\n assert(minute == 0)\n assert(second == 0)\n elif month < 1 or 12 < month:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the month of time, but got an invalid value `{month}'.\")\n\n day = mp4_file.read(4)\n if len(day) < 4:\n error_position = format(mp4_file.tell() - len(day), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' the day of time, but got EOF.')\n day = int.from_bytes(day, 'little')\n if day == 0:\n if year != 0:\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\"\n \" `year != 0' but `day == 0'.\")\n assert(month == 0)\n assert(hour == 0)\n assert(minute == 0)\n assert(second == 0)\n elif day < 1 or 31 < day:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n the day of time, but got an invalid value `{day}'.\")\n\n if year == 0:\n assert(month == 0)\n assert(day == 0)\n assert(hour == 0)\n assert(minute == 0)\n assert(second == 0)\n time = None\n else:\n time = datetime.datetime.now(datetime.timezone.utc)\n time = time.astimezone()\n time = time.replace(\n year=year, month=month, day=day, hour=hour, minute=minute,\n second=second, microsecond=0)\n time = Time(time)\n\n if time is None:\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero-padding, but got EOF.')\n padding = int.from_bytes(padding, 'little')\n if padding != 0:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect\"\n f\" zero-padding, but got `{padding}'.\")\n status = None\n 
latitude_type = '0'\n longitude_type = '0'\n else:\n status = mp4_file.read(1)\n if len(status) < 1:\n error_position = format(mp4_file.tell() - len(status),\n '#010x')\n raise GpsDataError(\n f'{mp4_file.name}:{error_position}: Expect a status'\n ' character, but got EOF.')\n status = status.decode('UTF-8')\n if status not in ('A', 'V'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect `A' or `V' as a status character, but got an invalid character\\\n `{status}'.\")\n\n latitude_type = mp4_file.read(1)\n if len(latitude_type) < 1:\n error_position = format(\n mp4_file.tell() - len(latitude_type), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:\\\n Expect a latitude type, but got EOF.')\n latitude_type = latitude_type.decode('UTF-8')\n if status == 'A':\n if latitude_type not in ('N', 'S'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `N' or\\\n `S' as a latitude type, but got an invalid character `{latitude_type}'.\")\n else:\n assert(status == 'V')\n if latitude_type != '0':\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect `0' as a latitude type, but got an invalid character\\\n `{latitude_type}'.\")\n\n longitude_type = mp4_file.read(1)\n if len(longitude_type) < 1:\n error_position = format(\n mp4_file.tell() - len(longitude_type), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:\\\n Expect a longitude type, but got EOF.')\n longitude_type = longitude_type.decode('UTF-8')\n if status == 'A':\n if longitude_type not in ('E', 'W'):\n error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(\n f\"{mp4_file.name}:{error_position}: Expect `E' or\\\n `W' as a longitude type, but got an invalid character `{longitude_type}'.\")\n else:\n assert(status == 'V')\n if longitude_type != '0':\n 
error_position = format(mp4_file.tell() - 1, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect `0' as a longitude type, but got an invalid character\\\n `{longitude_type}'.\")\n\n padding = mp4_file.read(1)\n if len(padding) < 1:\n error_position = format(mp4_file.tell() - len(padding),\n '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero padding, but got EOF.')\n if padding[0] != 0:\n error_position = format(mp4_file.tell() - 1, '#010x')\n byte = format(padding[0], '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n\n if status == 'A':\n latitude_dmm = read_little_endian_single(mp4_file)\n latitude_degree = math.floor(latitude_dmm / 100)\n if latitude_degree < 0 or 90 < latitude_degree:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\")\n latitude_minute = latitude_dmm - latitude_degree * 100\n if latitude_minute < 0 or 60 <= latitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a latitude in DMM format, but got an invalid value `{latitude_dmm}'.\")\n latitude_degree += latitude_minute / 60\n latitude = Latitude(latitude_degree)\n else:\n assert(status == 'V' or status is None)\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(\n mp4_file.tell() - len(padding), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero padding, but got EOF.')\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(\n mp4_file.tell() - 4 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n latitude = None\n\n if status == 'A':\n 
longitude_dmm = read_little_endian_single(mp4_file)\n longitude_degree = math.floor(longitude_dmm / 100)\n if longitude_degree < 0 or 180 < longitude_degree:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a longitude in DMM format, but got an invalid value\\\n `{longitude_dmm}'.\")\n longitude_minute = longitude_dmm - longitude_degree * 100\n if longitude_minute < 0 or 60 <= longitude_minute:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect a longitude in DMM format, but got an invalid value\\\n `{longitude_dmm}'.\")\n longitude_degree += longitude_minute / 60\n longitude = Longitude(longitude_degree)\n else:\n assert(status == 'V' or status is None)\n padding = mp4_file.read(4)\n if len(padding) < 4:\n error_position = format(\n mp4_file.tell() - len(padding), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}:'\n ' Expect zero padding, but got EOF.')\n for j, b in enumerate(padding):\n if b != 0:\n error_position = format(\n mp4_file.tell() - 4 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n longitude = None\n\n speed = read_little_endian_single(mp4_file)\n # Presume that speed is recorded in knots.\n speed *= (1852 / 3600)\n speed = Speed(speed)\n\n azimuth = read_little_endian_single(mp4_file)\n if azimuth < 0 or 360 <= azimuth:\n error_position = format(mp4_file.tell() - 4, '#010x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}: Expect\\\n azimuth degree, but got an invalid value `{azimuth}'.\")\n azimuth = Azimuth(azimuth)\n\n x_acceleration = mp4_file.read(4)\n if len(x_acceleration) < 4:\n error_position = format(\n mp4_file.tell() - len(x_acceleration), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' X-axis acceleration, but got EOF.')\n 
x_acceleration = int.from_bytes(\n x_acceleration, 'little', signed=True)\n\n y_acceleration = mp4_file.read(4)\n if len(y_acceleration) < 4:\n error_position = format(\n mp4_file.tell() - len(y_acceleration), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' Y-axis acceleration, but got EOF.')\n y_acceleration = int.from_bytes(\n y_acceleration, 'little', signed=True)\n\n z_acceleration = mp4_file.read(4)\n if len(z_acceleration) < 4:\n error_position = format(\n mp4_file.tell() - len(z_acceleration), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect'\n ' Z-axis acceleration, but got EOF.')\n z_acceleration = int.from_bytes(\n z_acceleration, 'little', signed=True)\n\n if mp4_file.tell() != small_block_end:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n the end of a GPS data block, but got additional data.')\n\n padding_size = large_block_end - small_block_end\n if padding_size < 532:\n error_position = format(mp4_file.tell(), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n more than or equal to 532-byte padding, but got only {padding_size}-byte\\\n padding.')\n padding = mp4_file.read(padding_size)\n if len(padding) < padding_size:\n error_position = format(\n mp4_file.tell() - len(padding), '#010x')\n raise GpsDataError(f'{mp4_file.name}:{error_position}: Expect\\\n {padding_size}-byte padding, but got EOF.')\n for j, b in enumerate(padding[:420]):\n if b != 0:\n error_position = format(small_block_end + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n # `_UNKNOWN_BYTES` may appear in the zero padding. However,\n # what this means is unknown. 
Therefore, just skip it if it\n # appears.\n if padding[420:532] != _UNKNOWN_BYTES:\n for j, b in enumerate(padding[420:532]):\n if b != 0:\n error_position = format(small_block_end + 420 + j,\n '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n for j, b in enumerate(padding[532:]):\n if b != 0:\n error_position = format(small_block_end + 532 + j, '#010x')\n byte = format(b, '#04x')\n raise GpsDataError(f\"{mp4_file.name}:{error_position}:\\\n Expect zero padding, but got an invalid byte `{byte}'.\")\n\n track_point = TrackPoint(\n time, status, latitude, longitude, speed, azimuth,\n x_acceleration, y_acceleration, z_acceleration)\n track_points.append(track_point)\n\n return track_points\n\n\ndef read_input_paths(input_paths: List[pathlib.Path]) -> List[TrackPoint]:\n track_points = []\n\n for input_path in input_paths:\n if input_path.is_dir():\n file_paths = []\n for dirpath, dirnames, filenames in os.walk(input_path):\n dirpath = pathlib.Path(dirpath)\n for filename in filenames:\n file_path = dirpath / filename\n if file_path.suffix not in ('.mp4', '.MP4'):\n continue\n file_paths.append(file_path)\n\n file_paths.sort()\n\n for file_path in file_paths:\n track_points.extend(parse_mp4_file(file_path))\n else:\n track_points.extend(parse_mp4_file(input_path))\n\n return track_points\n\n\ndef write_csv_file(args: Arguments,\n track_points: List[TrackPoint]) -> pathlib.Path:\n if args.name is None:\n print(\"`--name' is required to output a CSV file.\", file=sys.stderr)\n sys.exit(1)\n\n csv_file_path = pathlib.Path(f'{args.name}.csv')\n if csv_file_path.exists():\n if not args.overwrite:\n print(f\"{csv_file_path}: File already exists.\", file=sys.stderr)\n sys.exit(1)\n\n with open(csv_file_path, 'w') as csv_file:\n for track_point in track_points:\n print(track_point.format_as_csv(), file=csv_file)\n\n return csv_file_path\n\n\ndef create_track_segments(\n 
args: Arguments, track_points: List[TrackPoint]) -> List[TrackSegment]:\n new_track_points = []\n for track_point in track_points:\n if track_point.status != 'A':\n assert(track_point.latitude is None)\n assert(track_point.longitude is None)\n continue\n assert(track_point.latitude is not None)\n assert(track_point.longitude is not None)\n new_track_points.append(track_point)\n track_points = new_track_points\n\n track_points.sort()\n\n if len(track_points) == 0:\n return []\n\n unique_track_points = []\n it = iter(track_points)\n representative_track_point = next(it)\n while True:\n track_point = next(it, None)\n\n if track_point is None:\n unique_track_points.append(representative_track_point)\n break\n\n if track_point.time != representative_track_point.time:\n unique_track_points.append(representative_track_point)\n representative_track_point = track_point\n continue\n\n if track_point.latitude == representative_track_point.latitude\\\n and track_point.longitude == representative_track_point.longitude:\n continue\n\n if args.how_to_unique == 'first':\n continue\n elif args.how_to_unique == 'last':\n representative_track_point = track_point\n else:\n raise RuntimeError(\"There exist track points with the same\\\n timestamp but different coordinates. 
Use `--uniq' option.\")\n\n track_segments = []\n track_segments.append(TrackSegment())\n for track_point in unique_track_points:\n track_segments[0].append_track_point(track_point)\n\n return track_segments\n\n\ndef as_xml_attribute(data: str) -> str:\n return xml.sax.saxutils.quoteattr(data)\n\n\ndef as_xml_data(data: str) -> str:\n return xml.sax.saxutils.escape(data)\n\n\ndef get_local_time_in_iso8601() -> str:\n utc_now = datetime.datetime.now(datetime.timezone.utc)\n local_aware_now = utc_now.astimezone()\n local_time_in_iso8601 = local_aware_now.strftime('%Y-%m-%dT%H:%M:%S%z')\n return re.sub('([+-]\\\\d{2})(\\\\d{2})$', '\\\\1:\\\\2', local_time_in_iso8601)\n\n\ndef write_gpx_file(args: Arguments,\n track_segments: List[TrackSegment]) -> pathlib.Path:\n all_track_points = []\n for track_segment in track_segments:\n for track_point in track_segment:\n all_track_points.append(track_point)\n\n name = args.name\n if name is None:\n if len(all_track_points) == 0:\n raise ValueError(\n \"`--name' is not specified, and there is no track point.\")\n all_track_points.sort()\n name = all_track_points[0].name\n\n gpx_file_path = pathlib.Path(f'{name}.gpx')\n\n bounds = None\n if len(all_track_points) > 0:\n latitudes = list(t.latitude for t in all_track_points)\n latitudes.sort()\n longitudes = list(t.longitude for t in all_track_points)\n longitudes.sort()\n bounds = (latitudes[0], longitudes[0], latitudes[-1], longitudes[-1])\n\n if gpx_file_path.exists():\n if not args.overwrite:\n print(f'{gpx_file_path}: Error: File already exists.',\n file=sys.stderr)\n sys.exit(1)\n\n with open(gpx_file_path, 'w') as gpx_file:\n print('<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" ?>',\n file=gpx_file)\n print('<gpx xmlns=\"http://www.topografix.com/GPX/1/1\" version=\"1.1\"'\n ' creator=\"papago2gpx\">', file=gpx_file)\n print(' <metadata>', file=gpx_file)\n print(f' <name>{as_xml_data(name)}</name>', file=gpx_file)\n if args.description is not None:\n description 
= as_xml_data(args.description)\n print(f' <desc>{description}</desc>', file=gpx_file)\n if args.author_name is not None or args.author_email is not None:\n print(' <author>', file=gpx_file)\n if args.author_name is not None:\n author_name = as_xml_data(args.author_name)\n print(f' <name>{author_name}</name>', file=gpx_file)\n if args.author_email is not None:\n author_email_parts = args.author_email.split('@', 1)\n if len(author_email_parts) != 2:\n raise RuntimeError(\n f'An invalid E-mail address: {args.author_email}')\n author_email_id = as_xml_attribute(author_email_parts[0])\n author_email_domain = as_xml_attribute(author_email_parts[1])\n print(f' <email id={author_email_id}\\\n domain={author_email_domain}/>', file=gpx_file)\n print(' </author>', file=gpx_file)\n if args.copyright is not None:\n copyright = as_xml_attribute(args.copyright)\n print(f' <copyright author={copyright}', end='', file=gpx_file)\n copyright_year = args.copyright_year\n copyright_license = args.copyright_license\n if copyright_year is not None or copyright_license is not None:\n print('>', file=gpx_file)\n if copyright_year is not None:\n copyright_year = as_xml_data(str(copyright_year))\n print(f' <year>{copyright_year}</year>',\n file=gpx_file)\n if copyright_license is not None:\n copyright_license = as_xml_data(copyright_license)\n print(f' <license>{copyright_license}</license>',\n file=gpx_file)\n print(' </copyright>', file=gpx_file)\n else:\n print('/>', file=gpx_file)\n print(f' <time>{get_local_time_in_iso8601()}</time>', file=gpx_file)\n if args.keywords is not None:\n keywords = as_xml_data(args.keywords)\n print(f' <keywords>{keywords}</keywords>', file=gpx_file)\n if bounds is not None:\n print(f' <bounds minlat=\"{bounds[0]}\" minlon=\"{bounds[1]}\"\\\n maxlat=\"{bounds[2]}\" maxlon=\"{bounds[3]}\"/>', file=gpx_file)\n print(' </metadata>', file=gpx_file)\n print(' <trk>', file=gpx_file)\n if args.track_name is not None:\n track_name = as_xml_data(args.track_name)\n 
print(f' <name>{track_name}</name>', file=gpx_file)\n if args.track_comment is not None:\n track_comment = as_xml_data(args.track_comment)\n print(f' <cmt>{track_comment}</cmt>', file=gpx_file)\n if args.track_description is not None:\n track_description = as_xml_data(args.track_description)\n print(f' <desc>{track_description}</desc>', file=gpx_file)\n if args.track_type is not None:\n track_type = as_xml_data(args.track_type)\n print(f' <type>{track_type}</type>', file=gpx_file)\n for track_segment in track_segments:\n print(' <trkseg>', file=gpx_file)\n for track_point in track_segment:\n print(f' <trkpt lat=\"{track_point.latitude}\"\\\n lon=\"{track_point.longitude}\">', file=gpx_file)\n print(f' <time>{track_point.time}</time>',\n file=gpx_file)\n print(' </trkpt>', file=gpx_file)\n print(' </trkseg>', file=gpx_file)\n print(' </trk>', file=gpx_file)\n print('</gpx>', file=gpx_file)\n\n proc = subprocess.run(\n ['xmllint', '--schema', 'gpx.xsd', str(gpx_file_path)],\n stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, encoding='UTF-8')\n if proc.returncode != 0:\n print(f\"\"\"Failed to validate the GPX file `{gpx_file_path}'.\ncommand: {proc.args}\nstdout: {proc.stdout}\nstderr: {proc.stderr}\nreturncode: {proc.returncode}\"\"\", file=sys.stderr)\n\n return gpx_file_path\n\n\nif __name__ == '__main__':\n args = Arguments()\n\n track_points = read_input_paths(args.input_paths)\n\n csv_file_path = write_csv_file(args, track_points)\n print(f\"Succeeded! The result is output to `{csv_file_path}'.\")\n\n track_segments = create_track_segments(args, track_points)\n\n if args.name is None and len(track_segments) == 0:\n print(\"`--name' is not specified, and there is no track segment.\",\n file=sys.stderr)\n sys.exit(1)\n\n if len(track_segments) == 0:\n print('WARNING: There is no track segment.', file=sys.stderr)\n\n gpx_file_path = write_gpx_file(args, track_segments)\n\n print(f\"Succeeded! 
The result is output to `{gpx_file_path}'.\")\n sys.exit(0)\n",
"step-ids": [
47,
55,
64,
81,
82
]
}
|
[
47,
55,
64,
81,
82
] |
from __future__ import annotations
from typing import Generator, Optional
from collections import Counter
from itertools import zip_longest
from re import finditer
# Standard RNA genetic-code chart laid out as alternating
# "codon amino-acid" tokens; "Stop" marks the three termination codons.
codon_table = """UUU F CUU L AUU I GUU V
UUC F CUC L AUC I GUC V
UUA L CUA L AUA I GUA V
UUG L CUG L AUG M GUG V
UCU S CCU P ACU T GCU A
UCC S CCC P ACC T GCC A
UCA S CCA P ACA T GCA A
UCG S CCG P ACG T GCG A
UAU Y CAU H AAU N GAU D
UAC Y CAC H AAC N GAC D
UAA Stop CAA Q AAA K GAA E
UAG Stop CAG Q AAG K GAG E
UGU C CGU R AGU S GGU G
UGC C CGC R AGC S GGC G
UGA Stop CGA R AGA R GGA G
UGG W CGG R AGG R GGG G"""
# Map each RNA codon (e.g. "AUG") to its one-letter amino-acid code,
# or to the literal string "Stop" for termination codons.
codons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2]))
def consensus(*args):
    """Return a consensus sequence from n Seq objects.

    Each output position is the most common base at that position across
    all inputs; shorter inputs are padded by zip_longest and the padding
    (None) is discarded before voting.  The id is taken from the first
    argument.
    """
    letters = []
    for column in zip_longest(*args):
        tally = Counter(column)
        del tally[None]  # drop padding from unequal-length inputs
        letters.append(tally.most_common(1)[0][0])
    return Seq("".join(letters), args[0].id)
class Base(str):
    """A single nucleotide base, modelled as a thin str subclass."""
class Seq:
    """Class for nucleotide sequences.

    Wraps a plain nucleotide string and layers sequence behaviour on
    top: integer indexing returns Base objects, ``~seq`` is the reverse
    complement, ``seq1 - seq2`` is the Hamming difference, plus helpers
    for transcription, translation, k-mers and fuzzy counting.
    """

    def __init__(self, sequence: str, id: Optional[str] = None, codons: dict = codons):
        # NOTE: `id` was previously annotated `str` despite defaulting to
        # None; Optional[str] reflects the actual contract.
        self.sequence = sequence
        self.id = id
        self.codons = codons
        # Legacy cursor for callers that invoke __next__() directly on
        # the object (see __next__); iteration itself no longer uses it.
        self.n = 0

    def __repr__(self):
        # Sequences are truncated at 60 characters to keep reprs short.
        if not self.id:
            return f"Seq({self.sequence[:60]})"
        concat = ""
        if len(self) > 60:
            concat = "..."
        return f"Seq({self.sequence[:60]}{concat}, id='{self.id}')"

    def __str__(self):
        return self.sequence

    def __len__(self) -> int:
        return len(self.sequence)

    def __invert__(self) -> Seq:
        """Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence"""
        return self.reverse_complement()

    def __eq__(self, other) -> bool:
        """Compare the string representations of two Seq objects"""
        return str(self) == str(other)

    def __add__(self, other: Seq) -> Seq:
        """Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the
        concatenation of the two objects' sequences. ID is taken from the first object."""
        new_sequence = self.sequence + other.sequence
        return Seq(new_sequence, self.id)

    def __sub__(self, other: Seq) -> int:
        """Subtracting two Seq objects (i.e. seq1 - seq2) returns the hamming difference between them"""
        # zip_longest pads the shorter sequence with None, so length
        # differences also count as mismatches.
        return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))

    def __getitem__(self, index):
        # int -> single Base; str -> all (overlapping) match positions;
        # slice -> a new Seq preserving the id.
        if type(index) == int:
            return Base(self.sequence[index])
        if type(index) == str:
            return self.find(index, overlapping=True)
        return Seq(self.sequence[index], self.id)

    def __setitem__(self, index, nt):
        self.sequence = self.sequence[:index] + nt + self.sequence[index + 1 :]

    def __iter__(self):
        # Return an independent generator per call.  The previous
        # implementation stored the cursor on the instance (self.n), so
        # nested or concurrent iterations over the same Seq corrupted
        # each other's position.
        return (Base(nt) for nt in self.sequence)

    def __next__(self):
        # Legacy support for callers that treated the Seq itself as an
        # iterator; advances the instance-level cursor set in __init__.
        if self.n < len(self):
            result = self[self.n]
            self.n += 1
            return result
        else:
            raise StopIteration

    def __contains__(self, other):
        if str(other) in str(self):
            return True
        else:
            return False

    @property
    def gc(self) -> float:
        """Return the GC content of the sequence as a percentage."""
        g = self.count("G")
        c = self.count("C")
        return (g + c) / len(self) * 100

    @property
    def counts(self) -> dict:
        """Return the counts of letters in the sequence"""
        return Counter(self.sequence)

    def to_fasta(self, line_length: int = 60) -> str:
        """Render the sequence as a FASTA record with wrapped lines."""
        formated_sequence = "\n".join(
            [str(s) for s in self.kmers(line_length, line_length)]
        )
        return f">{self.id}\n{formated_sequence}\n"

    def kmers(self, n: int, step: int = 1) -> Generator:
        """Return a generator for kmers of length n (trailing kmers may be shorter)."""
        return (
            Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step)
        )

    def count(self, string: str, max_diff: int = 0) -> int:
        """Count occurrences of `string`; with max_diff > 0, count kmers
        within that Hamming distance instead of exact matches."""
        if max_diff == 0:
            return self.sequence.count(string)
        other = Seq(string)
        return sum((kmer - other) <= max_diff for kmer in self.kmers(len(other)))

    def substitute(self, old: str, new: str, count: int = -1):
        """Return a new Seq with up to `count` replacements of old -> new."""
        return Seq(self.sequence.replace(str(old), str(new), count), self.id)

    def find(self, target: str, count: int = -1, overlapping: bool = False):
        """Return start positions of `target` (regex allowed); at most
        `count` positions unless count is -1."""
        locs = []
        if overlapping and len(target) > 1:
            # Wrap in a lookahead so the scan can restart one character
            # after each match start, yielding overlapping hits.
            target = f"(?=({target}))"
        matches = finditer(target, self.sequence)
        for i, match in enumerate(matches, 1):
            locs.append(match.start())
            if i == count:
                break
        return locs

    def find_one(self, target: str) -> Optional[int]:
        """Return the index of the first occurrence of `target`, or None.

        (Previously annotated Optional[str], but str.find returns an int.)
        """
        loc = self.sequence.find(str(target))
        if loc == -1:
            return None
        return loc

    def reverse_complement(self, rna: bool = False) -> Seq:
        """Return the reverse complement (DNA by default, RNA if rna=True)."""
        complements = {"A": "T", "T": "A", "G": "C", "C": "G"}
        if rna:
            complements = {"A": "U", "U": "A", "G": "C", "C": "G"}
        revc = "".join(complements[nt] for nt in reversed(self))
        return Seq(revc, self.id)

    def transcribe(self) -> Seq:
        """DNA -> RNA: replace T with U."""
        return Seq(self.sequence.replace("T", "U"), self.id)

    def reverse_transcribe(self) -> Seq:
        """RNA -> DNA: replace U with T."""
        return Seq(self.sequence.replace("U", "T"), self.id)

    def translate(self) -> Seq:
        """
        Return the translated sequence.
        *Currently stop signals are ignored.*
        """
        AA = "".join(
            self.codons[self.sequence[i : i + 3]]
            for i in range(0, len(self.sequence), 3)
            if self.codons[self.sequence[i : i + 3]] != "Stop"
        )
        return Seq(AA, self.id)

    def startswith(self, seq: str) -> bool:
        return self.sequence.startswith(str(seq))

    def endswith(self, seq: str) -> bool:
        return self.sequence.endswith(str(seq))
|
normal
|
{
"blob_id": "3d742505d480493fbc729e7a0febdcab3a7dc041",
"index": 9386,
"step-1": "<mask token>\n\n\nclass Seq:\n <mask token>\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n <mask token>\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n <mask token>\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n <mask token>\n <mask token>\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of 
length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n <mask token>\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n <mask token>\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Seq:\n <mask token>\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n\n def __invert__(self) ->Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) ->int:\n \"\"\"Subtracting two Seq objects (i.e. 
seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.\n sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n <mask token>\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n\n def count(self, string: str, max_diff: int=0) ->int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum(kmer - other <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n 
locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n <mask token>\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Base(str):\n \"\"\"Class for nucleotide bases\"\"\"\n pass\n\n\nclass Seq:\n \"\"\"Class for nucleotide sequences\"\"\"\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n\n def __invert__(self) ->Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) ->int:\n \"\"\"Subtracting two Seq objects (i.e. 
seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.\n sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n\n def count(self, string: str, max_diff: int=0) ->int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum(kmer - other <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in 
enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n\n def reverse_complement(self, rna: bool=False) ->Seq:\n complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n if rna:\n complements = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}\n revc = ''.join(complements[nt] for nt in reversed(self))\n return Seq(revc, self.id)\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n\n def startswith(self, seq: str) ->bool:\n return self.sequence.startswith(str(seq))\n\n def endswith(self, seq: str) ->bool:\n return self.sequence.endswith(str(seq))\n",
"step-4": "<mask token>\ncodon_table = \"\"\"UUU F CUU L AUU I GUU V\nUUC F CUC L AUC I GUC V\nUUA L CUA L AUA I GUA V\nUUG L CUG L AUG M GUG V\nUCU S CCU P ACU T GCU A\nUCC S CCC P ACC T GCC A\nUCA S CCA P ACA T GCA A\nUCG S CCG P ACG T GCG A\nUAU Y CAU H AAU N GAU D\nUAC Y CAC H AAC N GAC D\nUAA Stop CAA Q AAA K GAA E\nUAG Stop CAG Q AAG K GAG E\nUGU C CGU R AGU S GGU G\nUGC C CGC R AGC S GGC G\nUGA Stop CGA R AGA R GGA G\nUGG W CGG R AGG R GGG G\"\"\"\ncodons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2]))\n\n\ndef consensus(*args):\n \"\"\"Return a consensus sequence from n Seq objects.\"\"\"\n counts = map(Counter, zip_longest(*args))\n consensus = ''\n for c in counts:\n del c[None]\n consensus += c.most_common(1)[0][0]\n return Seq(consensus, args[0].id)\n\n\nclass Base(str):\n \"\"\"Class for nucleotide bases\"\"\"\n pass\n\n\nclass Seq:\n \"\"\"Class for nucleotide sequences\"\"\"\n\n def __init__(self, sequence: str, id: str=None, codons: dict=codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f'Seq({self.sequence[:60]})'\n concat = ''\n if len(self) > 60:\n concat = '...'\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) ->int:\n return len(self.sequence)\n\n def __invert__(self) ->Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) ->bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) ->Seq:\n \"\"\"Adding two sequence objects (i.e. Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. 
ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) ->int:\n \"\"\"Subtracting two Seq objects (i.e. seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.\n sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1:]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) ->float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count('G')\n c = self.count('C')\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) ->dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int=60) ->str:\n formated_sequence = '\\n'.join([str(s) for s in self.kmers(\n line_length, line_length)])\n return f'>{self.id}\\n{formated_sequence}\\n'\n\n def kmers(self, n: int, step: int=1) ->Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (Seq(self.sequence[i:i + n]) for i in range(0, len(self.\n sequence), step))\n\n def count(self, string: str, max_diff: int=0) ->int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum(kmer - other <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, old: str, new: str, count: int=-1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def 
find(self, target: str, count: int=-1, overlapping: bool=False):\n locs = []\n if overlapping and len(target) > 1:\n target = f'(?=({target}))'\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) ->Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n\n def reverse_complement(self, rna: bool=False) ->Seq:\n complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n if rna:\n complements = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}\n revc = ''.join(complements[nt] for nt in reversed(self))\n return Seq(revc, self.id)\n\n def transcribe(self) ->Seq:\n return Seq(self.sequence.replace('T', 'U'), self.id)\n\n def reverse_transcribe(self) ->Seq:\n return Seq(self.sequence.replace('U', 'T'), self.id)\n\n def translate(self) ->Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = ''.join(self.codons[self.sequence[i:i + 3]] for i in range(0,\n len(self.sequence), 3) if self.codons[self.sequence[i:i + 3]] !=\n 'Stop')\n return Seq(AA, self.id)\n\n def startswith(self, seq: str) ->bool:\n return self.sequence.startswith(str(seq))\n\n def endswith(self, seq: str) ->bool:\n return self.sequence.endswith(str(seq))\n",
"step-5": "from __future__ import annotations\n\nfrom typing import Generator, Optional\nfrom collections import Counter\nfrom itertools import zip_longest\nfrom re import finditer\n\ncodon_table = \"\"\"UUU F CUU L AUU I GUU V\nUUC F CUC L AUC I GUC V\nUUA L CUA L AUA I GUA V\nUUG L CUG L AUG M GUG V\nUCU S CCU P ACU T GCU A\nUCC S CCC P ACC T GCC A\nUCA S CCA P ACA T GCA A\nUCG S CCG P ACG T GCG A\nUAU Y CAU H AAU N GAU D\nUAC Y CAC H AAC N GAC D\nUAA Stop CAA Q AAA K GAA E\nUAG Stop CAG Q AAG K GAG E\nUGU C CGU R AGU S GGU G\nUGC C CGC R AGC S GGC G\nUGA Stop CGA R AGA R GGA G\nUGG W CGG R AGG R GGG G\"\"\"\n\ncodons = dict(zip(codon_table.split()[::2], codon_table.split()[1::2]))\n\n\ndef consensus(*args):\n \"\"\"Return a consensus sequence from n Seq objects.\"\"\"\n counts = map(Counter, zip_longest(*args))\n consensus = \"\"\n for c in counts:\n del c[None]\n consensus += c.most_common(1)[0][0]\n return Seq(consensus, args[0].id)\n\n\nclass Base(str):\n \"\"\"Class for nucleotide bases\"\"\"\n\n pass\n\n\nclass Seq:\n \"\"\"Class for nucleotide sequences\"\"\"\n\n def __init__(self, sequence: str, id: str = None, codons: dict = codons):\n self.sequence = sequence\n self.id = id\n self.codons = codons\n\n def __repr__(self):\n if not self.id:\n return f\"Seq({self.sequence[:60]})\"\n concat = \"\"\n if len(self) > 60:\n concat = \"...\"\n return f\"Seq({self.sequence[:60]}{concat}, id='{self.id}')\"\n\n def __str__(self):\n return self.sequence\n\n def __len__(self) -> int:\n return len(self.sequence)\n\n def __invert__(self) -> Seq:\n \"\"\"Inverting a Seq object (i.e. ~Seq) will return the reverse complement of that sequence\"\"\"\n return self.reverse_complement()\n\n def __eq__(self, other) -> bool:\n \"\"\"Compare the string representations of two Seq objects\"\"\"\n return str(self) == str(other)\n\n def __add__(self, other: Seq) -> Seq:\n \"\"\"Adding two sequence objects (i.e. 
Seq1 + Seq2) returns a new Seq object that is the\n concatenation of the two objects sequences. ID is taken from eh first object\"\"\"\n new_sequence = self.sequence + other.sequence\n return Seq(new_sequence, self.id)\n\n def __sub__(self, other: Seq) -> int:\n \"\"\"Subtracting two Seq objects (i.e. seq1 - seq2) returns the hamming difference between them\"\"\"\n return sum(i != j for i, j in zip_longest(self.sequence, other.sequence))\n\n def __getitem__(self, index):\n if type(index) == int:\n return Base(self.sequence[index])\n if type(index) == str:\n return self.find(index, overlapping=True)\n return Seq(self.sequence[index], self.id)\n\n def __setitem__(self, index, nt):\n self.sequence = self.sequence[:index] + nt + self.sequence[index + 1 :]\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self):\n result = self[self.n]\n self.n += 1\n return result\n else:\n raise StopIteration\n\n def __contains__(self, other):\n if str(other) in str(self):\n return True\n else:\n return False\n\n @property\n def gc(self) -> float:\n \"\"\"Return the GC content of the sequence\"\"\"\n g = self.count(\"G\")\n c = self.count(\"C\")\n return (g + c) / len(self) * 100\n\n @property\n def counts(self) -> dict:\n \"\"\"Return the counts of letters in the sequence\"\"\"\n return Counter(self.sequence)\n\n def to_fasta(self, line_length: int = 60) -> str:\n formated_sequence = \"\\n\".join(\n [str(s) for s in self.kmers(line_length, line_length)]\n )\n return f\">{self.id}\\n{formated_sequence}\\n\"\n\n def kmers(self, n: int, step: int = 1) -> Generator:\n \"\"\"Return a generator for kmers of length n\"\"\"\n return (\n Seq(self.sequence[i : i + n]) for i in range(0, len(self.sequence), step)\n )\n\n def count(self, string: str, max_diff: int = 0) -> int:\n if max_diff == 0:\n return self.sequence.count(string)\n other = Seq(string)\n return sum((kmer - other) <= max_diff for kmer in self.kmers(len(other)))\n\n def substitute(self, 
old: str, new: str, count: int = -1):\n return Seq(self.sequence.replace(str(old), str(new), count), self.id)\n\n def find(self, target: str, count: int = -1, overlapping: bool = False):\n locs = []\n if overlapping and len(target) > 1:\n target = f\"(?=({target}))\"\n matches = finditer(target, self.sequence)\n for i, match in enumerate(matches, 1):\n locs.append(match.start())\n if i == count:\n break\n return locs\n\n def find_one(self, target: str) -> Optional[str]:\n loc = self.sequence.find(str(target))\n if loc == -1:\n return None\n return loc\n\n def reverse_complement(self, rna: bool = False) -> Seq:\n complements = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n if rna:\n complements = {\"A\": \"U\", \"U\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n revc = \"\".join(complements[nt] for nt in reversed(self))\n return Seq(revc, self.id)\n\n def transcribe(self) -> Seq:\n return Seq(self.sequence.replace(\"T\", \"U\"), self.id)\n\n def reverse_transcribe(self) -> Seq:\n return Seq(self.sequence.replace(\"U\", \"T\"), self.id)\n\n def translate(self) -> Seq:\n \"\"\"\n Return the translated sequence.\n *Currently stop signals are ignored.*\n \"\"\"\n AA = \"\".join(\n self.codons[self.sequence[i : i + 3]]\n for i in range(0, len(self.sequence), 3)\n if self.codons[self.sequence[i : i + 3]] != \"Stop\"\n )\n return Seq(AA, self.id)\n\n def startswith(self, seq: str) -> bool:\n return self.sequence.startswith(str(seq))\n\n def endswith(self, seq: str) -> bool:\n return self.sequence.endswith(str(seq))\n",
"step-ids": [
20,
24,
31,
33,
35
]
}
|
[
20,
24,
31,
33,
35
] |
<|reserved_special_token_0|>
def enter():
global Start_menu
Start_menu = Menu()
menu_world.add_object(Start_menu, 0)
<|reserved_special_token_0|>
def handle_events():
global Start_menu, menu_time
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game_framework.quit()
elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
game_framework.quit()
elif Start_menu.start == 1:
menu_time = get_time()
game_framework.change_state(game_state)
else:
Start_menu.handle_event(event)
def update():
for game_object in menu_world.all_objects():
game_object.update()
def draw():
clear_canvas()
for game_object in menu_world.all_objects():
game_object.draw()
update_canvas()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def enter():
global Start_menu
Start_menu = Menu()
menu_world.add_object(Start_menu, 0)
<|reserved_special_token_0|>
def pause():
pass
def resume():
pass
def handle_events():
global Start_menu, menu_time
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game_framework.quit()
elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
game_framework.quit()
elif Start_menu.start == 1:
menu_time = get_time()
game_framework.change_state(game_state)
else:
Start_menu.handle_event(event)
def update():
for game_object in menu_world.all_objects():
game_object.update()
def draw():
clear_canvas()
for game_object in menu_world.all_objects():
game_object.draw()
update_canvas()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def enter():
global Start_menu
Start_menu = Menu()
menu_world.add_object(Start_menu, 0)
def exit():
menu_world.clear()
def pause():
pass
def resume():
pass
def handle_events():
global Start_menu, menu_time
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game_framework.quit()
elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
game_framework.quit()
elif Start_menu.start == 1:
menu_time = get_time()
game_framework.change_state(game_state)
else:
Start_menu.handle_event(event)
def update():
for game_object in menu_world.all_objects():
game_object.update()
def draw():
clear_canvas()
for game_object in menu_world.all_objects():
game_object.draw()
update_canvas()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
name = 'MenuState'
boy = None
Start_menu = None
menu_time = None
def enter():
global Start_menu
Start_menu = Menu()
menu_world.add_object(Start_menu, 0)
def exit():
menu_world.clear()
def pause():
pass
def resume():
pass
def handle_events():
global Start_menu, menu_time
events = get_events()
for event in events:
if event.type == SDL_QUIT:
game_framework.quit()
elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:
game_framework.quit()
elif Start_menu.start == 1:
menu_time = get_time()
game_framework.change_state(game_state)
else:
Start_menu.handle_event(event)
def update():
for game_object in menu_world.all_objects():
game_object.update()
def draw():
clear_canvas()
for game_object in menu_world.all_objects():
game_object.draw()
update_canvas()
<|reserved_special_token_1|>
import random
import json
import os
from pico2d import *
import game_framework
import game_world
import menu_world
import game_state
from Start_menu import Menu
# Identifier of this game state, used by the framework for bookkeeping.
name = "MenuState"
boy = None  # placeholder for the player object; unused while in the menu
# The Menu instance shown while this state is active; created in enter().
Start_menu = None
# Timestamp (get_time()) captured when the player leaves the menu.
menu_time =None
def enter():
    """State-entry hook: build the start menu and register it in the menu world."""
    global Start_menu
    menu = Menu()
    Start_menu = menu
    menu_world.add_object(menu, 0)
def exit():
    """State-exit hook: drop every object registered in the menu world."""
    menu_world.clear()
def pause():
    """No-op: the menu state has nothing to suspend."""
def resume():
    """No-op: the menu state has nothing to restore."""
def handle_events():
    """Drain pending SDL events and dispatch them.

    Quit on window close or ESC; once the menu flags start == 1, record
    the departure time and switch to the game state; otherwise forward
    the event to the menu itself.
    """
    global Start_menu, menu_time
    for event in get_events():
        wants_quit = event.type == SDL_QUIT or (
            event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE
        )
        if wants_quit:
            game_framework.quit()
        elif Start_menu.start == 1:
            menu_time = get_time()
            game_framework.change_state(game_state)
        else:
            Start_menu.handle_event(event)
def update():
    """Advance every registered menu object by one tick."""
    for obj in menu_world.all_objects():
        obj.update()
def draw():
clear_canvas()
for game_object in menu_world.all_objects():
game_object.draw()
update_canvas()
|
flexible
|
{
"blob_id": "fee2ddca5888c9db00d2d7a4fe11ba20c4e31685",
"index": 1909,
"step-1": "<mask token>\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\n<mask token>\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-2": "<mask token>\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\n<mask token>\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-3": "<mask token>\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\ndef exit():\n menu_world.clear()\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-4": "<mask token>\nname = 'MenuState'\nboy = None\nStart_menu = None\nmenu_time = None\n\n\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\n\ndef exit():\n menu_world.clear()\n\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu, menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start == 1:\n menu_time = get_time()\n game_framework.change_state(game_state)\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n",
"step-5": "import random\nimport json\nimport os\n\nfrom pico2d import *\nimport game_framework\nimport game_world\nimport menu_world\nimport game_state\n\n\nfrom Start_menu import Menu\n\nname = \"MenuState\"\n\nboy = None\nStart_menu = None\nmenu_time =None\ndef enter():\n global Start_menu\n Start_menu = Menu()\n menu_world.add_object(Start_menu, 0)\n\ndef exit():\n menu_world.clear()\n\ndef pause():\n pass\n\n\ndef resume():\n pass\n\n\ndef handle_events():\n global Start_menu,menu_time\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN and event.key == SDLK_ESCAPE:\n game_framework.quit()\n elif Start_menu.start ==1:\n menu_time =get_time()\n game_framework.change_state(game_state)\n\n #game_framework.quit()\n else:\n Start_menu.handle_event(event)\n\n\ndef update():\n for game_object in menu_world.all_objects():\n game_object.update()\n\n\n\ndef draw():\n clear_canvas()\n for game_object in menu_world.all_objects():\n game_object.draw()\n update_canvas()\n\n\n\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tkinter.filedialog.askopenfilename()
<|reserved_special_token_0|>
from_file.close()
<|reserved_special_token_0|>
to_file.write('Copy\n')
to_file.write(contents)
to_file.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
tkinter.filedialog.askopenfilename()
from_filename = tkinter.filedialog.askopenfilename()
to_filename = tkinter.filedialog.asksaveasfilename()
from_file = open(from_filename, 'r')
contents = from_file.read()
from_file.close()
to_file = open(to_filename, 'w')
to_file.write('Copy\n')
to_file.write(contents)
to_file.close()
<|reserved_special_token_1|>
import tkinter.filedialog
tkinter.filedialog.askopenfilename()
from_filename = tkinter.filedialog.askopenfilename()
to_filename = tkinter.filedialog.asksaveasfilename()
from_file = open(from_filename, 'r')
contents = from_file.read()
from_file.close()
to_file = open(to_filename, 'w')
to_file.write('Copy\n')
to_file.write(contents)
to_file.close()
<|reserved_special_token_1|>
# Write files
# Writing to a file within a Python program:
# In order to write to a file, we use file.write(str).
# This method writes a string to a file.
# The method write() works like Python's print() function, except it does not add a newline ("\n") character.
# File dialogs:
# Module tkinter has a submodule called filedialog. We import it like this:
import tkinter.filedialog
# Function askopenfilename() asks the user to select a file to open:
tkinter.filedialog.askopenfilename()
# This function returns the full path to the file, so we can use that when we call the function open() to open that file.
from_filename = tkinter.filedialog.askopenfilename()
# Function asksaveasfilename() asks the user to select a file to save to, and provides a warning if the file already exists.
to_filename = tkinter.filedialog.asksaveasfilename()
### Example ###
# Below is a program that copies a file, but puts "Copy" as the first line of the copied file.
# First prompt the user to pick a file, then open the file that we want to read from and get the contents:
from_file = open(from_filename, 'r')
contents = from_file.read()
from_file.close()
# Now we can open the file we want to write to and write the contents:
to_file = open(to_filename, 'w')
to_file.write('Copy\n') # we have to add the newline ourselves
to_file.write(contents) # now write the contents of the file
to_file.close()
|
flexible
|
{
"blob_id": "0372cdbae8c5b0bbcbade86a5a7de28c1ee513b1",
"index": 2486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntkinter.filedialog.askopenfilename()\n<mask token>\nfrom_file.close()\n<mask token>\nto_file.write('Copy\\n')\nto_file.write(contents)\nto_file.close()\n",
"step-3": "<mask token>\ntkinter.filedialog.askopenfilename()\nfrom_filename = tkinter.filedialog.askopenfilename()\nto_filename = tkinter.filedialog.asksaveasfilename()\nfrom_file = open(from_filename, 'r')\ncontents = from_file.read()\nfrom_file.close()\nto_file = open(to_filename, 'w')\nto_file.write('Copy\\n')\nto_file.write(contents)\nto_file.close()\n",
"step-4": "import tkinter.filedialog\ntkinter.filedialog.askopenfilename()\nfrom_filename = tkinter.filedialog.askopenfilename()\nto_filename = tkinter.filedialog.asksaveasfilename()\nfrom_file = open(from_filename, 'r')\ncontents = from_file.read()\nfrom_file.close()\nto_file = open(to_filename, 'w')\nto_file.write('Copy\\n')\nto_file.write(contents)\nto_file.close()\n",
"step-5": "# Write files\n\n# Writing to a file within a Python program:\n# In order to write to a file, we use file.write(str).\n# This method writes a string to a file.\n# The method write() works like Python's print() function, except it does not add a newline (\"\\n\") character.\n\n# File dialogs:\n\n# Module tkinter has a submodule called filedialog. We import it like this:\nimport tkinter.filedialog\n\n# Function askopenfilename() asks the user to select a file to open:\ntkinter.filedialog.askopenfilename()\n\n# This function returns the full path to the file, so we can use that when we call the function open() to open that file.\nfrom_filename = tkinter.filedialog.askopenfilename()\n\n# Function asksaveasfilename() asks the user to select a file to save to, and provides a warning if the file already exists.\nto_filename = tkinter.filedialog.asksaveasfilename()\n\n### Example ###\n\n# Below is a program that copies a file, but puts \"Copy\" as the first line of the copied file.\n# First prompt the user to pick a file, then open the file that we want to read from and get the contents:\nfrom_file = open(from_filename, 'r')\ncontents = from_file.read()\nfrom_file.close()\n\n# Now we can open the file we want to write to and write the contents:\nto_file = open(to_filename, 'w')\nto_file.write('Copy\\n') # we have to add the newline ourselves\nto_file.write(contents) # now write the contents of the file\nto_file.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class userSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'password', 'email']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class dataSerializer(serializers.ModelSerializer):
class Meta:
model = data
fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']
class userSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'password', 'email']
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import data
from django.contrib.auth.models import User
class dataSerializer(serializers.ModelSerializer):
class Meta:
model = data
fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']
class userSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['username', 'password', 'email']
|
flexible
|
{
"blob_id": "972c479ea40232e14fbf678ca2ccf9716e473fe8",
"index": 9736,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass userSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n",
"step-3": "<mask token>\n\n\nclass dataSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = data\n fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']\n\n\nclass userSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n",
"step-4": "from rest_framework import serializers\nfrom .models import data\nfrom django.contrib.auth.models import User\n\n\nclass dataSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = data\n fields = ['id', 'task', 'duedate', 'person', 'done', 'task_user']\n\n\nclass userSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def shuffle():
l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
random.shuffle(l_digits)
return ''.join(l_digits)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def shuffle():
l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
random.shuffle(l_digits)
return ''.join(l_digits)
with open('hello.txt', 'r+') as f:
map = mmap.mmap(f.fileno(), 1000)
l_i = 0
for l_digit in shuffle():
map[l_i] = l_digit
l_i += 1
<|reserved_special_token_1|>
import mmap
import random
def shuffle():
l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
random.shuffle(l_digits)
return ''.join(l_digits)
with open('hello.txt', 'r+') as f:
map = mmap.mmap(f.fileno(), 1000)
l_i = 0
for l_digit in shuffle():
map[l_i] = l_digit
l_i += 1
<|reserved_special_token_1|>
import mmap;
import random;
def shuffle():
l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];
random.shuffle(l_digits);
return "".join(l_digits);
with open("hello.txt", "r+") as f:
map = mmap.mmap(f.fileno(), 1000);
l_i = 0;
for l_digit in shuffle():
map[l_i] = l_digit;
l_i += 1;
|
flexible
|
{
"blob_id": "b0468e58c4d0387a92ba96e8fb8a876ece256c78",
"index": 6507,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef shuffle():\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n random.shuffle(l_digits)\n return ''.join(l_digits)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef shuffle():\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n random.shuffle(l_digits)\n return ''.join(l_digits)\n\n\nwith open('hello.txt', 'r+') as f:\n map = mmap.mmap(f.fileno(), 1000)\n l_i = 0\n for l_digit in shuffle():\n map[l_i] = l_digit\n l_i += 1\n",
"step-4": "import mmap\nimport random\n\n\ndef shuffle():\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n random.shuffle(l_digits)\n return ''.join(l_digits)\n\n\nwith open('hello.txt', 'r+') as f:\n map = mmap.mmap(f.fileno(), 1000)\n l_i = 0\n for l_digit in shuffle():\n map[l_i] = l_digit\n l_i += 1\n",
"step-5": "import mmap;\r\nimport random;\r\n\r\ndef shuffle():\r\n l_digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'];\r\n random.shuffle(l_digits);\r\n\r\n return \"\".join(l_digits);\r\n\r\n\r\nwith open(\"hello.txt\", \"r+\") as f:\r\n map = mmap.mmap(f.fileno(), 1000);\r\n l_i = 0;\r\n\r\n for l_digit in shuffle():\r\n map[l_i] = l_digit;\r\n l_i += 1;",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class RegisterView(View):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class HomeView(View):
def get(self, request):
return HttpResponse(f'Home Page | Logged in as - {request.user}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegisterView(View):
def get(self, request):
return render(request, 'users/register.html', locals())
<|reserved_special_token_0|>
class HomeView(View):
def get(self, request):
return HttpResponse(f'Home Page | Logged in as - {request.user}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegisterView(View):
def get(self, request):
return render(request, 'users/register.html', locals())
def post(self, request):
try:
user = User(first_name=request.POST.get('first_name'),
last_name=request.POST.get('last_name'), email=request.POST
.get('email'), username=request.POST.get('email'))
user.set_password(request.POST.get('password'))
user.save()
except Exception as e:
print(e)
return render(request, 'users/register.html', locals())
return HttpResponseRedirect('/users/login')
class HomeView(View):
def get(self, request):
return HttpResponse(f'Home Page | Logged in as - {request.user}')
<|reserved_special_token_1|>
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.views.generic import View
from django.contrib.auth import login
from django.contrib.auth.models import User
class RegisterView(View):
def get(self, request):
return render(request, 'users/register.html', locals())
def post(self, request):
try:
user = User(first_name=request.POST.get('first_name'),
last_name=request.POST.get('last_name'), email=request.POST
.get('email'), username=request.POST.get('email'))
user.set_password(request.POST.get('password'))
user.save()
except Exception as e:
print(e)
return render(request, 'users/register.html', locals())
return HttpResponseRedirect('/users/login')
class HomeView(View):
def get(self, request):
return HttpResponse(f'Home Page | Logged in as - {request.user}')
<|reserved_special_token_1|>
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.views.generic import View
from django.contrib.auth import login
from django.contrib.auth.models import User
class RegisterView(View):
def get(self, request):
return render(request, 'users/register.html', locals())
def post(self, request):
try:
user = User(first_name=request.POST.get('first_name'), last_name=request.POST.get(
'last_name'), email=request.POST.get('email'), username=request.POST.get('email'))
user.set_password(request.POST.get('password'))
user.save()
except Exception as e:
print(e)
return render(request, 'users/register.html', locals())
return HttpResponseRedirect('/users/login')
class HomeView(View):
def get(self, request):
return HttpResponse(f"Home Page | Logged in as - {request.user}")
|
flexible
|
{
"blob_id": "c9191df0fc04818b4df9c93a9479f75a60688aa9",
"index": 6372,
"step-1": "<mask token>\n\n\nclass RegisterView(View):\n <mask token>\n <mask token>\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-2": "<mask token>\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n <mask token>\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-3": "<mask token>\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'), email=request.POST\n .get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-4": "from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'), email=request.POST\n .get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n",
"step-5": "from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(View):\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'), last_name=request.POST.get(\n 'last_name'), email=request.POST.get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n def get(self, request):\n return HttpResponse(f\"Home Page | Logged in as - {request.user}\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class Point(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Point(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __repr__(self):
return 'Point({},{})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
assert isinstance(other, Point)
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class Point(object):
"""A point is an ordered pair, (x,y)"""
def __init__(self, x, y):
assert isinstance(x, numbers.Number)
assert isinstance(y, numbers.Number)
self.x = x
self.y = y
log.debug('Created Point {}'.format(repr(self)))
def __repr__(self):
return 'Point({},{})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
assert isinstance(other, Point)
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numbers
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class Point(object):
"""A point is an ordered pair, (x,y)"""
def __init__(self, x, y):
assert isinstance(x, numbers.Number)
assert isinstance(y, numbers.Number)
self.x = x
self.y = y
log.debug('Created Point {}'.format(repr(self)))
def __repr__(self):
return 'Point({},{})'.format(self.x, self.y)
def __str__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
assert isinstance(other, Point)
return self.x == other.x and self.y == other.y
class Rect(object):
"""A rectangle identified by its lower left
and upper right corners.
"""
def __init__(self, ll, ur):
"""Initialize rectangle with ll and ur as corners."""
log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))
self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
log.debug('ll will be {}'.format(self.ll))
self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
log.debug('ur will be {}'.format(self.ur))
log.debug('Created rect {}'.format(repr(self)))
def __repr__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __str__(self):
return 'Rect({},{})'.format(self.ll, self.ur)
def __eq__(self, other):
assert isinstance(other, Rect)
return self.ll == other.ll and self.ur == other.ur
def overlaps(self, other):
"""r1.overlaps(r2) if there is an area of positive
size within r1 and also within r2. "Of positive size"
means that touching at a corner or along an edge is
not enough ... the area of overlap must be positive.
"""
if self.ll.x >= other.ur.x:
return False
if self.ll.y >= other.ur.y:
return False
if self.ur.x <= other.ll.x:
return False
if self.ur.y <= other.ll.y:
return False
return True
def intersect(self, other):
"""Region of overlap, or (0,0),(0,0) if none"""
if self.overlaps(other):
return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,
other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur
.y, other.ur.y)))
else:
return Rect(Point(0, 0), Point(0, 0))
<|reserved_special_token_1|>
"""
Rectangles: Compute overlapping region of two rectangles.
Point(x: number, y: number): Cartesian coordinate pair
Rect(ll: Point, ur: Point): A rectangle defined by lower left
and upper right coordinates
Rect.overlaps(other: Rect) -> boolean: True if non-empty overlap
Rect.intersect(other: Rect) -> Rect:
region of intersection if non-empty,
or empty Rect from 0,0 to 0,0 if not Rect.overlaps(other)
CIS 211 Project 1
Author: Noah Tigner
UO email: nzt@uoregon.edu
"""
import numbers
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# To turn on debugging output, change the above to
# log.setLevel(logging.DEBUG)
class Point(object):
"""A point is an ordered pair, (x,y)"""
def __init__(self, x, y):
assert isinstance(x, numbers.Number)
assert isinstance(y, numbers.Number)
self.x = x
self.y = y
log.debug("Created Point {}".format(repr(self)))
def __repr__(self):
return "Point({},{})".format(self.x, self.y)
def __str__(self):
return "({}, {})".format(self.x, self.y)
def __eq__(self, other):
assert(isinstance(other, Point))
return self.x == other.x and self.y == other.y
class Rect(object):
    """A rectangle identified by its lower left
    and upper right corners.
    """

    def __init__(self, ll, ur):
        """Initialize rectangle with ll and ur as corners.

        The corner Points may be given in any order; they are normalized
        so that self.ll is truly lower-left and self.ur upper-right.
        """
        log.debug("Rect from ll {}, ur {}".format(repr(ll), repr(ur)))
        # Ensure ll really is lower left and ur really is upper right
        self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))
        log.debug("ll will be {}".format(self.ll))
        self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))
        log.debug("ur will be {}".format(self.ur))
        log.debug("Created rect {}".format(repr(self)))

    def __repr__(self):
        return "Rect({},{})".format(self.ll, self.ur)

    def __str__(self):
        return "Rect({},{})".format(self.ll, self.ur)

    def __eq__(self, other):
        """Rectangles are equal iff both corner points match.

        Returns NotImplemented for non-Rect operands instead of the
        original `assert`, which is stripped under `python -O` and raised
        AssertionError on any cross-type comparison.
        """
        if not isinstance(other, Rect):
            return NotImplemented
        return self.ll == other.ll and self.ur == other.ur

    def overlaps(self, other):
        """r1.overlaps(r2) if there is an area of positive
        size within r1 and also within r2. "Of positive size"
        means that touching at a corner or along an edge is
        not enough ... the area of overlap must be positive.
        """
        # Each guard rejects one way the rectangles can be disjoint
        # (strict inequality, so edge/corner contact is not an overlap).
        if self.ll.x >= other.ur.x:
            return False
        if self.ll.y >= other.ur.y:
            return False
        if self.ur.x <= other.ll.x:
            return False
        if self.ur.y <= other.ll.y:
            return False
        return True

    def intersect(self, other):
        """Region of overlap, or (0,0),(0,0) if none"""
        if self.overlaps(other):
            # Intersection is bounded by the larger of the lower-left
            # corners and the smaller of the upper-right corners.
            return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y, other.ll.y)),
                        Point(min(self.ur.x, other.ur.x), min(self.ur.y, other.ur.y)))
        else:
            return Rect(Point(0, 0), Point(0, 0))
|
flexible
|
{
"blob_id": "7b9660bba6fcb8c725251971f3733a1cc915c0e7",
"index": 760,
"step-1": "<mask token>\n\n\nclass Point(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Rect(object):\n \"\"\"A rectangle identified by its lower left\n and upper right corners.\n \"\"\"\n\n def __init__(self, ll, ur):\n \"\"\"Initialize rectangle with ll and ur as corners.\"\"\"\n log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))\n self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))\n log.debug('ll will be {}'.format(self.ll))\n self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))\n log.debug('ur will be {}'.format(self.ur))\n log.debug('Created rect {}'.format(repr(self)))\n\n def __repr__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __str__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __eq__(self, other):\n assert isinstance(other, Rect)\n return self.ll == other.ll and self.ur == other.ur\n\n def overlaps(self, other):\n \"\"\"r1.overlaps(r2) if there is an area of positive\n size within r1 and also within r2. \"Of positive size\"\n means that touching at a corner or along an edge is\n not enough ... the area of overlap must be positive.\n \"\"\"\n if self.ll.x >= other.ur.x:\n return False\n if self.ll.y >= other.ur.y:\n return False\n if self.ur.x <= other.ll.x:\n return False\n if self.ur.y <= other.ll.y:\n return False\n return True\n\n def intersect(self, other):\n \"\"\"Region of overlap, or (0,0),(0,0) if none\"\"\"\n if self.overlaps(other):\n return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,\n other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur\n .y, other.ur.y)))\n else:\n return Rect(Point(0, 0), Point(0, 0))\n",
"step-2": "<mask token>\n\n\nclass Point(object):\n <mask token>\n <mask token>\n\n def __repr__(self):\n return 'Point({},{})'.format(self.x, self.y)\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n\n def __eq__(self, other):\n assert isinstance(other, Point)\n return self.x == other.x and self.y == other.y\n\n\nclass Rect(object):\n \"\"\"A rectangle identified by its lower left\n and upper right corners.\n \"\"\"\n\n def __init__(self, ll, ur):\n \"\"\"Initialize rectangle with ll and ur as corners.\"\"\"\n log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))\n self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))\n log.debug('ll will be {}'.format(self.ll))\n self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))\n log.debug('ur will be {}'.format(self.ur))\n log.debug('Created rect {}'.format(repr(self)))\n\n def __repr__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __str__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __eq__(self, other):\n assert isinstance(other, Rect)\n return self.ll == other.ll and self.ur == other.ur\n\n def overlaps(self, other):\n \"\"\"r1.overlaps(r2) if there is an area of positive\n size within r1 and also within r2. \"Of positive size\"\n means that touching at a corner or along an edge is\n not enough ... the area of overlap must be positive.\n \"\"\"\n if self.ll.x >= other.ur.x:\n return False\n if self.ll.y >= other.ur.y:\n return False\n if self.ur.x <= other.ll.x:\n return False\n if self.ur.y <= other.ll.y:\n return False\n return True\n\n def intersect(self, other):\n \"\"\"Region of overlap, or (0,0),(0,0) if none\"\"\"\n if self.overlaps(other):\n return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,\n other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur\n .y, other.ur.y)))\n else:\n return Rect(Point(0, 0), Point(0, 0))\n",
"step-3": "<mask token>\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\nclass Point(object):\n \"\"\"A point is an ordered pair, (x,y)\"\"\"\n\n def __init__(self, x, y):\n assert isinstance(x, numbers.Number)\n assert isinstance(y, numbers.Number)\n self.x = x\n self.y = y\n log.debug('Created Point {}'.format(repr(self)))\n\n def __repr__(self):\n return 'Point({},{})'.format(self.x, self.y)\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n\n def __eq__(self, other):\n assert isinstance(other, Point)\n return self.x == other.x and self.y == other.y\n\n\nclass Rect(object):\n \"\"\"A rectangle identified by its lower left\n and upper right corners.\n \"\"\"\n\n def __init__(self, ll, ur):\n \"\"\"Initialize rectangle with ll and ur as corners.\"\"\"\n log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))\n self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))\n log.debug('ll will be {}'.format(self.ll))\n self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))\n log.debug('ur will be {}'.format(self.ur))\n log.debug('Created rect {}'.format(repr(self)))\n\n def __repr__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __str__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __eq__(self, other):\n assert isinstance(other, Rect)\n return self.ll == other.ll and self.ur == other.ur\n\n def overlaps(self, other):\n \"\"\"r1.overlaps(r2) if there is an area of positive\n size within r1 and also within r2. \"Of positive size\"\n means that touching at a corner or along an edge is\n not enough ... 
the area of overlap must be positive.\n \"\"\"\n if self.ll.x >= other.ur.x:\n return False\n if self.ll.y >= other.ur.y:\n return False\n if self.ur.x <= other.ll.x:\n return False\n if self.ur.y <= other.ll.y:\n return False\n return True\n\n def intersect(self, other):\n \"\"\"Region of overlap, or (0,0),(0,0) if none\"\"\"\n if self.overlaps(other):\n return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,\n other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur\n .y, other.ur.y)))\n else:\n return Rect(Point(0, 0), Point(0, 0))\n",
"step-4": "<mask token>\nimport numbers\nimport logging\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\nclass Point(object):\n \"\"\"A point is an ordered pair, (x,y)\"\"\"\n\n def __init__(self, x, y):\n assert isinstance(x, numbers.Number)\n assert isinstance(y, numbers.Number)\n self.x = x\n self.y = y\n log.debug('Created Point {}'.format(repr(self)))\n\n def __repr__(self):\n return 'Point({},{})'.format(self.x, self.y)\n\n def __str__(self):\n return '({}, {})'.format(self.x, self.y)\n\n def __eq__(self, other):\n assert isinstance(other, Point)\n return self.x == other.x and self.y == other.y\n\n\nclass Rect(object):\n \"\"\"A rectangle identified by its lower left\n and upper right corners.\n \"\"\"\n\n def __init__(self, ll, ur):\n \"\"\"Initialize rectangle with ll and ur as corners.\"\"\"\n log.debug('Rect from ll {}, ur {}'.format(repr(ll), repr(ur)))\n self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))\n log.debug('ll will be {}'.format(self.ll))\n self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))\n log.debug('ur will be {}'.format(self.ur))\n log.debug('Created rect {}'.format(repr(self)))\n\n def __repr__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __str__(self):\n return 'Rect({},{})'.format(self.ll, self.ur)\n\n def __eq__(self, other):\n assert isinstance(other, Rect)\n return self.ll == other.ll and self.ur == other.ur\n\n def overlaps(self, other):\n \"\"\"r1.overlaps(r2) if there is an area of positive\n size within r1 and also within r2. \"Of positive size\"\n means that touching at a corner or along an edge is\n not enough ... 
the area of overlap must be positive.\n \"\"\"\n if self.ll.x >= other.ur.x:\n return False\n if self.ll.y >= other.ur.y:\n return False\n if self.ur.x <= other.ll.x:\n return False\n if self.ur.y <= other.ll.y:\n return False\n return True\n\n def intersect(self, other):\n \"\"\"Region of overlap, or (0,0),(0,0) if none\"\"\"\n if self.overlaps(other):\n return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y,\n other.ll.y)), Point(min(self.ur.x, other.ur.x), min(self.ur\n .y, other.ur.y)))\n else:\n return Rect(Point(0, 0), Point(0, 0))\n",
"step-5": "\"\"\"\nRectangles: Compute overlapping region of two rectangles.\n Point(x: number, y: number): Cartesian coordinate pair\n Rect(ll: Point, ur: Point): A rectangle defined by lower left\n and upper right coordinates\n Rect.overlaps(other: Rect) -> boolean: True if non-empty overlap\n Rect.intersect(other: Rect) -> Rect:\n region of intersection if non-empty,\n or empty Rect from 0,0 to 0,0 if not Rect.overlaps(other)\n\nCIS 211 Project 1\nAuthor: Noah Tigner\nUO email: nzt@uoregon.edu\n\"\"\"\n\nimport numbers\n\nimport logging\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n# To turn on debugging output, change the above to\n# log.setLevel(logging.DEBUG)\n\n\nclass Point(object):\n \"\"\"A point is an ordered pair, (x,y)\"\"\"\n\n def __init__(self, x, y):\n assert isinstance(x, numbers.Number)\n assert isinstance(y, numbers.Number)\n self.x = x\n self.y = y\n log.debug(\"Created Point {}\".format(repr(self)))\n\n def __repr__(self):\n return \"Point({},{})\".format(self.x, self.y)\n\n def __str__(self):\n return \"({}, {})\".format(self.x, self.y)\n\n def __eq__(self, other):\n assert(isinstance(other, Point))\n return self.x == other.x and self.y == other.y\n\n\nclass Rect(object):\n \"\"\"A rectangle identified by its lower left\n and upper right corners.\n \"\"\"\n\n def __init__(self, ll, ur):\n \"\"\"Initialize rectangle with ll and ur as corners.\"\"\"\n log.debug(\"Rect from ll {}, ur {}\".format(repr(ll), repr(ur)))\n # Ensure ll really is lower left and ur really is upper right\n self.ll = Point(min(ll.x, ur.x), min(ll.y, ur.y))\n log.debug(\"ll will be {}\".format(self.ll))\n self.ur = Point(max(ll.x, ur.x), max(ll.y, ur.y))\n log.debug(\"ur will be {}\".format(self.ur))\n log.debug(\"Created rect {}\".format(repr(self)))\n\n def __repr__(self):\n return \"Rect({},{})\".format(self.ll, self.ur)\n\n def __str__(self):\n return \"Rect({},{})\".format(self.ll, self.ur)\n\n def __eq__(self, other):\n assert 
isinstance(other, Rect)\n return self.ll == other.ll and self.ur == other.ur\n\n def overlaps(self, other):\n \"\"\"r1.overlaps(r2) if there is an area of positive\n size within r1 and also within r2. \"Of positive size\"\n means that touching at a corner or along an edge is\n not enough ... the area of overlap must be positive.\n \"\"\"\n\n if self.ll.x >= other.ur.x:\n return False\n \n if self.ll.y >= other.ur.y:\n return False\n \n if self.ur.x <= other.ll.x:\n return False\n \n if self.ur.y <= other.ll.y:\n return False\n \n return True\n \n \n def intersect(self, other):\n \"\"\"Region of overlap, or (0,0),(0,0) if none\"\"\"\n \n if self.overlaps(other):\n return Rect(Point(max(self.ll.x, other.ll.x), max(self.ll.y, other.ll.y)),\n Point(min(self.ur.x, other.ur.x), min(self.ur.y, other.ur.y)))\n\n\n else:\n return Rect(Point(0, 0), Point(0, 0))\n",
"step-ids": [
9,
12,
16,
17,
18
]
}
|
[
9,
12,
16,
17,
18
] |
import pickle
import torch
# Reproduction script: load pickled keyword arguments and feed them to
# torch.nn.functional.adaptive_avg_pool3d.
# SECURITY NOTE: pickle.load executes arbitrary code embedded in the file —
# only run this against a trusted .p blob.
# NOTE(review): the open() handle is never closed; presumably acceptable
# for a throwaway repro script, but a `with` block would be safer.
data = pickle.load(open('dd0eb7901523d494d4aa324f474c782063e9e231.p', 'rb'))
torch.nn.functional.adaptive_avg_pool3d(**data)
|
normal
|
{
"blob_id": "20d09a616133295a6162a7ab1d7970ccbaf6de95",
"index": 1331,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntorch.nn.functional.adaptive_avg_pool3d(**data)\n",
"step-3": "<mask token>\ndata = pickle.load(open('dd0eb7901523d494d4aa324f474c782063e9e231.p', 'rb'))\ntorch.nn.functional.adaptive_avg_pool3d(**data)\n",
"step-4": "import pickle\nimport torch\ndata = pickle.load(open('dd0eb7901523d494d4aa324f474c782063e9e231.p', 'rb'))\ntorch.nn.functional.adaptive_avg_pool3d(**data)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Article:
<|reserved_special_token_0|>
title: str
target: str
g: float
f: float
parent: typing.Union[Article, Type(None)]
heuristic: Callable[[str, str], float]
def __init__(self, title: str, target: str, parent: typing.Union[
Article, Type(None)], heuristic: Callable[[str, str], float]):
"""
Initializes based on [urls/titles/nodes]
"""
self.title = title
self.target = target
self.heuristic = heuristic
if parent:
self.parent = parent
self.g = parent.g + 1
else:
self.parent = None
self.g = 0
h = self.heuristic(title, target)
self.f = self.g + h
def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:
"""
Return list of connected (children) article object using the wikipedia API functions.
"""
s = requests.Session()
url = 'https://en.wikipedia.org/w/api.php'
if cont is None:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max'}
else:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}
titles_so_far = []
r = s.get(url=url, params=params)
data = r.json()
pages = data['query']['pages']
for k, v in pages.items():
if 'links' not in v:
return []
for l in v['links']:
titles_so_far.append(l['title'])
if 'batchcomplete' in data:
return titles_so_far
else:
contHolder = data['continue']['plcontinue']
titles_so_far.extend(self.get_children(contHolder))
return titles_so_far
def get_first_x(self, lst: List, x: int) ->List:
lst_so_far = []
for i in range(x):
lst_so_far.append(lst[i])
return lst_so_far
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __eq__(self, other):
return compare_titles(self.title, other.title)
def __ne__(self, other):
return not compare_titles(self.title, other.title)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PQ:
"""
MinHeap implementation of a priority queue for A* search.
"""
heap = []
def __init__(self):
self.heap = []
def insert(self, to_insert: Article) ->None:
"""
Insert new element in Priority queue
"""
heapq.heappush(self.heap, to_insert)
def pop(self) ->Article:
"""
pops minimum element from priority queue
"""
return heapq.heappop(self.heap)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Article:
<|reserved_special_token_0|>
title: str
target: str
g: float
f: float
parent: typing.Union[Article, Type(None)]
heuristic: Callable[[str, str], float]
def __init__(self, title: str, target: str, parent: typing.Union[
Article, Type(None)], heuristic: Callable[[str, str], float]):
"""
Initializes based on [urls/titles/nodes]
"""
self.title = title
self.target = target
self.heuristic = heuristic
if parent:
self.parent = parent
self.g = parent.g + 1
else:
self.parent = None
self.g = 0
h = self.heuristic(title, target)
self.f = self.g + h
def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:
"""
Return list of connected (children) article object using the wikipedia API functions.
"""
s = requests.Session()
url = 'https://en.wikipedia.org/w/api.php'
if cont is None:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max'}
else:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}
titles_so_far = []
r = s.get(url=url, params=params)
data = r.json()
pages = data['query']['pages']
for k, v in pages.items():
if 'links' not in v:
return []
for l in v['links']:
titles_so_far.append(l['title'])
if 'batchcomplete' in data:
return titles_so_far
else:
contHolder = data['continue']['plcontinue']
titles_so_far.extend(self.get_children(contHolder))
return titles_so_far
def get_first_x(self, lst: List, x: int) ->List:
lst_so_far = []
for i in range(x):
lst_so_far.append(lst[i])
return lst_so_far
def __lt__(self, other):
return self.f < other.f
def __le__(self, other):
return self.f <= other.f
def __eq__(self, other):
return compare_titles(self.title, other.title)
def __ne__(self, other):
return not compare_titles(self.title, other.title)
<|reserved_special_token_0|>
def __ge__(self, other):
return self.f >= other.f
class PQ:
"""
MinHeap implementation of a priority queue for A* search.
"""
heap = []
def __init__(self):
self.heap = []
def insert(self, to_insert: Article) ->None:
"""
Insert new element in Priority queue
"""
heapq.heappush(self.heap, to_insert)
def pop(self) ->Article:
"""
pops minimum element from priority queue
"""
return heapq.heappop(self.heap)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Article:
"""
This is the article class that represents each Wikipedia article.
Instance Variables:
- title: str that represents the title of the article
- target: the final target given by the user
- g:
- f:
"""
title: str
target: str
g: float
f: float
parent: typing.Union[Article, Type(None)]
heuristic: Callable[[str, str], float]
def __init__(self, title: str, target: str, parent: typing.Union[
Article, Type(None)], heuristic: Callable[[str, str], float]):
"""
Initializes based on [urls/titles/nodes]
"""
self.title = title
self.target = target
self.heuristic = heuristic
if parent:
self.parent = parent
self.g = parent.g + 1
else:
self.parent = None
self.g = 0
h = self.heuristic(title, target)
self.f = self.g + h
def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:
"""
Return list of connected (children) article object using the wikipedia API functions.
"""
s = requests.Session()
url = 'https://en.wikipedia.org/w/api.php'
if cont is None:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max'}
else:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}
titles_so_far = []
r = s.get(url=url, params=params)
data = r.json()
pages = data['query']['pages']
for k, v in pages.items():
if 'links' not in v:
return []
for l in v['links']:
titles_so_far.append(l['title'])
if 'batchcomplete' in data:
return titles_so_far
else:
contHolder = data['continue']['plcontinue']
titles_so_far.extend(self.get_children(contHolder))
return titles_so_far
def get_first_x(self, lst: List, x: int) ->List:
lst_so_far = []
for i in range(x):
lst_so_far.append(lst[i])
return lst_so_far
def __lt__(self, other):
return self.f < other.f
def __le__(self, other):
return self.f <= other.f
def __eq__(self, other):
return compare_titles(self.title, other.title)
def __ne__(self, other):
return not compare_titles(self.title, other.title)
def __gt__(self, other):
return self.f > other.f
def __ge__(self, other):
return self.f >= other.f
class PQ:
"""
MinHeap implementation of a priority queue for A* search.
"""
heap = []
def __init__(self):
self.heap = []
def insert(self, to_insert: Article) ->None:
"""
Insert new element in Priority queue
"""
heapq.heappush(self.heap, to_insert)
def pop(self) ->Article:
"""
pops minimum element from priority queue
"""
return heapq.heappop(self.heap)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def heuristic_2(a: str, b: str) ->float:
"""
Returns predicted cost (distance) from two titles a to b, through the cosine similarity of two generated
term-document matrices of the article. The heuristic in this case is purely semantic.
The HTML enriched query for the JSON is:
https://en.wikipedia.org/w/api.php?action=query&titles=TITLE&prop=extracts&format=json&exintro=1
"""
if get_intro(a) == '' or get_intro(b) == '':
return 2
else:
corpus = [get_intro(a), get_intro(b)]
vect = TfidfVectorizer()
mat = vect.fit_transform(corpus)
return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2
class Article:
"""
This is the article class that represents each Wikipedia article.
Instance Variables:
- title: str that represents the title of the article
- target: the final target given by the user
- g:
- f:
"""
title: str
target: str
g: float
f: float
parent: typing.Union[Article, Type(None)]
heuristic: Callable[[str, str], float]
def __init__(self, title: str, target: str, parent: typing.Union[
Article, Type(None)], heuristic: Callable[[str, str], float]):
"""
Initializes based on [urls/titles/nodes]
"""
self.title = title
self.target = target
self.heuristic = heuristic
if parent:
self.parent = parent
self.g = parent.g + 1
else:
self.parent = None
self.g = 0
h = self.heuristic(title, target)
self.f = self.g + h
def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:
"""
Return list of connected (children) article object using the wikipedia API functions.
"""
s = requests.Session()
url = 'https://en.wikipedia.org/w/api.php'
if cont is None:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max'}
else:
params = {'action': 'query', 'format': 'json', 'titles': self.
title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}
titles_so_far = []
r = s.get(url=url, params=params)
data = r.json()
pages = data['query']['pages']
for k, v in pages.items():
if 'links' not in v:
return []
for l in v['links']:
titles_so_far.append(l['title'])
if 'batchcomplete' in data:
return titles_so_far
else:
contHolder = data['continue']['plcontinue']
titles_so_far.extend(self.get_children(contHolder))
return titles_so_far
def get_first_x(self, lst: List, x: int) ->List:
lst_so_far = []
for i in range(x):
lst_so_far.append(lst[i])
return lst_so_far
def __lt__(self, other):
return self.f < other.f
def __le__(self, other):
return self.f <= other.f
def __eq__(self, other):
return compare_titles(self.title, other.title)
def __ne__(self, other):
return not compare_titles(self.title, other.title)
def __gt__(self, other):
return self.f > other.f
def __ge__(self, other):
return self.f >= other.f
class PQ:
"""
MinHeap implementation of a priority queue for A* search.
"""
heap = []
def __init__(self):
self.heap = []
def insert(self, to_insert: Article) ->None:
"""
Insert new element in Priority queue
"""
heapq.heappush(self.heap, to_insert)
def pop(self) ->Article:
"""
pops minimum element from priority queue
"""
return heapq.heappop(self.heap)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from __future__ import annotations
import typing
import requests
import heapq
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from bs4 import BeautifulSoup
from wikiAPI import get_JSON, get_intro, compare_titles
from typing import List, Type, Callable
def heuristic_0(a: str, b: str) -> float:
    """Trivial baseline heuristic: every pair of titles is assumed to be
    the same fixed distance apart, regardless of content."""
    return 2
def heuristic_1(a: str, b: str) -> float:
    """
    Returns predicted cost (distance) from two titles a to b, through the cosine similarity of two generated
    term-document matrices of the article. The heuristic in this case is purely semantic.

    The HTML enriched query for the JSON is:
    https://en.wikipedia.org/w/api.php?action=parse&page=TITLE&prop=text&formatversion=2&format=json
    """
    template = "https://en.wikipedia.org/w/api.php?action=parse&page=TEMP&prop=text&formatversion=2&format=json"

    def encode(title: str) -> str:
        # Percent-encode the two characters that would break the query URL.
        return (title.replace(" ", "%20")).replace("&", "%26")

    urls = [template.replace("TEMP", encode(a)), template.replace("TEMP", encode(b))]

    # Strip each page's HTML down to plain text (same order as before:
    # the start page is fetched first, then the end page).
    documents = []
    for url in urls:
        soup = BeautifulSoup(get_JSON(url)['parse']['text'], 'html.parser')
        documents.append(soup.get_text().replace('\n', ' '))

    # Term-document matrix for the two documents, then cosine similarity,
    # rescaled into a distance estimate in [0, 2].
    mat = TfidfVectorizer().fit_transform(documents)
    return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2
def heuristic_2(a: str, b: str) -> float:
    """
    Returns predicted cost (distance) from two titles a to b, through the cosine similarity of two generated
    term-document matrices built from the article introductions. The heuristic in this case is purely semantic.

    The HTML enriched query for the JSON is:
    https://en.wikipedia.org/w/api.php?action=query&titles=TITLE&prop=extracts&format=json&exintro=1
    """
    # Fetch each introduction exactly once: the original called get_intro()
    # up to four times per invocation, wasting two network round-trips.
    intro_a = get_intro(a)
    intro_b = get_intro(b)
    if intro_a == "" or intro_b == "":
        # Nothing to compare — fall back to the maximum heuristic value.
        return 2
    # generate term-document matrices
    corpus = [intro_a, intro_b]
    vect = TfidfVectorizer()
    mat = vect.fit_transform(corpus)
    # return cosine similarity, rescaled into a distance in [0, 2]
    return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2
# def semantic_similarity(a: str, b: str) -> float:
# web_model = WebBertSimilarity(device='cpu', batch_size=10)
# return web_model.predict([(a, b)])
class Article:
    """
    This is the article class that represents each Wikipedia article,
    used as a search node for A*.

    Instance Variables:
        - title: title of this article
        - target: the final target title given by the user
        - g: cost so far (number of links followed from the source)
        - f: g plus the heuristic estimate h(title, target)
        - parent: the Article this node was expanded from (None for the source)
        - heuristic: callable estimating the distance between two titles
    """
    title: str
    target: str
    g: float
    f: float
    # NOTE: the original annotation `typing.Union[Article, Type(None)]`
    # called typing.Type (an error if ever evaluated); it only worked
    # because `from __future__ import annotations` keeps annotations lazy.
    parent: typing.Optional[Article]
    heuristic: Callable[[str, str], float]

    def __init__(self, title: str, target: str,
                 parent: typing.Optional[Article],
                 heuristic: Callable[[str, str], float]):
        """
        Initializes a search node from its title, the search target, its
        parent node (None for the source), and the heuristic function.
        """
        self.title = title
        self.target = target
        self.heuristic = heuristic
        if parent:
            self.parent = parent
            self.g = parent.g + 1
        else:
            self.parent = None
            self.g = 0
        h = self.heuristic(title, target)
        self.f = self.g + h

    def get_children(self, cont: typing.Optional[str]) -> List[str]:
        """
        Return list of connected (children) article titles using the
        wikipedia API. `cont` is the API's `plcontinue` token; pass None
        for the first page of results — further pages are fetched
        recursively until the API reports `batchcomplete`.
        """
        s = requests.Session()
        url = "https://en.wikipedia.org/w/api.php"
        params = {
            "action": "query",
            "format": "json",
            "titles": self.title,
            "prop": "links",
            "pllimit": "max"
        }
        if cont is not None:
            params["plcontinue"] = cont
        titles_so_far = []
        r = s.get(url=url, params=params)
        data = r.json()
        pages = data["query"]["pages"]
        for k, v in pages.items():
            if "links" not in v:
                # Page has no outgoing links (or is missing) — dead end.
                return []
            for link in v["links"]:
                titles_so_far.append(link["title"])
        if "batchcomplete" in data:
            return titles_so_far
        else:
            # More results available: recurse with the continuation token.
            contHolder = data["continue"]["plcontinue"]
            titles_so_far.extend(self.get_children(contHolder))
            return titles_so_far

    def get_first_x(self, lst: List, x: int) -> List:
        """Return the first x elements of lst (IndexError if x > len(lst),
        matching the original indexing behavior)."""
        lst_so_far = []
        for i in range(x):
            lst_so_far.append(lst[i])
        return lst_so_far

    # Ordering compares f-scores (for the priority queue); equality
    # compares titles (for membership tests). Defining __eq__ without
    # __hash__ makes Article unhashable — presumably intentional, since
    # the search tracks visited *titles*, not Article objects.
    def __lt__(self, other):
        return self.f < other.f

    def __le__(self, other):
        return self.f <= other.f

    def __eq__(self, other):
        return compare_titles(self.title, other.title)

    def __ne__(self, other):
        return not compare_titles(self.title, other.title)

    def __gt__(self, other):
        return self.f > other.f

    def __ge__(self, other):
        return self.f >= other.f
class PQ:
    """
    MinHeap implementation of a priority queue for A* search.

    Elements are ordered by Article.__lt__ (the f-score), so pop()
    always returns the most promising node.
    """

    def __init__(self):
        # Per-instance heap. (The original also declared a class-level
        # `heap = []` — a shared mutable attribute that was shadowed and
        # unused once __init__ ran; it has been removed.)
        self.heap = []

    def insert(self, to_insert: Article) -> None:
        """
        Insert new element in Priority queue
        """
        heapq.heappush(self.heap, to_insert)

    def pop(self) -> Article:
        """
        pops minimum element from priority queue
        (raises IndexError if the queue is empty)
        """
        return heapq.heappop(self.heap)
def a_star(source: str, target: str, heuristic: Callable[[str, str], float]) -> list:
    """
    Returns path (list of article titles) from source to target using the
    A* search algorithm with the given heuristic.
    """
    # BUG FIX: the original used `set((source))`, which is set(source) —
    # a set of the *characters* of the source title, not {source}.
    visited: set = {source}
    cur: Article = Article(source, target, None, heuristic)
    queue = PQ()
    while not compare_titles(cur.title, target):
        # Expand the current node; enqueue each unseen child.
        for child_title in cur.get_children(None):
            if child_title not in visited:
                article = Article(child_title, target, cur, heuristic)
                queue.insert(article)
                visited.add(child_title)
                print(article.f, article.title)
        cur = queue.pop()
        print("CUR:", cur.f, cur.title)
    # Walk parent links back to the source to recover the path.
    path = [cur.title]
    while path[0] != source:
        cur = cur.parent
        path.insert(0, cur.title)
    return path
# print(a_star("Dog", "Aardwolf", heuristic_2))
|
flexible
|
{
"blob_id": "1fad591fde707c73bd52aa8518828c8b8be9cd32",
"index": 2283,
"step-1": "<mask token>\n\n\nclass Article:\n <mask token>\n title: str\n target: str\n g: float\n f: float\n parent: typing.Union[Article, Type(None)]\n heuristic: Callable[[str, str], float]\n\n def __init__(self, title: str, target: str, parent: typing.Union[\n Article, Type(None)], heuristic: Callable[[str, str], float]):\n \"\"\"\n Initializes based on [urls/titles/nodes]\n \"\"\"\n self.title = title\n self.target = target\n self.heuristic = heuristic\n if parent:\n self.parent = parent\n self.g = parent.g + 1\n else:\n self.parent = None\n self.g = 0\n h = self.heuristic(title, target)\n self.f = self.g + h\n\n def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:\n \"\"\"\n Return list of connected (children) article object using the wikipedia API functions.\n \"\"\"\n s = requests.Session()\n url = 'https://en.wikipedia.org/w/api.php'\n if cont is None:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max'}\n else:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}\n titles_so_far = []\n r = s.get(url=url, params=params)\n data = r.json()\n pages = data['query']['pages']\n for k, v in pages.items():\n if 'links' not in v:\n return []\n for l in v['links']:\n titles_so_far.append(l['title'])\n if 'batchcomplete' in data:\n return titles_so_far\n else:\n contHolder = data['continue']['plcontinue']\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n def get_first_x(self, lst: List, x: int) ->List:\n lst_so_far = []\n for i in range(x):\n lst_so_far.append(lst[i])\n return lst_so_far\n <mask token>\n <mask token>\n\n def __eq__(self, other):\n return compare_titles(self.title, other.title)\n\n def __ne__(self, other):\n return not compare_titles(self.title, other.title)\n <mask token>\n <mask token>\n\n\nclass PQ:\n \"\"\"\n MinHeap implementation of a priority queue for A* search.\n 
\"\"\"\n heap = []\n\n def __init__(self):\n self.heap = []\n\n def insert(self, to_insert: Article) ->None:\n \"\"\"\n Insert new element in Priority queue\n \"\"\"\n heapq.heappush(self.heap, to_insert)\n\n def pop(self) ->Article:\n \"\"\"\n pops minimum element from priority queue\n \"\"\"\n return heapq.heappop(self.heap)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Article:\n <mask token>\n title: str\n target: str\n g: float\n f: float\n parent: typing.Union[Article, Type(None)]\n heuristic: Callable[[str, str], float]\n\n def __init__(self, title: str, target: str, parent: typing.Union[\n Article, Type(None)], heuristic: Callable[[str, str], float]):\n \"\"\"\n Initializes based on [urls/titles/nodes]\n \"\"\"\n self.title = title\n self.target = target\n self.heuristic = heuristic\n if parent:\n self.parent = parent\n self.g = parent.g + 1\n else:\n self.parent = None\n self.g = 0\n h = self.heuristic(title, target)\n self.f = self.g + h\n\n def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:\n \"\"\"\n Return list of connected (children) article object using the wikipedia API functions.\n \"\"\"\n s = requests.Session()\n url = 'https://en.wikipedia.org/w/api.php'\n if cont is None:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max'}\n else:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}\n titles_so_far = []\n r = s.get(url=url, params=params)\n data = r.json()\n pages = data['query']['pages']\n for k, v in pages.items():\n if 'links' not in v:\n return []\n for l in v['links']:\n titles_so_far.append(l['title'])\n if 'batchcomplete' in data:\n return titles_so_far\n else:\n contHolder = data['continue']['plcontinue']\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n def get_first_x(self, lst: List, x: int) ->List:\n lst_so_far = []\n for i in range(x):\n lst_so_far.append(lst[i])\n return lst_so_far\n\n def __lt__(self, other):\n return self.f < other.f\n\n def __le__(self, other):\n return self.f <= other.f\n\n def __eq__(self, other):\n return compare_titles(self.title, other.title)\n\n def __ne__(self, other):\n return not compare_titles(self.title, other.title)\n <mask token>\n\n def 
__ge__(self, other):\n return self.f >= other.f\n\n\nclass PQ:\n \"\"\"\n MinHeap implementation of a priority queue for A* search.\n \"\"\"\n heap = []\n\n def __init__(self):\n self.heap = []\n\n def insert(self, to_insert: Article) ->None:\n \"\"\"\n Insert new element in Priority queue\n \"\"\"\n heapq.heappush(self.heap, to_insert)\n\n def pop(self) ->Article:\n \"\"\"\n pops minimum element from priority queue\n \"\"\"\n return heapq.heappop(self.heap)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Article:\n \"\"\"\n This is the article class that represents each Wikipedia article.\n\n Instance Variables:\n - title: str that represents the title of the article\n - target: the final target given by the user\n - g:\n - f:\n \"\"\"\n title: str\n target: str\n g: float\n f: float\n parent: typing.Union[Article, Type(None)]\n heuristic: Callable[[str, str], float]\n\n def __init__(self, title: str, target: str, parent: typing.Union[\n Article, Type(None)], heuristic: Callable[[str, str], float]):\n \"\"\"\n Initializes based on [urls/titles/nodes]\n \"\"\"\n self.title = title\n self.target = target\n self.heuristic = heuristic\n if parent:\n self.parent = parent\n self.g = parent.g + 1\n else:\n self.parent = None\n self.g = 0\n h = self.heuristic(title, target)\n self.f = self.g + h\n\n def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:\n \"\"\"\n Return list of connected (children) article object using the wikipedia API functions.\n \"\"\"\n s = requests.Session()\n url = 'https://en.wikipedia.org/w/api.php'\n if cont is None:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max'}\n else:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}\n titles_so_far = []\n r = s.get(url=url, params=params)\n data = r.json()\n pages = data['query']['pages']\n for k, v in pages.items():\n if 'links' not in v:\n return []\n for l in v['links']:\n titles_so_far.append(l['title'])\n if 'batchcomplete' in data:\n return titles_so_far\n else:\n contHolder = data['continue']['plcontinue']\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n def get_first_x(self, lst: List, x: int) ->List:\n lst_so_far = []\n for i in range(x):\n lst_so_far.append(lst[i])\n return lst_so_far\n\n def __lt__(self, other):\n return self.f < other.f\n\n def __le__(self, other):\n return 
self.f <= other.f\n\n def __eq__(self, other):\n return compare_titles(self.title, other.title)\n\n def __ne__(self, other):\n return not compare_titles(self.title, other.title)\n\n def __gt__(self, other):\n return self.f > other.f\n\n def __ge__(self, other):\n return self.f >= other.f\n\n\nclass PQ:\n \"\"\"\n MinHeap implementation of a priority queue for A* search.\n \"\"\"\n heap = []\n\n def __init__(self):\n self.heap = []\n\n def insert(self, to_insert: Article) ->None:\n \"\"\"\n Insert new element in Priority queue\n \"\"\"\n heapq.heappush(self.heap, to_insert)\n\n def pop(self) ->Article:\n \"\"\"\n pops minimum element from priority queue\n \"\"\"\n return heapq.heappop(self.heap)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef heuristic_2(a: str, b: str) ->float:\n \"\"\"\n Returns predicted cost (distance) from two titles a to b, through the cosine similarity of two generated\n term-document matrices of the article. The heuristic in this case is purely semantic.\n\n The HTML enriched query for the JSON is:\n https://en.wikipedia.org/w/api.php?action=query&titles=TITLE&prop=extracts&format=json&exintro=1\n \"\"\"\n if get_intro(a) == '' or get_intro(b) == '':\n return 2\n else:\n corpus = [get_intro(a), get_intro(b)]\n vect = TfidfVectorizer()\n mat = vect.fit_transform(corpus)\n return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2\n\n\nclass Article:\n \"\"\"\n This is the article class that represents each Wikipedia article.\n\n Instance Variables:\n - title: str that represents the title of the article\n - target: the final target given by the user\n - g:\n - f:\n \"\"\"\n title: str\n target: str\n g: float\n f: float\n parent: typing.Union[Article, Type(None)]\n heuristic: Callable[[str, str], float]\n\n def __init__(self, title: str, target: str, parent: typing.Union[\n Article, Type(None)], heuristic: Callable[[str, str], float]):\n \"\"\"\n Initializes based on [urls/titles/nodes]\n \"\"\"\n self.title = title\n self.target = target\n self.heuristic = heuristic\n if parent:\n self.parent = parent\n self.g = parent.g + 1\n else:\n self.parent = None\n self.g = 0\n h = self.heuristic(title, target)\n self.f = self.g + h\n\n def get_children(self, cont: typing.Union[str, Type(None)]) ->List[str]:\n \"\"\"\n Return list of connected (children) article object using the wikipedia API functions.\n \"\"\"\n s = requests.Session()\n url = 'https://en.wikipedia.org/w/api.php'\n if cont is None:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max'}\n else:\n params = {'action': 'query', 'format': 'json', 'titles': self.\n title, 'prop': 'links', 'pllimit': 'max', 'plcontinue': cont}\n titles_so_far 
= []\n r = s.get(url=url, params=params)\n data = r.json()\n pages = data['query']['pages']\n for k, v in pages.items():\n if 'links' not in v:\n return []\n for l in v['links']:\n titles_so_far.append(l['title'])\n if 'batchcomplete' in data:\n return titles_so_far\n else:\n contHolder = data['continue']['plcontinue']\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n def get_first_x(self, lst: List, x: int) ->List:\n lst_so_far = []\n for i in range(x):\n lst_so_far.append(lst[i])\n return lst_so_far\n\n def __lt__(self, other):\n return self.f < other.f\n\n def __le__(self, other):\n return self.f <= other.f\n\n def __eq__(self, other):\n return compare_titles(self.title, other.title)\n\n def __ne__(self, other):\n return not compare_titles(self.title, other.title)\n\n def __gt__(self, other):\n return self.f > other.f\n\n def __ge__(self, other):\n return self.f >= other.f\n\n\nclass PQ:\n \"\"\"\n MinHeap implementation of a priority queue for A* search.\n \"\"\"\n heap = []\n\n def __init__(self):\n self.heap = []\n\n def insert(self, to_insert: Article) ->None:\n \"\"\"\n Insert new element in Priority queue\n \"\"\"\n heapq.heappush(self.heap, to_insert)\n\n def pop(self) ->Article:\n \"\"\"\n pops minimum element from priority queue\n \"\"\"\n return heapq.heappop(self.heap)\n\n\n<mask token>\n",
"step-5": "from __future__ import annotations\nimport typing\nimport requests\nimport heapq\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom bs4 import BeautifulSoup\nfrom wikiAPI import get_JSON, get_intro, compare_titles\nfrom typing import List, Type, Callable\n\n\ndef heuristic_0(a: str, b: str) -> float:\n return 2\n\n\ndef heuristic_1(a: str, b: str) -> float:\n \"\"\"\n Returns predicted cost (distance) from two titles a to b, through the cosine similarity of two generated\n term-document matrices of the article. The heuristic in this case is purely semantic.\n\n The HTML enriched query for the JSON is:\n https://en.wikipedia.org/w/api.php?action=parse&page=TITLE&prop=text&formatversion=2&format=json\n \"\"\"\n query = \"https://en.wikipedia.org/w/api.php?action=parse&page=TEMP&prop=text&formatversion=2&format=json\"\n startTitle = (a.replace(\" \", \"%20\")).replace(\"&\", \"%26\")\n endTitle = (b.replace(\" \", \"%20\")).replace(\"&\", \"%26\")\n startURL = (query.replace(\"TEMP\", startTitle))\n endURL = (query.replace(\"TEMP\", endTitle))\n # text processing using SOUP\n initialSoup = BeautifulSoup(get_JSON(startURL)['parse']['text'], 'html.parser')\n finalSoup = BeautifulSoup(get_JSON(endURL)['parse']['text'], 'html.parser')\n # generate term-document matrices\n corpus = [initialSoup.get_text().replace('\\n', ' '), finalSoup.get_text().replace('\\n', ' ')]\n vect = TfidfVectorizer()\n mat = vect.fit_transform(corpus)\n # return cosine similarity\n return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2\n\n\ndef heuristic_2(a: str, b: str) -> float:\n \"\"\"\n Returns predicted cost (distance) from two titles a to b, through the cosine similarity of two generated\n term-document matrices of the article. 
The heuristic in this case is purely semantic.\n\n The HTML enriched query for the JSON is:\n https://en.wikipedia.org/w/api.php?action=query&titles=TITLE&prop=extracts&format=json&exintro=1\n \"\"\"\n # generate term-document matrices\n if get_intro(a) == \"\" or get_intro(b) == \"\":\n return 2\n else:\n corpus = [get_intro(a), get_intro(b)]\n vect = TfidfVectorizer()\n mat = vect.fit_transform(corpus)\n # return cosine similarity\n return abs(1 - cosine_similarity(mat[0:1], mat)[0][1]) * 2\n\n\n# def semantic_similarity(a: str, b: str) -> float:\n # web_model = WebBertSimilarity(device='cpu', batch_size=10)\n # return web_model.predict([(a, b)])\n\n\nclass Article:\n \"\"\"\n This is the article class that represents each Wikipedia article.\n\n Instance Variables:\n - title: str that represents the title of the article\n - target: the final target given by the user\n - g:\n - f:\n \"\"\"\n title: str\n target: str\n g: float\n f: float\n parent: typing.Union[Article, Type(None)]\n heuristic: Callable[[str, str], float]\n\n def __init__(self, title: str, target: str, parent: typing.Union[Article, Type(None)], heuristic: Callable[[str, str], float] ):\n \"\"\"\n Initializes based on [urls/titles/nodes]\n \"\"\"\n self.title = title\n self.target = target\n self.heuristic = heuristic\n\n if parent:\n self.parent = parent\n self.g = parent.g + 1\n else:\n self.parent = None\n self.g = 0\n\n h = self.heuristic(title, target)\n self.f = self.g + h\n\n def get_children(self, cont: typing.Union[str, Type(None)]) -> List[str]:\n \"\"\"\n Return list of connected (children) article object using the wikipedia API functions.\n \"\"\"\n s = requests.Session()\n\n url = \"https://en.wikipedia.org/w/api.php\"\n\n if cont is None:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": \"links\",\n \"pllimit\": \"max\"\n }\n else:\n params = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"titles\": self.title,\n \"prop\": 
\"links\",\n \"pllimit\": \"max\",\n \"plcontinue\": cont\n }\n\n titles_so_far = []\n\n r = s.get(url=url, params=params)\n data = r.json()\n\n pages = data[\"query\"][\"pages\"]\n\n for k, v in pages.items():\n if \"links\" not in v:\n return []\n\n for l in v[\"links\"]:\n titles_so_far.append(l[\"title\"])\n\n if \"batchcomplete\" in data:\n return titles_so_far\n else:\n contHolder = data[\"continue\"][\"plcontinue\"]\n titles_so_far.extend(self.get_children(contHolder))\n return titles_so_far\n\n # return [Article(child, self.target, self.title) for child in titles_so_far]\n\n def get_first_x(self, lst: List, x: int) -> List:\n lst_so_far = []\n\n for i in range(x):\n lst_so_far.append(lst[i])\n\n return lst_so_far\n\n def __lt__(self, other):\n return self.f < other.f\n\n def __le__(self, other):\n return self.f <= other.f\n\n def __eq__(self, other):\n return compare_titles(self.title, other.title)\n\n def __ne__(self, other):\n return not compare_titles(self.title, other.title)\n\n def __gt__(self, other):\n return self.f > other.f\n\n def __ge__(self, other):\n return self.f >= other.f\n\nclass PQ:\n \"\"\"\n MinHeap implementation of a priority queue for A* search.\n \"\"\"\n heap = []\n\n def __init__(self):\n self.heap = []\n\n def insert(self, to_insert: Article) -> None:\n \"\"\"\n Insert new element in Priority queue\n \"\"\"\n heapq.heappush(self.heap, to_insert)\n\n def pop(self) -> Article:\n \"\"\"\n pops minimum element from priority queue\n \"\"\"\n return heapq.heappop(self.heap)\n\n\ndef a_star(source: str, target: str, heuristic: Callable[[str, str], float]) -> list:\n \"\"\"\n Returns path from source to target using A* search algorithm.\n \"\"\"\n visited: set = set((source))\n cur: Article = Article(source, target, None, heuristic)\n queue = PQ()\n\n while not compare_titles(cur.title, target):\n nexts = cur.get_children(None)\n for next in nexts:\n if next not in visited:\n article = Article(next, target, cur, heuristic)\n 
queue.insert(article)\n visited.add(next)\n print(article.f, article.title)\n cur = queue.pop()\n print(\"CUR:\", cur.f, cur.title)\n\n path = [cur.title]\n\n while path[0] != source:\n cur = cur.parent\n path.insert(0, cur.title)\n\n return path\n\n# print(a_star(\"Dog\", \"Aardwolf\", heuristic_2))\n",
"step-ids": [
12,
15,
17,
18,
23
]
}
|
[
12,
15,
17,
18,
23
] |
import random
from datetime import datetime
from slackbot.bot import respond_to
from .term_model import Term, Response
from ..botmessage import botsend, botwebapi
# Command names that already exist elsewhere in the bot; user-defined
# term commands must not shadow them.
RESERVED = (
    'drive', 'manual', 'jira', 'wikipedia', 'plusplus',
    'translate', '翻訳',
    'weather', '天気',
    'term',
    'shuffle', 'help', 'choice', 'ping', 'version', 'random', 'cal',
    'google', 'image', 'map', 'gadmin',
    'github',
    'suddendeath',
    'pycamp',
    'lgtm',
)
# In-memory set of registered term commands, used for fast membership checks.
# NOTE(review): this queries the database at import time -- presumably the DB
# connection is initialized before this module is loaded; confirm.
commands = {term.command for term in Term.select()}
@respond_to(r'^term\s+([\w-]+)$')
@respond_to(r'^term\s+create\s+([\w-]+)$')
@respond_to(r'^term\s+add\s+([\w-]+)$')
def term_create(message, command):
    """
    Create the specified term command.

    :param message: slackbot message object
    :param command: name of the term command to create
    """
    # `$term list` / `$term help` are handled by dedicated handlers
    if command in ('list', 'help'):
        return
    # command names are normalized to lower case
    command = command.lower()
    # refuse to shadow a reserved (built-in) command
    if command in RESERVED:
        botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))
        return
    creator = message.body['user']
    term, created = Term.get_or_create(command=command, creator=creator)
    if not created:
        # already registered -> do not register again
        botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))
    else:
        msg = 'コマンド `${}` を作成しました。\n'.format(command)
        msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)
        botsend(message, msg)
        # keep the in-memory command set in sync with the DB
        commands.add(command)
@respond_to(r'^term\s+(drop|del|delete)\s+([\w-]+)$')
def term_drop(message, subcommand, command):
    """
    Delete the specified term command and all of its responses.

    :param message: slackbot message object
    :param subcommand: matched alias (drop/del/delete) -- unused
    :param command: name of the term command to delete
    """
    # command names are normalized to lower case
    command = command.lower()
    # bail out if the command does not exist (or is reserved)
    if not _available_command(message, command):
        return
    # delete the term together with its responses
    term = Term.get(command=command)
    term.delete_instance(recursive=True)
    # BUG FIX: the original called term.save() after delete_instance(),
    # which in peewee re-inserts the just-deleted row -- removed.
    # keep the in-memory command set in sync with the DB
    commands.remove(command)
    botsend(message, 'コマンド `${}` を消去しました'.format(command))
def _create_attachments_for_list(pretext, data, command=True):
"""
指定されたリストの一覧を message.send_webapi で送信するための
attachments を生成する
"""
if command:
# ['foo', 'bar', 'baz'] -> '`$far`, `$bar`, `$baz`'
list_text = ', '.join(['`${}`'.format(x) for x in data])
else:
list_text = '\n'.join([x for x in data])
attachments = [{
'pretext': pretext,
'text': list_text,
'mrkdwn_in': ['pretext', 'text'],
}]
return attachments
@respond_to(r'^term\s+search\s+([\w-]+)$')
def term_search(message, keyword):
    """
    Reply with the term commands whose name contains the keyword.
    """
    pretext = '`{}` を含む用語コマンドの一覧です'.format(keyword)
    matched = [name for name in sorted(commands) if keyword in name]
    botwebapi(message, _create_attachments_for_list(pretext, matched))
@respond_to(r'^term\s+list$')
def term_list(message):
    """
    Reply with all currently registered term commands.
    """
    attachments = _create_attachments_for_list('用語コマンドの一覧です',
                                               sorted(commands))
    botwebapi(message, attachments)
def _available_command(message, command):
    """
    Return whether the given term command can be operated on.

    Reserved names are rejected silently; unknown commands are
    rejected with a notice sent to the channel.
    """
    if command in RESERVED:
        return False
    if command not in commands:
        botsend(message, 'コマンド `${}` は登録されていません'.format(command))
        return False
    return True
def _send_markdown_text(message, text):
    """
    Send the given text so that it is rendered as markdown.
    """
    botwebapi(message, [{
        'pretext': text,
        'mrkdwn_in': ['pretext'],
    }])
@respond_to(r'^([\w-]+)$')
def return_response(message, command):
    """
    Reply with one randomly chosen response registered for the command.
    """
    if not _available_command(message, command):
        return
    responses = Term.get(command=command).response_set
    if len(responses) == 0:
        # no responses registered yet -> explain how to add one
        msg = 'コマンド `${}` には応答が登録されていません\n'.format(command)
        msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)
        botsend(message, msg)
    else:
        picked = random.choice(responses)
        _send_markdown_text(message, picked.text)
@respond_to(r'^([\w-]+)\s+(.*)')
def response(message, command, params):
    """
    Dispatch a subcommand of a term command.

    Known subcommands are pop/list/search/del/delete/remove/add;
    any other text is registered verbatim as a new response.
    """
    if not _available_command(message, command):
        return
    data = params.split(maxsplit=1)
    subcommand = data[0]
    try:
        if subcommand == 'pop':
            # delete the most recently registered response
            pop_response(message, command)
        elif subcommand == 'list':
            # list all responses
            get_responses(message, command)
        elif subcommand == 'search':
            # search responses by keyword
            search_responses(message, command, data[1])
        elif subcommand in ('del', 'delete', 'remove'):
            # delete one response
            del_response(message, command, data[1])
        elif subcommand == 'add':
            # add one response
            add_response(message, command, data[1])
        else:
            # not a known subcommand: add the whole text as a response
            add_response(message, command, params)
    except IndexError:
        # a subcommand was given without its required argument -> show help
        term_help(message)
def _exist_response(command, text):
    """
    Return whether the given response text is already registered
    for the given term command.
    """
    term = Term.get(command=command)
    matches = Response.select().where(Response.term == term,
                                      Response.text == text).count()
    return matches > 0
def add_response(message, command, text):
    """
    Add a response to a term command.

    :param message: slackbot message object
    :param command: term command to add the response to
    :param text: response text to register
    """
    # skip if this exact response text is already registered
    if _exist_response(command, text):
        reply = 'コマンド `${}` に「{}」は登録済みです'.format(command, text)
        _send_markdown_text(message, reply)
        return
    term = Term.get(command=command)
    creator = message.body['user']
    # register the response
    # NOTE(review): ``created=datetime.now()`` is part of the lookup kwargs
    # here, so get_or_create will effectively always create; consider moving
    # it into ``defaults`` -- confirm intended behavior. The trailing
    # resp.save() also looks redundant after get_or_create.
    resp, created = Response.get_or_create(term=term, text=text,
                                           creator=creator,
                                           created=datetime.now())
    resp.save()
    text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)
    _send_markdown_text(message, text)
def del_response(message, command, text):
    """
    Remove the given response text from a term command.
    """
    term = Term.get(command=command)
    try:
        target = Response.get(term=term, text=text)
    except Response.DoesNotExist:
        reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, text)
        _send_markdown_text(message, reply)
        return
    # the response exists -> delete it
    target.delete_instance()
    reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)
    _send_markdown_text(message, reply)
def pop_response(message, command):
    """
    Delete the most recently registered response of a term command.
    """
    response_set = Term.get(command=command).response_set
    if len(response_set) == 0:
        # nothing registered yet -> explain how to add one
        msg = 'コマンド `${}` には応答が登録されていません\n'.format(command)
        msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)
        botsend(message, msg)
        return
    # newest first, then drop the head
    newest = response_set.order_by(Response.created.desc())[0]
    removed_text = newest.text
    newest.delete_instance()
    reply = 'コマンド `${}` から「{}」を削除しました'.format(command, removed_text)
    _send_markdown_text(message, reply)
def search_responses(message, command, keyword):
    """
    Reply with the responses of a term command that contain the keyword.

    :param message: slackbot message object
    :param command: term command to search in
    :param keyword: substring to look for (matched with SQL LIKE)
    """
    term = Term.get(command=command)
    pat = '%{}%'.format(keyword)
    # BUG FIX: the original filtered with ``term == term`` (a Python
    # self-comparison, always true), so the search spanned every command's
    # responses. Filter on the model field instead.
    responses = Response.select().where(Response.term == term,
                                        Response.text ** pat)
    if len(responses) == 0:
        botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command, keyword))
    else:
        pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\n'.format(
            command, keyword, len(responses))
        data = [x.text for x in responses]
        attachments = _create_attachments_for_list(pretext, data, False)
        botwebapi(message, attachments)
def get_responses(message, command):
    """
    Reply with every response registered for a term command.
    """
    response_set = Term.get(command=command).response_set
    count = len(response_set)
    if count == 0:
        # nothing registered yet -> explain how to add one
        msg = 'コマンド `${}` には応答が登録されていません\n'.format(command)
        msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)
        botsend(message, msg)
        return
    pretext = 'コマンド `${}` の応答は {} 件あります\n'.format(
        command, count)
    texts = [r.text for r in response_set]
    botwebapi(message, _create_attachments_for_list(pretext, texts, False))
# NOTE(review): this pattern is not anchored with ^/$ (unlike the other
# handlers), so any message containing "term help" matches -- confirm
# that this is intended.
@respond_to('term\s+help')
def term_help(message):
    """
    Send the help text for the term plugin.
    """
    botsend(message, '''- `$term (用語)`: 用語コマンドを作成する
- `$term create (用語)`: 用語コマンドを作成する
- `$term drop (用語)`: 用語コマンドを消去する
- `$term search (キーワード)`: キーワードを含む用語コマンドの一覧を返す
- `$term list`: 用語コマンドの一覧を返す
- `$(用語)`: 用語コマンドに登録してある応答からランダムに一つ返す
- `$(用語) add (応答)`: 用語コマンドに応答を追加する
- `$(用語) del (応答)`: 用語コマンドから応答を削除する
- `$(用語) pop`: 用語コマンドの最後に登録した応答を削除する
- `$(用語) list`: 用語コマンドの応答一覧を返す
- `$(用語) search (キーワード)`: 用語コマンドのうちキーワードを含む応答一覧を返す
```
> $term create 酒
コマンド `$酒` を作成しました。
`$酒 add (レスポンス)` でレスポンスを追加できます
> $酒 add ビール
コマンド `$酒` に `ビール` を追加しました
> $酒 add ワイン
コマンド `$酒` に `ワイン` を追加しました
> $酒
ビール
```
''')
|
normal
|
{
"blob_id": "86e97e7eaf0d23ccf4154b5ffc853c5aee966326",
"index": 5769,
"step-1": "<mask token>\n\n\n@respond_to('^term\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+create\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+add\\\\s+([\\\\w-]+)$')\ndef term_create(message, command):\n \"\"\"\n 指定されたコマンドを生成する\n \"\"\"\n if command in ('list', 'help'):\n return\n command = command.lower()\n if command in RESERVED:\n botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))\n return\n creator = message.body['user']\n term, created = Term.get_or_create(command=command, creator=creator)\n if not created:\n botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))\n else:\n msg = 'コマンド `${}` を作成しました。\\n'.format(command)\n msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)\n botsend(message, msg)\n commands.add(command)\n\n\n<mask token>\n\n\n@respond_to('^term\\\\s+list$')\ndef term_list(message):\n \"\"\"\n 現在使用可能な用語コマンドの一覧を返す\n \"\"\"\n pretext = '用語コマンドの一覧です'\n attachments = _create_attachments_for_list(pretext, sorted(commands))\n botwebapi(message, attachments)\n\n\n<mask token>\n\n\ndef _send_markdown_text(message, text):\n \"\"\"\n 指定されたtextをmarkdown形式で送信する\n \"\"\"\n attachments = [{'pretext': text, 'mrkdwn_in': ['pretext']}]\n botwebapi(message, attachments)\n\n\n<mask token>\n\n\ndef add_response(message, command, text):\n \"\"\"\n 用語コマンドに応答を追加する\n \"\"\"\n if _exist_response(command, text):\n reply = 'コマンド `${}` に「{}」は登録済みです'.format(command, text)\n _send_markdown_text(message, reply)\n return\n term = Term.get(command=command)\n creator = message.body['user']\n resp, created = Response.get_or_create(term=term, text=text, creator=\n creator, created=datetime.now())\n resp.save()\n text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)\n _send_markdown_text(message, text)\n\n\ndef del_response(message, command, text):\n \"\"\"\n 用語コマンドから応答を削除する\n \"\"\"\n term = Term.get(command=command)\n try:\n response = Response.get(term=term, text=text)\n except Response.DoesNotExist:\n reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, 
text)\n _send_markdown_text(message, reply)\n return\n response.delete_instance()\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\n<mask token>\n\n\ndef search_responses(message, command, keyword):\n \"\"\"\n 用語コマンドに登録されている応答のうち、キーワードにマッチするものを返す\n \"\"\"\n term = Term.get(command=command)\n pat = '%{}%'.format(keyword)\n responses = Response.select().where(term == term, Response.text ** pat)\n if len(responses) == 0:\n botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command,\n keyword))\n else:\n pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\\n'.format(command,\n keyword, len(responses))\n data = [x.text for x in responses]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\ndef get_responses(message, command):\n \"\"\"\n 用語コマンドに登録されている応答の一覧を返す\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n pretext = 'コマンド `${}` の応答は {} 件あります\\n'.format(command, len(\n response_set))\n data = [x.text for x in response_set]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@respond_to('^term\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+create\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+add\\\\s+([\\\\w-]+)$')\ndef term_create(message, command):\n \"\"\"\n 指定されたコマンドを生成する\n \"\"\"\n if command in ('list', 'help'):\n return\n command = command.lower()\n if command in RESERVED:\n botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))\n return\n creator = message.body['user']\n term, created = Term.get_or_create(command=command, creator=creator)\n if not created:\n botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))\n else:\n msg = 'コマンド `${}` を作成しました。\\n'.format(command)\n msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)\n botsend(message, msg)\n commands.add(command)\n\n\n@respond_to('^term\\\\s+(drop|del|delete)\\\\s+([\\\\w-]+)$')\ndef term_drop(message, subcommand, command):\n \"\"\"\n 指定されたコマンドを消去する\n \"\"\"\n command = command.lower()\n if not _available_command(message, command):\n return\n term = Term.get(command=command)\n term.delete_instance(recursive=True)\n term.save()\n commands.remove(command)\n botsend(message, 'コマンド `${}` を消去しました'.format(command))\n\n\n<mask token>\n\n\n@respond_to('^term\\\\s+list$')\ndef term_list(message):\n \"\"\"\n 現在使用可能な用語コマンドの一覧を返す\n \"\"\"\n pretext = '用語コマンドの一覧です'\n attachments = _create_attachments_for_list(pretext, sorted(commands))\n botwebapi(message, attachments)\n\n\n<mask token>\n\n\ndef _send_markdown_text(message, text):\n \"\"\"\n 指定されたtextをmarkdown形式で送信する\n \"\"\"\n attachments = [{'pretext': text, 'mrkdwn_in': ['pretext']}]\n botwebapi(message, attachments)\n\n\n<mask token>\n\n\ndef add_response(message, command, text):\n \"\"\"\n 用語コマンドに応答を追加する\n \"\"\"\n if _exist_response(command, text):\n reply = 'コマンド `${}` に「{}」は登録済みです'.format(command, text)\n _send_markdown_text(message, reply)\n return\n term = Term.get(command=command)\n creator = message.body['user']\n resp, created = Response.get_or_create(term=term, text=text, 
creator=\n creator, created=datetime.now())\n resp.save()\n text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)\n _send_markdown_text(message, text)\n\n\ndef del_response(message, command, text):\n \"\"\"\n 用語コマンドから応答を削除する\n \"\"\"\n term = Term.get(command=command)\n try:\n response = Response.get(term=term, text=text)\n except Response.DoesNotExist:\n reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, text)\n _send_markdown_text(message, reply)\n return\n response.delete_instance()\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\n<mask token>\n\n\ndef search_responses(message, command, keyword):\n \"\"\"\n 用語コマンドに登録されている応答のうち、キーワードにマッチするものを返す\n \"\"\"\n term = Term.get(command=command)\n pat = '%{}%'.format(keyword)\n responses = Response.select().where(term == term, Response.text ** pat)\n if len(responses) == 0:\n botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command,\n keyword))\n else:\n pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\\n'.format(command,\n keyword, len(responses))\n data = [x.text for x in responses]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\ndef get_responses(message, command):\n \"\"\"\n 用語コマンドに登録されている応答の一覧を返す\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n pretext = 'コマンド `${}` の応答は {} 件あります\\n'.format(command, len(\n response_set))\n data = [x.text for x in response_set]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\n@respond_to('term\\\\s+help')\ndef term_help(message):\n \"\"\"\n term pluginのヘルプを返す\n \"\"\"\n botsend(message,\n \"\"\"- `$term (用語)`: 用語コマンドを作成する\n- `$term create (用語)`: 用語コマンドを作成する\n- `$term drop (用語)`: 用語コマンドを消去する\n- `$term search (キーワード)`: 
キーワードを含む用語コマンドの一覧を返す\n- `$term list`: 用語コマンドの一覧を返す\n\n- `$(用語)`: 用語コマンドに登録してある応答からランダムに一つ返す\n- `$(用語) add (応答)`: 用語コマンドに応答を追加する\n- `$(用語) del (応答)`: 用語コマンドから応答を削除する\n- `$(用語) pop`: 用語コマンドの最後に登録した応答を削除する\n- `$(用語) list`: 用語コマンドの応答一覧を返す\n- `$(用語) search (キーワード)`: 用語コマンドのうちキーワードを含む応答一覧を返す\n```\n> $term create 酒\nコマンド `$酒` を作成しました。\n`$酒 add (レスポンス)` でレスポンスを追加できます\n> $酒 add ビール\nコマンド `$酒` に `ビール` を追加しました\n> $酒 add ワイン\nコマンド `$酒` に `ワイン` を追加しました\n> $酒\nビール\n```\n\"\"\"\n )\n",
"step-3": "<mask token>\n\n\n@respond_to('^term\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+create\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+add\\\\s+([\\\\w-]+)$')\ndef term_create(message, command):\n \"\"\"\n 指定されたコマンドを生成する\n \"\"\"\n if command in ('list', 'help'):\n return\n command = command.lower()\n if command in RESERVED:\n botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))\n return\n creator = message.body['user']\n term, created = Term.get_or_create(command=command, creator=creator)\n if not created:\n botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))\n else:\n msg = 'コマンド `${}` を作成しました。\\n'.format(command)\n msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)\n botsend(message, msg)\n commands.add(command)\n\n\n@respond_to('^term\\\\s+(drop|del|delete)\\\\s+([\\\\w-]+)$')\ndef term_drop(message, subcommand, command):\n \"\"\"\n 指定されたコマンドを消去する\n \"\"\"\n command = command.lower()\n if not _available_command(message, command):\n return\n term = Term.get(command=command)\n term.delete_instance(recursive=True)\n term.save()\n commands.remove(command)\n botsend(message, 'コマンド `${}` を消去しました'.format(command))\n\n\ndef _create_attachments_for_list(pretext, data, command=True):\n \"\"\"\n 指定されたリストの一覧を message.send_webapi で送信するための\n attachments を生成する\n \"\"\"\n if command:\n list_text = ', '.join(['`${}`'.format(x) for x in data])\n else:\n list_text = '\\n'.join([x for x in data])\n attachments = [{'pretext': pretext, 'text': list_text, 'mrkdwn_in': [\n 'pretext', 'text']}]\n return attachments\n\n\n<mask token>\n\n\n@respond_to('^term\\\\s+list$')\ndef term_list(message):\n \"\"\"\n 現在使用可能な用語コマンドの一覧を返す\n \"\"\"\n pretext = '用語コマンドの一覧です'\n attachments = _create_attachments_for_list(pretext, sorted(commands))\n botwebapi(message, attachments)\n\n\ndef _available_command(message, command):\n \"\"\"\n 指定されたコマンドが有効化どうかを返す\n \"\"\"\n result = True\n if command in RESERVED:\n result = False\n elif command not in commands:\n 
botsend(message, 'コマンド `${}` は登録されていません'.format(command))\n result = False\n return result\n\n\ndef _send_markdown_text(message, text):\n \"\"\"\n 指定されたtextをmarkdown形式で送信する\n \"\"\"\n attachments = [{'pretext': text, 'mrkdwn_in': ['pretext']}]\n botwebapi(message, attachments)\n\n\n<mask token>\n\n\ndef add_response(message, command, text):\n \"\"\"\n 用語コマンドに応答を追加する\n \"\"\"\n if _exist_response(command, text):\n reply = 'コマンド `${}` に「{}」は登録済みです'.format(command, text)\n _send_markdown_text(message, reply)\n return\n term = Term.get(command=command)\n creator = message.body['user']\n resp, created = Response.get_or_create(term=term, text=text, creator=\n creator, created=datetime.now())\n resp.save()\n text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)\n _send_markdown_text(message, text)\n\n\ndef del_response(message, command, text):\n \"\"\"\n 用語コマンドから応答を削除する\n \"\"\"\n term = Term.get(command=command)\n try:\n response = Response.get(term=term, text=text)\n except Response.DoesNotExist:\n reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, text)\n _send_markdown_text(message, reply)\n return\n response.delete_instance()\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\ndef pop_response(message, command):\n \"\"\"\n 用語コマンドで最後に登録された応答を削除する\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n return\n last_response = response_set.order_by(Response.created.desc())[0]\n text = last_response.text\n last_response.delete_instance()\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\ndef search_responses(message, command, keyword):\n \"\"\"\n 用語コマンドに登録されている応答のうち、キーワードにマッチするものを返す\n \"\"\"\n term = Term.get(command=command)\n pat = '%{}%'.format(keyword)\n responses = 
Response.select().where(term == term, Response.text ** pat)\n if len(responses) == 0:\n botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command,\n keyword))\n else:\n pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\\n'.format(command,\n keyword, len(responses))\n data = [x.text for x in responses]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\ndef get_responses(message, command):\n \"\"\"\n 用語コマンドに登録されている応答の一覧を返す\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n pretext = 'コマンド `${}` の応答は {} 件あります\\n'.format(command, len(\n response_set))\n data = [x.text for x in response_set]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\n@respond_to('term\\\\s+help')\ndef term_help(message):\n \"\"\"\n term pluginのヘルプを返す\n \"\"\"\n botsend(message,\n \"\"\"- `$term (用語)`: 用語コマンドを作成する\n- `$term create (用語)`: 用語コマンドを作成する\n- `$term drop (用語)`: 用語コマンドを消去する\n- `$term search (キーワード)`: キーワードを含む用語コマンドの一覧を返す\n- `$term list`: 用語コマンドの一覧を返す\n\n- `$(用語)`: 用語コマンドに登録してある応答からランダムに一つ返す\n- `$(用語) add (応答)`: 用語コマンドに応答を追加する\n- `$(用語) del (応答)`: 用語コマンドから応答を削除する\n- `$(用語) pop`: 用語コマンドの最後に登録した応答を削除する\n- `$(用語) list`: 用語コマンドの応答一覧を返す\n- `$(用語) search (キーワード)`: 用語コマンドのうちキーワードを含む応答一覧を返す\n```\n> $term create 酒\nコマンド `$酒` を作成しました。\n`$酒 add (レスポンス)` でレスポンスを追加できます\n> $酒 add ビール\nコマンド `$酒` に `ビール` を追加しました\n> $酒 add ワイン\nコマンド `$酒` に `ワイン` を追加しました\n> $酒\nビール\n```\n\"\"\"\n )\n",
"step-4": "<mask token>\n\n\n@respond_to('^term\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+create\\\\s+([\\\\w-]+)$')\n@respond_to('^term\\\\s+add\\\\s+([\\\\w-]+)$')\ndef term_create(message, command):\n \"\"\"\n 指定されたコマンドを生成する\n \"\"\"\n if command in ('list', 'help'):\n return\n command = command.lower()\n if command in RESERVED:\n botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))\n return\n creator = message.body['user']\n term, created = Term.get_or_create(command=command, creator=creator)\n if not created:\n botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))\n else:\n msg = 'コマンド `${}` を作成しました。\\n'.format(command)\n msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)\n botsend(message, msg)\n commands.add(command)\n\n\n@respond_to('^term\\\\s+(drop|del|delete)\\\\s+([\\\\w-]+)$')\ndef term_drop(message, subcommand, command):\n \"\"\"\n 指定されたコマンドを消去する\n \"\"\"\n command = command.lower()\n if not _available_command(message, command):\n return\n term = Term.get(command=command)\n term.delete_instance(recursive=True)\n term.save()\n commands.remove(command)\n botsend(message, 'コマンド `${}` を消去しました'.format(command))\n\n\ndef _create_attachments_for_list(pretext, data, command=True):\n \"\"\"\n 指定されたリストの一覧を message.send_webapi で送信するための\n attachments を生成する\n \"\"\"\n if command:\n list_text = ', '.join(['`${}`'.format(x) for x in data])\n else:\n list_text = '\\n'.join([x for x in data])\n attachments = [{'pretext': pretext, 'text': list_text, 'mrkdwn_in': [\n 'pretext', 'text']}]\n return attachments\n\n\n@respond_to('^term\\\\s+search\\\\s+([\\\\w-]+)$')\ndef term_search(message, keyword):\n \"\"\"\n 指定したキーワードを含む用語コマンドの一覧を返す\n \"\"\"\n pretext = '`{}` を含む用語コマンドの一覧です'.format(keyword)\n data = []\n for command in sorted(commands):\n if keyword in command:\n data.append(command)\n attachments = _create_attachments_for_list(pretext, data)\n botwebapi(message, attachments)\n\n\n@respond_to('^term\\\\s+list$')\ndef term_list(message):\n 
\"\"\"\n 現在使用可能な用語コマンドの一覧を返す\n \"\"\"\n pretext = '用語コマンドの一覧です'\n attachments = _create_attachments_for_list(pretext, sorted(commands))\n botwebapi(message, attachments)\n\n\ndef _available_command(message, command):\n \"\"\"\n 指定されたコマンドが有効化どうかを返す\n \"\"\"\n result = True\n if command in RESERVED:\n result = False\n elif command not in commands:\n botsend(message, 'コマンド `${}` は登録されていません'.format(command))\n result = False\n return result\n\n\ndef _send_markdown_text(message, text):\n \"\"\"\n 指定されたtextをmarkdown形式で送信する\n \"\"\"\n attachments = [{'pretext': text, 'mrkdwn_in': ['pretext']}]\n botwebapi(message, attachments)\n\n\n@respond_to('^([\\\\w-]+)$')\ndef return_response(message, command):\n \"\"\"\n 用語コマンドに登録されている応答をランダムに返す\n \"\"\"\n if not _available_command(message, command):\n return\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n response = random.choice(response_set)\n _send_markdown_text(message, response.text)\n\n\n@respond_to('^([\\\\w-]+)\\\\s+(.*)')\ndef response(message, command, params):\n \"\"\"\n 用語コマンドの処理をする\n \"\"\"\n if not _available_command(message, command):\n return\n data = params.split(maxsplit=1)\n subcommand = data[0]\n try:\n if subcommand == 'pop':\n pop_response(message, command)\n elif subcommand == 'list':\n get_responses(message, command)\n elif subcommand == 'search':\n search_responses(message, command, data[1])\n elif subcommand in ('del', 'delete', 'remove'):\n del_response(message, command, data[1])\n elif subcommand == 'add':\n add_response(message, command, data[1])\n else:\n add_response(message, command, params)\n except IndexError:\n term_help(message)\n pass\n\n\n<mask token>\n\n\ndef add_response(message, command, text):\n \"\"\"\n 用語コマンドに応答を追加する\n \"\"\"\n if _exist_response(command, text):\n reply = 'コマンド `${}` 
に「{}」は登録済みです'.format(command, text)\n _send_markdown_text(message, reply)\n return\n term = Term.get(command=command)\n creator = message.body['user']\n resp, created = Response.get_or_create(term=term, text=text, creator=\n creator, created=datetime.now())\n resp.save()\n text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)\n _send_markdown_text(message, text)\n\n\ndef del_response(message, command, text):\n \"\"\"\n 用語コマンドから応答を削除する\n \"\"\"\n term = Term.get(command=command)\n try:\n response = Response.get(term=term, text=text)\n except Response.DoesNotExist:\n reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, text)\n _send_markdown_text(message, reply)\n return\n response.delete_instance()\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\ndef pop_response(message, command):\n \"\"\"\n 用語コマンドで最後に登録された応答を削除する\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n return\n last_response = response_set.order_by(Response.created.desc())[0]\n text = last_response.text\n last_response.delete_instance()\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\ndef search_responses(message, command, keyword):\n \"\"\"\n 用語コマンドに登録されている応答のうち、キーワードにマッチするものを返す\n \"\"\"\n term = Term.get(command=command)\n pat = '%{}%'.format(keyword)\n responses = Response.select().where(term == term, Response.text ** pat)\n if len(responses) == 0:\n botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command,\n keyword))\n else:\n pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\\n'.format(command,\n keyword, len(responses))\n data = [x.text for x in responses]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\ndef get_responses(message, command):\n \"\"\"\n 
用語コマンドに登録されている応答の一覧を返す\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n pretext = 'コマンド `${}` の応答は {} 件あります\\n'.format(command, len(\n response_set))\n data = [x.text for x in response_set]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\n@respond_to('term\\\\s+help')\ndef term_help(message):\n \"\"\"\n term pluginのヘルプを返す\n \"\"\"\n botsend(message,\n \"\"\"- `$term (用語)`: 用語コマンドを作成する\n- `$term create (用語)`: 用語コマンドを作成する\n- `$term drop (用語)`: 用語コマンドを消去する\n- `$term search (キーワード)`: キーワードを含む用語コマンドの一覧を返す\n- `$term list`: 用語コマンドの一覧を返す\n\n- `$(用語)`: 用語コマンドに登録してある応答からランダムに一つ返す\n- `$(用語) add (応答)`: 用語コマンドに応答を追加する\n- `$(用語) del (応答)`: 用語コマンドから応答を削除する\n- `$(用語) pop`: 用語コマンドの最後に登録した応答を削除する\n- `$(用語) list`: 用語コマンドの応答一覧を返す\n- `$(用語) search (キーワード)`: 用語コマンドのうちキーワードを含む応答一覧を返す\n```\n> $term create 酒\nコマンド `$酒` を作成しました。\n`$酒 add (レスポンス)` でレスポンスを追加できます\n> $酒 add ビール\nコマンド `$酒` に `ビール` を追加しました\n> $酒 add ワイン\nコマンド `$酒` に `ワイン` を追加しました\n> $酒\nビール\n```\n\"\"\"\n )\n",
"step-5": "import random\nfrom datetime import datetime\n\nfrom slackbot.bot import respond_to\n\nfrom .term_model import Term, Response\nfrom ..botmessage import botsend, botwebapi\n\n# すでに存在するコマンドは無視する\nRESERVED = (\n 'drive', 'manual', 'jira', 'wikipedia', 'plusplus',\n 'translate', '翻訳',\n 'weather', '天気',\n 'term',\n 'shuffle', 'help', 'choice', 'ping', 'version', 'random', 'cal',\n 'google', 'image', 'map', 'gadmin',\n 'github',\n 'suddendeath',\n 'pycamp',\n 'lgtm',\n)\n\n# コマンド一覧を初期化\ncommands = {term.command for term in Term.select()}\n\n\n@respond_to('^term\\s+([\\w-]+)$')\n@respond_to('^term\\s+create\\s+([\\w-]+)$')\n@respond_to('^term\\s+add\\s+([\\w-]+)$')\ndef term_create(message, command):\n \"\"\"\n 指定されたコマンドを生成する\n \"\"\"\n if command in ('list', 'help'):\n return\n \n # コマンドは小文字に統一\n command = command.lower()\n # 予約語の場合は実行しない\n if command in RESERVED:\n botsend(message, 'コマンド `${}` は予約語なので登録できません'.format(command))\n return\n\n creator = message.body['user']\n term, created = Term.get_or_create(command=command, creator=creator)\n if not created:\n # すでに登録してあるコマンドは登録しない\n botsend(message, 'コマンド `${}` はすでに登録されています'.format(command))\n\n else:\n msg = 'コマンド `${}` を作成しました。\\n'.format(command)\n msg += '`${} add (レスポンス)` でレスポンスを追加できます'.format(command)\n botsend(message, msg)\n\n # コマンド一覧の set に追加\n commands.add(command)\n\n\n@respond_to('^term\\s+(drop|del|delete)\\s+([\\w-]+)$')\ndef term_drop(message, subcommand, command):\n \"\"\"\n 指定されたコマンドを消去する\n \"\"\"\n # コマンドは小文字に統一\n command = command.lower()\n\n # コマンドの存在チェック\n if not _available_command(message, command):\n return\n\n # 用語コマンドと応答をまとめて削除\n term = Term.get(command=command)\n term.delete_instance(recursive=True)\n term.save()\n\n # コマンド一覧の set から削除\n commands.remove(command)\n botsend(message, 'コマンド `${}` を消去しました'.format(command))\n\n\ndef _create_attachments_for_list(pretext, data, command=True):\n \"\"\"\n 指定されたリストの一覧を message.send_webapi で送信するための\n attachments を生成する\n \"\"\"\n if command:\n # 
['foo', 'bar', 'baz'] -> '`$far`, `$bar`, `$baz`'\n list_text = ', '.join(['`${}`'.format(x) for x in data])\n else:\n list_text = '\\n'.join([x for x in data])\n attachments = [{\n 'pretext': pretext,\n 'text': list_text,\n 'mrkdwn_in': ['pretext', 'text'],\n }]\n return attachments\n\n\n@respond_to('^term\\s+search\\s+([\\w-]+)$')\ndef term_search(message, keyword):\n \"\"\"\n 指定したキーワードを含む用語コマンドの一覧を返す\n \"\"\"\n pretext = '`{}` を含む用語コマンドの一覧です'.format(keyword)\n data = []\n for command in sorted(commands):\n if keyword in command:\n data.append(command)\n attachments = _create_attachments_for_list(pretext, data)\n botwebapi(message, attachments)\n\n\n@respond_to('^term\\s+list$')\ndef term_list(message):\n \"\"\"\n 現在使用可能な用語コマンドの一覧を返す\n \"\"\"\n pretext = '用語コマンドの一覧です'\n attachments = _create_attachments_for_list(pretext, sorted(commands))\n botwebapi(message, attachments)\n\n\ndef _available_command(message, command):\n \"\"\"\n 指定されたコマンドが有効化どうかを返す\n \"\"\"\n result = True\n\n if command in RESERVED:\n result = False\n elif command not in commands:\n botsend(message, 'コマンド `${}` は登録されていません'.format(command))\n result = False\n\n return result\n\n\ndef _send_markdown_text(message, text):\n \"\"\"\n 指定されたtextをmarkdown形式で送信する\n \"\"\"\n attachments = [{\n 'pretext': text,\n 'mrkdwn_in': ['pretext'],\n }]\n botwebapi(message, attachments)\n\n\n@respond_to('^([\\w-]+)$')\ndef return_response(message, command):\n \"\"\"\n 用語コマンドに登録されている応答をランダムに返す\n \"\"\"\n if not _available_command(message, command):\n return\n\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n response = random.choice(response_set)\n _send_markdown_text(message, response.text)\n\n\n@respond_to('^([\\w-]+)\\s+(.*)')\ndef response(message, command, params):\n \"\"\"\n 用語コマンドの処理をする\n \"\"\"\n if not _available_command(message, 
command):\n return\n\n data = params.split(maxsplit=1)\n subcommand = data[0]\n try:\n if subcommand == 'pop':\n # 最後に登録された応答を削除\n pop_response(message, command)\n elif subcommand == 'list':\n # 応答の一覧を返す\n get_responses(message, command)\n elif subcommand == 'search':\n # 応答を検索\n search_responses(message, command, data[1])\n elif subcommand in ('del', 'delete', 'remove'):\n # 応答を削除\n del_response(message, command, data[1])\n elif subcommand == 'add':\n # 応答を追加\n add_response(message, command, data[1])\n else:\n # サブコマンドが存在しない場合も追加\n add_response(message, command, params)\n except IndexError:\n # ヘルプを返す\n term_help(message)\n pass\n\n\ndef _exist_response(command, text):\n \"\"\"\n 指定されたコマンドに応答が登録されているかを調べて返す\n \"\"\"\n term = Term.get(command=command)\n count = Response.select().where(Response.term == term,\n Response.text == text).count()\n if count == 0:\n return False\n else:\n return True\n\n\ndef add_response(message, command, text):\n \"\"\"\n 用語コマンドに応答を追加する\n \"\"\"\n\n # 登録済かどうかを確認する\n if _exist_response(command, text):\n reply = 'コマンド `${}` に「{}」は登録済みです'.format(command, text)\n _send_markdown_text(message, reply)\n return\n\n term = Term.get(command=command)\n creator = message.body['user']\n # 用語を登録する\n resp, created = Response.get_or_create(term=term, text=text,\n creator=creator,\n created=datetime.now())\n resp.save()\n text = 'コマンド `${}` に「{}」を追加しました'.format(command, text)\n _send_markdown_text(message, text)\n\n\ndef del_response(message, command, text):\n \"\"\"\n 用語コマンドから応答を削除する\n \"\"\"\n term = Term.get(command=command)\n try:\n response = Response.get(term=term, text=text)\n except Response.DoesNotExist:\n reply = 'コマンド `${}` に「{}」は登録されていません'.format(command, text)\n _send_markdown_text(message, reply)\n return\n\n # 応答を削除する\n response.delete_instance()\n\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\ndef pop_response(message, command):\n \"\"\"\n 用語コマンドで最後に登録された応答を削除する\n \"\"\"\n 
response_set = Term.get(command=command).response_set\n # 応答が登録されていない\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n return\n \n last_response = response_set.order_by(Response.created.desc())[0]\n text = last_response.text\n last_response.delete_instance()\n\n reply = 'コマンド `${}` から「{}」を削除しました'.format(command, text)\n _send_markdown_text(message, reply)\n\n\ndef search_responses(message, command, keyword):\n \"\"\"\n 用語コマンドに登録されている応答のうち、キーワードにマッチするものを返す\n \"\"\"\n term = Term.get(command=command)\n pat = '%{}%'.format(keyword)\n responses = Response.select().where(term == term, Response.text ** pat)\n\n if len(responses) == 0:\n botsend(message, 'コマンド `${}` に `{}` を含む応答はありません'.format(command, keyword))\n else:\n pretext = 'コマンド `${}` の `{}` を含む応答は {} 件あります\\n'.format(\n command, keyword, len(responses))\n data = [x.text for x in responses]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\ndef get_responses(message, command):\n \"\"\"\n 用語コマンドに登録されている応答の一覧を返す\n \"\"\"\n response_set = Term.get(command=command).response_set\n if len(response_set) == 0:\n msg = 'コマンド `${}` には応答が登録されていません\\n'.format(command)\n msg += '`${} add (レスポンス)` で応答を登録してください'.format(command)\n botsend(message, msg)\n else:\n pretext = 'コマンド `${}` の応答は {} 件あります\\n'.format(\n command, len(response_set))\n data = [x.text for x in response_set]\n attachments = _create_attachments_for_list(pretext, data, False)\n botwebapi(message, attachments)\n\n\n@respond_to('term\\s+help')\ndef term_help(message):\n \"\"\"\n term pluginのヘルプを返す\n \"\"\"\n botsend(message, '''- `$term (用語)`: 用語コマンドを作成する\n- `$term create (用語)`: 用語コマンドを作成する\n- `$term drop (用語)`: 用語コマンドを消去する\n- `$term search (キーワード)`: キーワードを含む用語コマンドの一覧を返す\n- `$term list`: 用語コマンドの一覧を返す\n\n- `$(用語)`: 用語コマンドに登録してある応答からランダムに一つ返す\n- `$(用語) add (応答)`: 用語コマンドに応答を追加する\n- `$(用語) del 
(応答)`: 用語コマンドから応答を削除する\n- `$(用語) pop`: 用語コマンドの最後に登録した応答を削除する\n- `$(用語) list`: 用語コマンドの応答一覧を返す\n- `$(用語) search (キーワード)`: 用語コマンドのうちキーワードを含む応答一覧を返す\n```\n> $term create 酒\nコマンド `$酒` を作成しました。\n`$酒 add (レスポンス)` でレスポンスを追加できます\n> $酒 add ビール\nコマンド `$酒` に `ビール` を追加しました\n> $酒 add ワイン\nコマンド `$酒` に `ワイン` を追加しました\n> $酒\nビール\n```\n''')\n",
"step-ids": [
7,
9,
12,
15,
19
]
}
|
[
7,
9,
12,
15,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(art.guess)
print(art.the)
print(art.number)
print("I'm thinking of a number between 1 and 100")
<|reserved_special_token_0|>
if difficulty == 'easy':
turns += 10
else:
turns += 5
<|reserved_special_token_0|>
while not gameover:
print(f"You've got {turns} turns left!")
guess = int(input('Guess a number!\n'))
if guess > number:
print('too high!')
turns -= 1
elif guess < number:
print('too low!')
turns -= 1
elif guess == number:
print('Thats it! You Win!')
gameover = True
if turns == 0:
print('You used all your chances!')
print('GAME OVER')
gameover = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Number-guessing game: show banner, pick a secret 1..100, then loop on
# user guesses until correct or out of turns.
# NOTE(review): `art` and `random` appear to be imported earlier in the
# file — not visible in this fragment; confirm.
print(art.guess)
print(art.the)
print(art.number)
print("I'm thinking of a number between 1 and 100")
number = random.randint(1, 100)
turns = 0
difficulty = input("Chose a difficulty. 'easy' or 'hard'?\n")
# Easy mode grants 10 guesses; any other answer is treated as hard (5).
if difficulty == 'easy':
    turns += 10
else:
    turns += 5
gameover = False
while not gameover:
    print(f"You've got {turns} turns left!")
    # Assumes numeric input; a non-integer entry would raise ValueError.
    guess = int(input('Guess a number!\n'))
    if guess > number:
        print('too high!')
        turns -= 1
    elif guess < number:
        print('too low!')
        turns -= 1
    elif guess == number:
        print('Thats it! You Win!')
        gameover = True
    # Out of turns: end the game with a loss.
    if turns == 0:
        print('You used all your chances!')
        print('GAME OVER')
        gameover = True
<|reserved_special_token_1|>
import art
import random

# Display the ASCII-art banner.
print(art.guess)
print(art.the)
print(art.number)
print("I'm thinking of a number between 1 and 100")

# Pick the secret number and set the turn budget from the chosen difficulty:
# 'easy' grants 10 guesses, anything else is treated as hard (5).
secret = random.randint(1, 100)
turns = 0
difficulty = input("Chose a difficulty. 'easy' or 'hard'?\n")
turns += 10 if difficulty == 'easy' else 5

gameover = False
while not gameover:
    print(f"You've got {turns} turns left!")
    attempt = int(input('Guess a number!\n'))
    if attempt == secret:
        # Correct guess ends the game immediately.
        print('Thats it! You Win!')
        gameover = True
    else:
        # Wrong guess: hint in the right direction and spend a turn.
        print('too high!' if attempt > secret else 'too low!')
        turns -= 1
    if turns == 0:
        # Turn budget exhausted: the player loses.
        print('You used all your chances!')
        print('GAME OVER')
        gameover = True
<|reserved_special_token_1|>
import art
import random

# Display the ASCII-art banner.
print(art.guess)
print(art.the)
print(art.number)
print("I'm thinking of a number between 1 and 100")

number = random.randint(1, 100)
turns = 0
difficulty = input("Chose a difficulty. 'easy' or 'hard'?\n")
# 'easy' grants 10 guesses; any other answer is treated as hard (5).
if difficulty == 'easy':
    turns += 10
else:
    turns += 5

gameover = False
while not gameover:
    print(f"You've got {turns} turns left!")
    # FIX: a non-numeric entry previously crashed the game with an
    # unhandled ValueError; reprompt instead, without spending a turn.
    try:
        guess = int(input("Guess a number!\n"))
    except ValueError:
        print("That's not a number!")
        continue
    if guess > number:
        print("too high!")
        turns -= 1
    elif guess < number:
        print("too low!")
        turns -= 1
    elif guess == number:
        print("Thats it! You Win!")
        gameover = True
    # Turn budget exhausted: the player loses.
    if turns == 0:
        print("You used all your chances!")
        print("GAME OVER")
        gameover = True
|
flexible
|
{
"blob_id": "f2bf4f5b057af1d2362ec8d1472aa76e774be1c7",
"index": 2736,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\n<mask token>\nif difficulty == 'easy':\n turns += 10\nelse:\n turns += 5\n<mask token>\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input('Guess a number!\\n'))\n if guess > number:\n print('too high!')\n turns -= 1\n elif guess < number:\n print('too low!')\n turns -= 1\n elif guess == number:\n print('Thats it! You Win!')\n gameover = True\n if turns == 0:\n print('You used all your chances!')\n print('GAME OVER')\n gameover = True\n",
"step-3": "<mask token>\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\nnumber = random.randint(1, 100)\nturns = 0\ndifficulty = input(\"Chose a difficulty. 'easy' or 'hard'?\\n\")\nif difficulty == 'easy':\n turns += 10\nelse:\n turns += 5\ngameover = False\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input('Guess a number!\\n'))\n if guess > number:\n print('too high!')\n turns -= 1\n elif guess < number:\n print('too low!')\n turns -= 1\n elif guess == number:\n print('Thats it! You Win!')\n gameover = True\n if turns == 0:\n print('You used all your chances!')\n print('GAME OVER')\n gameover = True\n",
"step-4": "import art\nimport random\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\nnumber = random.randint(1, 100)\nturns = 0\ndifficulty = input(\"Chose a difficulty. 'easy' or 'hard'?\\n\")\nif difficulty == 'easy':\n turns += 10\nelse:\n turns += 5\ngameover = False\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input('Guess a number!\\n'))\n if guess > number:\n print('too high!')\n turns -= 1\n elif guess < number:\n print('too low!')\n turns -= 1\n elif guess == number:\n print('Thats it! You Win!')\n gameover = True\n if turns == 0:\n print('You used all your chances!')\n print('GAME OVER')\n gameover = True\n",
"step-5": "import art\nimport random\n\nprint(art.guess)\nprint(art.the)\nprint(art.number)\nprint(\"I'm thinking of a number between 1 and 100\")\n\nnumber = random.randint(1,100)\nturns = 0\n\ndifficulty = input(\"Chose a difficulty. 'easy' or 'hard'?\\n\")\n\nif difficulty == 'easy':\n turns +=10\nelse:\n turns +=5\n\ngameover = False\n\nwhile not gameover:\n print(f\"You've got {turns} turns left!\")\n guess = int(input(\"Guess a number!\\n\"))\n\n if guess > number:\n print(\"too high!\")\n turns -= 1\n elif guess < number:\n print(\"too low!\")\n turns -= 1\n elif guess == number:\n print(\"Thats it! You Win!\")\n gameover = True\n\n if turns == 0:\n print(\"You used all your chances!\")\n print(\"GAME OVER\")\n gameover = True",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def valid_anagram(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = len(lines)
for line in lines:
split = line.rstrip().split(' ')
split = [sorted(s) for s in split]
for word in split:
if split.count(word) > 1:
result -= 1
break
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def valid(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = 0
for line in lines:
split = line.rstrip().split(' ')
if len(split) == len(set(split)):
result += 1
return result
<|reserved_special_token_0|>
def valid_anagram(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = len(lines)
for line in lines:
split = line.rstrip().split(' ')
split = [sorted(s) for s in split]
for word in split:
if split.count(word) > 1:
result -= 1
break
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def valid(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = 0
for line in lines:
split = line.rstrip().split(' ')
if len(split) == len(set(split)):
result += 1
return result
<|reserved_special_token_0|>
def valid_anagram(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = len(lines)
for line in lines:
split = line.rstrip().split(' ')
split = [sorted(s) for s in split]
for word in split:
if split.count(word) > 1:
result -= 1
break
return result
if __name__ == '__main__':
print(valid('day4-input.txt'))
print(valid_anagram('day4-input.txt'))
<|reserved_special_token_1|>
# Advent of Code: Day 4
"""A new system policy has been put in place that requires all accounts to
use a passphrase instead of simply a password. A passphrase consists of a
series of words (lowercase letters) separated by spaces.
To ensure security, a valid passphrase must contain no duplicate words.
"""
def valid(filename):
    """Count passphrases in *filename* that contain no duplicate words.

    Each line of the file is one passphrase: words separated by single
    spaces. A passphrase is valid when every word on the line is unique.

    Returns the number of valid passphrases.
    """
    # `with` guarantees the handle is closed even if reading raises.
    with open(filename, 'r') as f:
        lines = f.readlines()
    result = 0
    for line in lines:
        words = line.rstrip().split(' ')
        # Valid iff deduplicating into a set loses nothing.
        if len(words) == len(set(words)):
            result += 1
    return result
"""For added security, yet another system policy has been put in place.
Now, a valid passphrase must contain no two words that are anagrams of
each other - that is, a passphrase is invalid if any word's letters can
be rearranged to form any other word in the passphrase.
"""
def valid_anagram(filename):
    """Count passphrases in *filename* with no two words that are anagrams.

    A passphrase (one per line, words separated by single spaces) is
    invalid if any word's letters can be rearranged to form another word
    on the same line.

    Returns the number of valid passphrases.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    result = 0
    for line in lines:
        words = line.rstrip().split(' ')
        # Two words are anagrams iff their sorted letters match, so the
        # line is valid iff every canonical (sorted) form is unique.
        # Set comparison replaces the original O(n^2) list.count() scan.
        canonical = [tuple(sorted(w)) for w in words]
        if len(canonical) == len(set(canonical)):
            result += 1
    return result
# Script entry point: print both puzzle answers for the day-4 input file.
if __name__ == '__main__':
	print(valid('day4-input.txt'))
	print(valid_anagram('day4-input.txt'))
|
flexible
|
{
"blob_id": "7dce240a891e807b1f5251a09a69368f4e513973",
"index": 4472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef valid(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = 0\n for line in lines:\n split = line.rstrip().split(' ')\n if len(split) == len(set(split)):\n result += 1\n return result\n\n\n<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef valid(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = 0\n for line in lines:\n split = line.rstrip().split(' ')\n if len(split) == len(set(split)):\n result += 1\n return result\n\n\n<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\nif __name__ == '__main__':\n print(valid('day4-input.txt'))\n print(valid_anagram('day4-input.txt'))\n",
"step-5": "# Advent of Code: Day 4\n\n\"\"\"A new system policy has been put in place that requires all accounts to \nuse a passphrase instead of simply a password. A passphrase consists of a \nseries of words (lowercase letters) separated by spaces.\n\nTo ensure security, a valid passphrase must contain no duplicate words.\n\n\"\"\"\ndef valid(filename):\n\tf = open(filename, 'r')\n\tlines = f.readlines()\n\tf.close()\n\t\n\tresult = 0\n\tfor line in lines:\n\t\tsplit = line.rstrip().split(' ')\n\t\tif len(split) == len(set(split)):\n\t\t\tresult += 1\t\t\n\t\t\t\n\treturn result\n\t\n\n\"\"\"For added security, yet another system policy has been put in place. \nNow, a valid passphrase must contain no two words that are anagrams of \neach other - that is, a passphrase is invalid if any word's letters can \nbe rearranged to form any other word in the passphrase.\n\n\"\"\"\t\t\ndef valid_anagram(filename):\n\tf = open(filename, 'r')\n\tlines = f.readlines()\n\tf.close()\n\t\n\tresult = len(lines)\n\tfor line in lines:\n\t\tsplit = line.rstrip().split(' ')\n\t\tsplit = [sorted(s) for s in split]\n\t\tfor word in split:\n\t\t\tif split.count(word) > 1:\n\t\t\t\tresult -= 1\n\t\t\t\tbreak\t\t\n\t\t\t\n\treturn result\t\n\t\n\t\nif __name__ == '__main__':\n\tprint(valid('day4-input.txt'))\n\tprint(valid_anagram('day4-input.txt'))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LinkedList:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def atEnd(self, data):
NewNode = Node(data)
NewNode.nextVal = None
if self.headVal is None:
self.headVal = NewNode
return NewNode
last = self.headVal
while last.nextVal:
last = last.nextVal
last.nextVal = NewNode
return NewNode
def inBetween(self, n1, n2, data):
NewNode = Node(data)
n1.nextVal = NewNode
NewNode.nextVal = n2
return NewNode
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getNthNode(self, n):
curr = self.headVal
i = 1
while curr:
if i == n:
print(curr.dataVal)
return
i += 1
curr = curr.nextVal
<|reserved_special_token_0|>
def reverseLinkedList(self):
curr = self.headVal
print('1:', curr.dataVal)
while curr:
self.reverse(curr)
curr = curr.nextVal
def checkPalindrome(self):
curr = self.headVal
firstNode = self.headVal
nextNode = curr.nextVal
ans = False
while curr.dataVal is None:
if curr.dataVal == firstNode.dataVal:
ans = True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LinkedList:
def __init__(self):
self.headVal = None
<|reserved_special_token_0|>
def atEnd(self, data):
NewNode = Node(data)
NewNode.nextVal = None
if self.headVal is None:
self.headVal = NewNode
return NewNode
last = self.headVal
while last.nextVal:
last = last.nextVal
last.nextVal = NewNode
return NewNode
def inBetween(self, n1, n2, data):
NewNode = Node(data)
n1.nextVal = NewNode
NewNode.nextVal = n2
return NewNode
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def deleteNthNode(self, n):
last = self.headVal
i = 2
while last:
if i == n:
prevNode = last
nextNode = last.nextVal.nextVal
prevNode.nextVal = nextNode
return
i += 1
last = last.nextVal
def getNthNode(self, n):
curr = self.headVal
i = 1
while curr:
if i == n:
print(curr.dataVal)
return
i += 1
curr = curr.nextVal
def reverse(self, node):
print('2:', node.dataVal)
if node.nextVal == None:
self.headVal = node
return
print('3:', node.dataVal)
self.reverse(node.nextVal)
tmp = node.nextVal
tmp.nextVal = node
node.nextVal = None
def reverseLinkedList(self):
curr = self.headVal
print('1:', curr.dataVal)
while curr:
self.reverse(curr)
curr = curr.nextVal
def checkPalindrome(self):
curr = self.headVal
firstNode = self.headVal
nextNode = curr.nextVal
ans = False
while curr.dataVal is None:
if curr.dataVal == firstNode.dataVal:
ans = True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LinkedList:
def __init__(self):
self.headVal = None
def atBeginning(self, data):
NewNode = Node(data)
NewNode.nextVal = self.headVal
self.headVal = NewNode
return NewNode
def atEnd(self, data):
NewNode = Node(data)
NewNode.nextVal = None
if self.headVal is None:
self.headVal = NewNode
return NewNode
last = self.headVal
while last.nextVal:
last = last.nextVal
last.nextVal = NewNode
return NewNode
def inBetween(self, n1, n2, data):
NewNode = Node(data)
n1.nextVal = NewNode
NewNode.nextVal = n2
return NewNode
def deleteNode(self, node):
last = self.headVal
if self.headVal == node:
self.headVal = node.nextVal
return
else:
while last:
if last.nextVal.dataVal == node.dataVal:
if last.nextVal is not None:
last.nextVal = node.nextVal
return
else:
self.headVal.nextVal = None
last = last.nextVal
def printList(self):
self.printVal = self.headVal
while self.printVal is not None:
print(self.printVal.dataVal)
self.printVal = self.printVal.nextVal
def deleteNthNode(self, n):
last = self.headVal
i = 2
while last:
if i == n:
prevNode = last
nextNode = last.nextVal.nextVal
prevNode.nextVal = nextNode
return
i += 1
last = last.nextVal
def getNthNode(self, n):
curr = self.headVal
i = 1
while curr:
if i == n:
print(curr.dataVal)
return
i += 1
curr = curr.nextVal
def reverse(self, node):
print('2:', node.dataVal)
if node.nextVal == None:
self.headVal = node
return
print('3:', node.dataVal)
self.reverse(node.nextVal)
tmp = node.nextVal
tmp.nextVal = node
node.nextVal = None
def reverseLinkedList(self):
curr = self.headVal
print('1:', curr.dataVal)
while curr:
self.reverse(curr)
curr = curr.nextVal
def checkPalindrome(self):
curr = self.headVal
firstNode = self.headVal
nextNode = curr.nextVal
ans = False
while curr.dataVal is None:
if curr.dataVal == firstNode.dataVal:
ans = True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, dataVal=None):
self.dataVal = dataVal
self.nextVal = None
class LinkedList:
def __init__(self):
self.headVal = None
def atBeginning(self, data):
NewNode = Node(data)
NewNode.nextVal = self.headVal
self.headVal = NewNode
return NewNode
def atEnd(self, data):
NewNode = Node(data)
NewNode.nextVal = None
if self.headVal is None:
self.headVal = NewNode
return NewNode
last = self.headVal
while last.nextVal:
last = last.nextVal
last.nextVal = NewNode
return NewNode
def inBetween(self, n1, n2, data):
NewNode = Node(data)
n1.nextVal = NewNode
NewNode.nextVal = n2
return NewNode
def deleteNode(self, node):
last = self.headVal
if self.headVal == node:
self.headVal = node.nextVal
return
else:
while last:
if last.nextVal.dataVal == node.dataVal:
if last.nextVal is not None:
last.nextVal = node.nextVal
return
else:
self.headVal.nextVal = None
last = last.nextVal
def printList(self):
self.printVal = self.headVal
while self.printVal is not None:
print(self.printVal.dataVal)
self.printVal = self.printVal.nextVal
def deleteNthNode(self, n):
last = self.headVal
i = 2
while last:
if i == n:
prevNode = last
nextNode = last.nextVal.nextVal
prevNode.nextVal = nextNode
return
i += 1
last = last.nextVal
def getNthNode(self, n):
curr = self.headVal
i = 1
while curr:
if i == n:
print(curr.dataVal)
return
i += 1
curr = curr.nextVal
def reverse(self, node):
print('2:', node.dataVal)
if node.nextVal == None:
self.headVal = node
return
print('3:', node.dataVal)
self.reverse(node.nextVal)
tmp = node.nextVal
tmp.nextVal = node
node.nextVal = None
def reverseLinkedList(self):
curr = self.headVal
print('1:', curr.dataVal)
while curr:
self.reverse(curr)
curr = curr.nextVal
def checkPalindrome(self):
curr = self.headVal
firstNode = self.headVal
nextNode = curr.nextVal
ans = False
while curr.dataVal is None:
if curr.dataVal == firstNode.dataVal:
ans = True
<|reserved_special_token_0|>
list1.reverseLinkedList()
<|reserved_special_token_1|>
class Node:
    """A single element of a singly linked list.

    Stores the payload in ``dataVal`` and the next node (or None) in
    ``nextVal``.
    """

    def __init__(self, dataVal=None):
        self.dataVal = dataVal
        # New nodes start detached; the list wires this link on insert.
        self.nextVal = None
class LinkedList:
    """Singly linked list of Node objects, tracked through ``headVal``.

    Nodes expose ``dataVal`` (payload) and ``nextVal`` (next node or
    None); an empty list has ``headVal`` set to None.
    """

    def __init__(self):
        self.headVal = None

    def atBeginning(self, data):
        """Insert a new node holding *data* before the current head."""
        NewNode = Node(data)
        NewNode.nextVal = self.headVal
        self.headVal = NewNode
        return NewNode

    def atEnd(self, data):
        """Append a new node holding *data* after the last node."""
        NewNode = Node(data)
        NewNode.nextVal = None
        if self.headVal is None:
            self.headVal = NewNode
            return NewNode
        last = self.headVal
        while last.nextVal:
            last = last.nextVal
        last.nextVal = NewNode
        return NewNode

    def inBetween(self, n1, n2, data):
        """Splice a new node holding *data* between nodes *n1* and *n2*.

        The caller is responsible for n1 actually linking to n2.
        """
        NewNode = Node(data)
        n1.nextVal = NewNode
        NewNode.nextVal = n2
        return NewNode

    def deleteNode(self, node):
        """Unlink the first node whose dataVal equals *node*'s dataVal.

        Fixed: the original dereferenced ``last.nextVal.dataVal`` without
        checking for the end of the list, raising AttributeError when the
        value was absent; the scan now terminates safely instead.
        """
        if self.headVal is None:
            return
        if self.headVal == node:
            self.headVal = node.nextVal
            return
        last = self.headVal
        while last.nextVal is not None:
            if last.nextVal.dataVal == node.dataVal:
                last.nextVal = last.nextVal.nextVal
                return
            last = last.nextVal

    def printList(self):
        """Print each node's payload, head to tail, one per line."""
        # Uses a local cursor; no longer leaves a stray ``printVal``
        # attribute on the instance as the original did.
        curr = self.headVal
        while curr is not None:
            print(curr.dataVal)
            curr = curr.nextVal

    def deleteNthNode(self, n):
        """Delete the node at 1-based position *n* (n >= 2).

        The head (n == 1) cannot be removed by this method, matching the
        original behavior; out-of-range positions are ignored. Fixed: no
        longer crashes when *n* is one past the end of the list.
        """
        last = self.headVal
        i = 2
        while last is not None:
            if i == n:
                if last.nextVal is not None:
                    last.nextVal = last.nextVal.nextVal
                return
            i += 1
            last = last.nextVal

    def getNthNode(self, n):
        """Print the payload of the node at 1-based position *n*, if any."""
        curr = self.headVal
        i = 1
        while curr is not None:
            if i == n:
                print(curr.dataVal)
                return
            i += 1
            curr = curr.nextVal

    def reverse(self, node):
        """Recursively reverse the sublist starting at *node*.

        On return ``headVal`` points at the former tail. The original's
        debug prints were removed.
        """
        if node.nextVal is None:
            self.headVal = node
            return
        self.reverse(node.nextVal)
        # The node after us has been reversed; point it back at us and
        # cut our forward link (the previous frame will restore it).
        node.nextVal.nextVal = node
        node.nextVal = None

    def reverseLinkedList(self):
        """Reverse the whole list in place.

        Fixed: the original crashed on an empty list (dereferenced
        ``curr.dataVal`` with curr None) and wrapped the recursive helper
        in a redundant loop with debug prints; one call suffices.
        """
        if self.headVal is not None:
            self.reverse(self.headVal)

    def checkPalindrome(self):
        """Return True when the payload sequence reads the same both ways.

        Fixed: the original's loop condition (``curr.dataVal is None``)
        was never true, ``ans`` was never used, and nothing was returned;
        this collects the payloads and compares them to their reverse.
        """
        values = []
        curr = self.headVal
        while curr is not None:
            values.append(curr.dataVal)
            curr = curr.nextVal
        return values == values[::-1]
# Demo driver: build a four-node list (1 -> 2 -> 3 -> 4) by hand and
# reverse it in place. The commented-out lines below are earlier
# experiments with a weekday list and the insert/delete helpers.
list1 = LinkedList()
list1.headVal = Node(1)
e2 = Node(2)
e3 = Node(3)
e4 = Node(4)
# Wire the nodes together manually instead of using atEnd().
list1.headVal.nextVal = e2
e2.nextVal = e3
e3.nextVal = e4
e4.nextVal = None
# list1.headVal = Node('Sunday')
# e2 = Node('Monday')
# e3 = Node('Tuesday')
# e4 = Node('Wednesday')
# e5 = Node('Thursday')
# e6 = Node('Friday')
# e7 = Node('Saturday')

# list1.headVal.nextVal = e2
# e2.nextVal = e3
# e3.nextVal = e4
# e4.nextVal = e5
# e5.nextVal = e6
# e6.nextVal = e7
# e8 = list1.atBeginning('MyTestJanuary')
# e9 = list1.atEnd('MyTestDecember')
# e10 = list1.inBetween(list1.headVal, e2, 'I hate this')
# e11 = list1.inBetween(e6, e7, 'I love this')
# list1.deleteNode(e2)
# list1.printList()

# list1.deleteNode(list1.headVal)
# list1.printList()


# print(">>>>>>>>>",type(e6), type(e8), type(e10))
# list1.deleteNode(e9)
# print("Deleting the last node>>>>>>")

# list1.deleteNthNode(3)
# list1.getNthNode(3)
# list1.printList()
# Reverse the list; the helper prints its debug trace while running.
list1.reverseLinkedList()
# list1.printList()
# e10 = list1.atBeginning('1')
# e8.nextVal = None
|
flexible
|
{
"blob_id": "00260e23614a7b0a11ff3649e71392e4892de423",
"index": 4511,
"step-1": "<mask token>\n\n\nclass LinkedList:\n <mask token>\n <mask token>\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n <mask token>\n <mask token>\n <mask token>\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n <mask token>\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.headVal = None\n <mask token>\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n <mask token>\n <mask token>\n\n def deleteNthNode(self, n):\n last = self.headVal\n i = 2\n while last:\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i += 1\n last = last.nextVal\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n\n def reverse(self, node):\n print('2:', node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print('3:', node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.headVal = None\n\n def atBeginning(self, data):\n NewNode = Node(data)\n NewNode.nextVal = self.headVal\n self.headVal = NewNode\n return NewNode\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n\n def deleteNode(self, node):\n last = self.headVal\n if self.headVal == node:\n self.headVal = node.nextVal\n return\n else:\n while last:\n if last.nextVal.dataVal == node.dataVal:\n if last.nextVal is not None:\n last.nextVal = node.nextVal\n return\n else:\n self.headVal.nextVal = None\n last = last.nextVal\n\n def printList(self):\n self.printVal = self.headVal\n while self.printVal is not None:\n print(self.printVal.dataVal)\n self.printVal = self.printVal.nextVal\n\n def deleteNthNode(self, n):\n last = self.headVal\n i = 2\n while last:\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i += 1\n last = last.nextVal\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n\n def reverse(self, node):\n print('2:', node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print('3:', node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == 
firstNode.dataVal:\n ans = True\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, dataVal=None):\n self.dataVal = dataVal\n self.nextVal = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.headVal = None\n\n def atBeginning(self, data):\n NewNode = Node(data)\n NewNode.nextVal = self.headVal\n self.headVal = NewNode\n return NewNode\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n\n def deleteNode(self, node):\n last = self.headVal\n if self.headVal == node:\n self.headVal = node.nextVal\n return\n else:\n while last:\n if last.nextVal.dataVal == node.dataVal:\n if last.nextVal is not None:\n last.nextVal = node.nextVal\n return\n else:\n self.headVal.nextVal = None\n last = last.nextVal\n\n def printList(self):\n self.printVal = self.headVal\n while self.printVal is not None:\n print(self.printVal.dataVal)\n self.printVal = self.printVal.nextVal\n\n def deleteNthNode(self, n):\n last = self.headVal\n i = 2\n while last:\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i += 1\n last = last.nextVal\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n\n def reverse(self, node):\n print('2:', node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print('3:', node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = 
curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\nlist1.reverseLinkedList()\n",
"step-5": "class Node:\n def __init__(self, dataVal=None):\n self.dataVal = dataVal\n self.nextVal = None\n\nclass LinkedList:\n def __init__(self):\n self.headVal = None\n def atBeginning(self, data):\n NewNode = Node(data)\n NewNode.nextVal = self.headVal\n self.headVal = NewNode\n return NewNode\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while(last.nextVal):\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n \n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n\n def deleteNode(self,node):\n last = self.headVal\n if self.headVal == node:\n self.headVal = node.nextVal \n return\n else:\n while(last):\n if (last.nextVal.dataVal) == (node.dataVal):\n if last.nextVal is not None:\n last.nextVal = node.nextVal\n return\n else:\n self.headVal.nextVal = None\n\n last = last.nextVal\n \n def printList(self):\n self.printVal = self.headVal\n while self.printVal is not None:\n # print(self.printVal.dataVal)\n print(self.printVal.dataVal)\n self.printVal = self.printVal.nextVal\n # def isPalindrome(self):\n def deleteNthNode(self, n):\n last = self.headVal\n i=2\n while(last):\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i+=1\n last = last.nextVal\n def getNthNode(self, n):\n curr = self.headVal\n i=1\n while(curr):\n if i == n:\n print (curr.dataVal)\n return\n i+=1\n curr = curr.nextVal\n \n def reverse(self, node):\n print(\"2:\", node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print(\"3:\", node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print(\"1:\", curr.dataVal)\n while(curr):\n self.reverse(curr)\n curr = curr.nextVal\n \n def checkPalindrome(self):\n curr = 
self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while(curr.dataVal is None):\n if curr.dataVal == firstNode.dataVal:\n ans = True\n \n\n\n\n \n\n \n\nlist1 = LinkedList()\nlist1.headVal = Node(1)\ne2 = Node(2)\ne3 = Node(3)\ne4 = Node(4)\nlist1.headVal.nextVal = e2\ne2.nextVal = e3\ne3.nextVal = e4\ne4.nextVal = None\n# list1.headVal = Node('Sunday')\n# e2 = Node('Monday')\n# e3 = Node('Tuesday')\n# e4 = Node('Wednesday')\n# e5 = Node('Thursday')\n# e6 = Node('Friday')\n# e7 = Node('Saturday')\n\n# list1.headVal.nextVal = e2\n# e2.nextVal = e3\n# e3.nextVal = e4\n# e4.nextVal = e5\n# e5.nextVal = e6\n# e6.nextVal = e7\n# e8 = list1.atBeginning('MyTestJanuary')\n# e9 = list1.atEnd('MyTestDecember')\n# e10 = list1.inBetween(list1.headVal, e2, 'I hate this')\n# e11 = list1.inBetween(e6, e7, 'I love this')\n# list1.deleteNode(e2)\n# list1.printList()\n\n# list1.deleteNode(list1.headVal)\n# list1.printList()\n\n\n# print(\">>>>>>>>>\",type(e6), type(e8), type(e10))\n# list1.deleteNode(e9)\n# print(\"Deleting the last node>>>>>>\")\n\n# list1.deleteNthNode(3)\n# list1.getNthNode(3)\n# list1.printList()\nlist1.reverseLinkedList()\n# list1.printList()\n# e10 = list1.atBeginning('1')\n# e8.nextVal = None\n\n\n\n\n\n",
"step-ids": [
6,
9,
12,
15,
17
]
}
|
[
6,
9,
12,
15,
17
] |
import pandas
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import json
CONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')
def create_new_report(chrome_driver_inner, report_info_inner):
    """Drive the 'create new expense report' wizard in the open browser.

    Clicks through to the naming step, then fills in the report title
    (built from the configured name plus the date range) and the
    start/end date fields.
    """
    find = chrome_driver_inner.find_element_by_id

    find('MainContent_MainActionCreate').click()
    find('MainContent_AAWiz__Next').click()

    title = '{} - {} - {}'.format(report_info_inner['new_report_string'],
                                  report_info_inner['start_date'],
                                  report_info_inner['end_date'])
    # Each text box is cleared before typing so stale values never linger.
    for element_id, value in (('MainContent_ClientProjectName', title),
                              ('MainContent_StartDate_input', report_info_inner['start_date']),
                              ('MainContent_EndDate_input', report_info_inner['end_date'])):
        box = find(element_id)
        box.clear()
        box.send_keys(value)
def execute_expense_report(report_filename=CONFIG_FILE_NAME,
                           report_info=None):
    """Batch-attach PCard transactions to expense reports for every employee.

    Reads run settings from *report_info* (or loads them from the JSON file
    *report_filename* when no dict is supplied), pulls the employee names
    out of the PCard reconciliation spreadsheet, then drives a Chrome
    session: log in, impersonate each user, select their card transactions,
    and attach them to an existing or newly created expense report.
    Prints the names it could not finish at the end.
    """
    # Fall back to the JSON config file only when no dict was passed in.
    if report_filename and not report_info:
        with open(report_filename, 'r') as input_file:
            report_info = json.load(input_file)
    # NOTE(review): these overwrite whatever the config file supplied --
    # presumably placeholders to be filled in before a real run; confirm.
    report_info['password'] = ''
    report_info['user_name'] = ''
    file_name = report_info['reconciliation_report_location']
    excel_file = pandas.ExcelFile(file_name)
    # skiprows=8 drops the banner rows above the spreadsheet's table.
    pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)
    recon_df = pcard_df['PCard Reconciliation Report']
    # Unique, non-blank employee names drive the per-user loop below.
    names = recon_df['Employee Name'].dropna().unique()
    chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__), 'chromedriver.exe'))
    did_not_finish_list= []
    finished_users =[]
    # Log in once; the rest of the run impersonates users through the UI.
    logon_website = report_info['logon_website']
    chrome_driver.get(logon_website)
    chrome_driver.find_element_by_id('userNameInput').send_keys(report_info['email_address'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(report_info['password'])
    chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)
    chosen_names = names
    for current_id, the_name in enumerate(chosen_names):
        # Disable implicit waits so missing elements fail fast here.
        chrome_driver.implicitly_wait(0)
        print('Processing user {} of {}, {}'.format(current_id+1, len(chosen_names), the_name))
        # Return to the admin account before touching the next user.
        current_user_dropdown = Select(chrome_driver.find_element_by_id('CurrentUserDropdown'))
        current_user_dropdown.select_by_visible_text(report_info['user_name'])
        configuration_link = chrome_driver.find_element_by_id('topNavToolsConfigurationLink')
        configuration_link.click()
        view_and_edit_users = chrome_driver.find_element_by_id('MainContent_ctl69')
        view_and_edit_users.click()
        # Search the user list by last name (assumes "First Last" names in
        # the spreadsheet -- TODO confirm for multi-part names).
        last_name = chrome_driver.find_element_by_id('MainContent_LName')
        last_name_str = the_name.split()[1]
        last_name.send_keys(last_name_str)
        last_name.send_keys(Keys.ENTER)
        user_tag = chrome_driver.find_element_by_xpath("//nobr[text() = \"{}\"]".format(the_name))
        edit_user = user_tag.find_elements_by_xpath("../..//img[@src='images/16_edit.png']")
        edit_user[0].click()
        # Impersonate the employee so the wallet pages are theirs.
        switch_user = chrome_driver.find_element_by_link_text('Switch to this User')
        switch_user.click()
        more_items = chrome_driver.find_element_by_id('MainContent_lblWalletMoreItems')
        more_items.click()
        # Tick the checkbox on every credit-card transaction row.
        transaction_list = chrome_driver.find_elements_by_xpath("//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']")
        for i_val in transaction_list:
            i_val.find_element_by_xpath("../..//input[@type='checkbox']").click()
        try:
            add_content = chrome_driver.find_element_by_id('MainContent_Add')
            add_content.click()
        # NOTE(review): bare except -- ANY failure (not just a missing
        # button) skips this user; consider narrowing the exception type.
        except:
            did_not_finish_list.append(the_name)
            continue
        #time.sleep(3)
        # Give the page time to render the add-to-report controls.
        chrome_driver.implicitly_wait(int(report_info['wait_time']))
        try:
            add_to_existing = chrome_driver.find_element_by_id('MainContent_MainActionAdd')
            add_to_existing.click()
        except NoSuchElementException:
            did_not_finish_list.append(the_name)
            continue
        chrome_driver.implicitly_wait(0)
        # A disabled "add to existing" button means the user has no open
        # report yet, so create one from scratch.
        if add_to_existing.get_attribute('disabled') == 'true':
            create_new_report(chrome_driver, report_info)
        else:
            next_button = chrome_driver.find_element_by_id('MainContent_AAWiz__Next')
            next_button.click()
            selected_report = Select(chrome_driver.find_element_by_id('MainContent_SelectedExpenseReport'))
            try:
                selected_report.select_by_visible_text('{} - {} - {}'.format(report_info['report_executive_string'],
                                                                             report_info['start_date'],
                                                                             report_info['end_date']))
            # The expected report title is absent from the dropdown: back
            # out of the wizard and create a fresh report instead.
            except NoSuchElementException:
                back_button = chrome_driver.find_element_by_id('MainContent_AAWiz__Back')
                back_button.click()
                create_new_report(chrome_driver, report_info)
        next_button_2= chrome_driver.find_element_by_id('MainContent_AAWiz__Next')
        next_button_2.click()
        finished_users.append(the_name)
        # Switch back to the admin account before the next iteration.
        current_user_dropdown = Select(chrome_driver.find_element_by_id('CurrentUserDropdown'))
        current_user_dropdown.select_by_visible_text(report_info['user_name'])
    print('Did not finish: {}'.format(did_not_finish_list))
if __name__ == '__main__':
execute_expense_report()
|
normal
|
{
"blob_id": "14cb702054b8caaa8899a2a3d8b65aae9b063cb6",
"index": 5600,
"step-1": "<mask token>\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for 
current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n 
next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for 
current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n 
next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()\n",
"step-3": "<mask token>\nCONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n 
chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if 
add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()\n",
"step-4": "import pandas\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nimport json\nCONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n add_new_report = chrome_driver_inner.find_element_by_id(\n 'MainContent_MainActionCreate')\n add_new_report.click()\n next_button = chrome_driver_inner.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n name_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner[\n 'new_report_string'], report_info_inner['start_date'],\n report_info_inner['end_date']))\n start_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n end_date_text = chrome_driver_inner.find_element_by_id(\n 'MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME, report_info=None):\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n recon_df = pcard_df['PCard Reconciliation Report']\n names = recon_df['Employee Name'].dropna().unique()\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__),\n 'chromedriver.exe'))\n did_not_finish_list = []\n finished_users = []\n logon_website = report_info['logon_website']\n 
chrome_driver.get(logon_website)\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info\n ['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info\n ['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n chosen_names = names\n for current_id, the_name in enumerate(chosen_names):\n chrome_driver.implicitly_wait(0)\n print('Processing user {} of {}, {}'.format(current_id + 1, len(\n chosen_names), the_name))\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n configuration_link = chrome_driver.find_element_by_id(\n 'topNavToolsConfigurationLink')\n configuration_link.click()\n view_and_edit_users = chrome_driver.find_element_by_id(\n 'MainContent_ctl69')\n view_and_edit_users.click()\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n user_tag = chrome_driver.find_element_by_xpath('//nobr[text() = \"{}\"]'\n .format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\n \"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n switch_user = chrome_driver.find_element_by_link_text(\n 'Switch to this User')\n switch_user.click()\n more_items = chrome_driver.find_element_by_id(\n 'MainContent_lblWalletMoreItems')\n more_items.click()\n transaction_list = chrome_driver.find_elements_by_xpath(\n \"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\"\n )\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\"\n ).click()\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n try:\n add_to_existing = 
chrome_driver.find_element_by_id(\n 'MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n chrome_driver.implicitly_wait(0)\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n next_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button.click()\n selected_report = Select(chrome_driver.find_element_by_id(\n 'MainContent_SelectedExpenseReport'))\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.\n format(report_info['report_executive_string'],\n report_info['start_date'], report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Back')\n back_button.click()\n create_new_report(chrome_driver, report_info)\n next_button_2 = chrome_driver.find_element_by_id(\n 'MainContent_AAWiz__Next')\n next_button_2.click()\n finished_users.append(the_name)\n current_user_dropdown = Select(chrome_driver.find_element_by_id(\n 'CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()\n",
"step-5": "import pandas\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.common.exceptions import NoSuchElementException\nimport json\n\nCONFIG_FILE_NAME = os.path.join(os.path.dirname(__file__), 'input_info.json')\n\n\ndef create_new_report(chrome_driver_inner, report_info_inner):\n\n add_new_report = chrome_driver_inner.find_element_by_id('MainContent_MainActionCreate')\n add_new_report.click()\n\n next_button = chrome_driver_inner.find_element_by_id('MainContent_AAWiz__Next')\n next_button.click()\n\n name_text = chrome_driver_inner.find_element_by_id('MainContent_ClientProjectName')\n name_text.clear()\n name_text.send_keys('{} - {} - {}'.format(report_info_inner['new_report_string'],\n report_info_inner['start_date'],\n report_info_inner['end_date']))\n\n start_date_text = chrome_driver_inner.find_element_by_id('MainContent_StartDate_input')\n start_date_text.clear()\n start_date_text.send_keys(report_info_inner['start_date'])\n\n end_date_text = chrome_driver_inner.find_element_by_id('MainContent_EndDate_input')\n end_date_text.clear()\n end_date_text.send_keys(report_info_inner['end_date'])\n\n\n\ndef execute_expense_report(report_filename=CONFIG_FILE_NAME,\n report_info=None):\n\n if report_filename and not report_info:\n with open(report_filename, 'r') as input_file:\n report_info = json.load(input_file)\n report_info['password'] = ''\n report_info['user_name'] = ''\n \n file_name = report_info['reconciliation_report_location']\n excel_file = pandas.ExcelFile(file_name)\n pcard_df = excel_file.parse(excel_file.sheet_names, skiprows=8)\n\n recon_df = pcard_df['PCard Reconciliation Report']\n\n names = recon_df['Employee Name'].dropna().unique()\n\n chrome_driver = webdriver.Chrome(os.path.join(os.path.dirname(__file__), 'chromedriver.exe'))\n\n did_not_finish_list= []\n finished_users =[]\n\n logon_website = report_info['logon_website']\n\n 
chrome_driver.get(logon_website)\n\n chrome_driver.find_element_by_id('userNameInput').send_keys(report_info['email_address'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(report_info['password'])\n chrome_driver.find_element_by_id('passwordInput').send_keys(Keys.ENTER)\n\n chosen_names = names\n\n for current_id, the_name in enumerate(chosen_names):\n\n chrome_driver.implicitly_wait(0)\n\n print('Processing user {} of {}, {}'.format(current_id+1, len(chosen_names), the_name))\n\n current_user_dropdown = Select(chrome_driver.find_element_by_id('CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n\n configuration_link = chrome_driver.find_element_by_id('topNavToolsConfigurationLink')\n configuration_link.click()\n\n view_and_edit_users = chrome_driver.find_element_by_id('MainContent_ctl69')\n view_and_edit_users.click()\n\n last_name = chrome_driver.find_element_by_id('MainContent_LName')\n last_name_str = the_name.split()[1]\n\n last_name.send_keys(last_name_str)\n last_name.send_keys(Keys.ENTER)\n\n user_tag = chrome_driver.find_element_by_xpath(\"//nobr[text() = \\\"{}\\\"]\".format(the_name))\n edit_user = user_tag.find_elements_by_xpath(\"../..//img[@src='images/16_edit.png']\")\n edit_user[0].click()\n\n switch_user = chrome_driver.find_element_by_link_text('Switch to this User')\n switch_user.click()\n\n more_items = chrome_driver.find_element_by_id('MainContent_lblWalletMoreItems')\n more_items.click()\n\n transaction_list = chrome_driver.find_elements_by_xpath(\"//*[@class='feed_row-primary']//img[@src='images/16_credit-card.png']\")\n for i_val in transaction_list:\n i_val.find_element_by_xpath(\"../..//input[@type='checkbox']\").click()\n\n try:\n add_content = chrome_driver.find_element_by_id('MainContent_Add')\n add_content.click()\n except:\n did_not_finish_list.append(the_name)\n continue\n\n #time.sleep(3)\n\n chrome_driver.implicitly_wait(int(report_info['wait_time']))\n\n try:\n 
add_to_existing = chrome_driver.find_element_by_id('MainContent_MainActionAdd')\n add_to_existing.click()\n except NoSuchElementException:\n did_not_finish_list.append(the_name)\n continue\n\n chrome_driver.implicitly_wait(0)\n\n if add_to_existing.get_attribute('disabled') == 'true':\n create_new_report(chrome_driver, report_info)\n else:\n next_button = chrome_driver.find_element_by_id('MainContent_AAWiz__Next')\n next_button.click()\n\n selected_report = Select(chrome_driver.find_element_by_id('MainContent_SelectedExpenseReport'))\n\n try:\n selected_report.select_by_visible_text('{} - {} - {}'.format(report_info['report_executive_string'], \n report_info['start_date'],\n report_info['end_date']))\n except NoSuchElementException:\n back_button = chrome_driver.find_element_by_id('MainContent_AAWiz__Back')\n back_button.click()\n\n create_new_report(chrome_driver, report_info)\n\n\n next_button_2= chrome_driver.find_element_by_id('MainContent_AAWiz__Next')\n next_button_2.click()\n\n finished_users.append(the_name)\n\n current_user_dropdown = Select(chrome_driver.find_element_by_id('CurrentUserDropdown'))\n current_user_dropdown.select_by_visible_text(report_info['user_name'])\n\n print('Did not finish: {}'.format(did_not_finish_list))\n\n\nif __name__ == '__main__':\n execute_expense_report()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import json
def main(path='./src/test/predictions.json'):
    """Score speech-recognition predictions against ground-truth labels.

    Loads a JSON file containing three parallel lists -- ``label`` (ground
    truth), ``google`` and ``pocket_sphinx`` (per-engine transcriptions) --
    and prints the exact-match accuracy of each engine.

    Args:
        path: Location of the predictions JSON file. Defaults to the
            previously hard-coded location, so existing callers are
            unaffected.
    """
    with open(path, 'r') as f:
        data = json.load(f)

    labels = data['label']
    total = len(labels)
    google = 0
    sphinx = 0
    # zip keeps the three lists aligned without manual indexing.
    for label, google_entry, sphinx_entry in zip(labels, data['google'], data['pocket_sphinx']):
        if google_entry == label:
            google += 1
        if sphinx_entry == label:
            sphinx += 1

    # Guard against an empty predictions file (the original divided by zero).
    google_acc = google / total if total else 0.0
    sphinx_acc = sphinx / total if total else 0.0
    print('Google %d out of %d: %.4f' % (google, total, google_acc))
    print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx_acc))


if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "9fc184fe3aa498138138403bef719c59b85b3a80",
"index": 4392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n print('Google %d out of %d: %.4f' % (google, total, google / total))\n print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n print('Google %d out of %d: %.4f' % (google, total, google / total))\n print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n print('Google %d out of %d: %.4f' % (google, total, google / total))\n print('Pocket Sphinx %d out of %d: %.4f' % (sphinx, total, sphinx / total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import json\n\n\ndef main():\n with open('./src/test/predictions.json', 'r') as f:\n data = json.load(f)\n \n total = len(data['label'])\n google = 0\n sphinx = 0\n for i in range(len(data['label'])):\n label = data['label'][i]\n google_entry = data['google'][i]\n sphinx_entry = data['pocket_sphinx'][i]\n\n if google_entry == label:\n google += 1\n if sphinx_entry == label:\n sphinx += 1\n \n print('Google %d out of %d: %.4f' %(google, total, google/total))\n print('Pocket Sphinx %d out of %d: %.4f' %(sphinx, total, sphinx/total))\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#coding=UTF-8
# Exercise (Python 2 source -- uses `print` statements and the `<>`
# operator): build 3-digit strings from digits 1-4 with no equal
# neighbouring digits, then verify the count two ways.
import random
import random  # NOTE(review): duplicate import of `random` -- redundant but harmless
list=[]  # NOTE(review): shadows the builtin `list`
s=0  # running count; should equal len(list)
for i in range(1,5):
    for j in range(1,5):
        for k in range(1,5):
            # Keep combinations whose adjacent digits differ.
            # NOTE(review): i and k are NOT compared, so e.g. "121" is kept.
            if i!=j and j<>k:
                list.append(str(i)+str(j)+str(k))
                s=s+1
print len(list)
print s
if len(list)==s:
    print "是相等的!"
else:
    print "不相等!"
# Print one random entry; randrange starts at 1, so index 0 is never chosen.
print list[random.randrange(1,len(list))]
import math
# Exercise: find n such that n+100 and n+268 are both perfect squares.
# NOTE(review): range(1,1) is empty, so this loop body never executes; the
# test below also checks "sqrt is even" rather than "sqrt is an integer" --
# left as written.
for n in range(1,1):
    i=math.sqrt(n+100)
    print i
    j=math.sqrt(n+268)
    print j
    if i/2.0==int(i/2) and j/2.0==int(j/2):
        print n
        break
import time
#print help(time.strftime)
# Print the current four-digit year.
print time.strftime("%Y")
# Exercise: sort the same list two ways -- the builtin sort() versus a
# hand-written bubble sort -- and print both results.
list=[90,19,8,99,87,45,109]
list.sort()
print u"sort排序输出:",list
list=[90,19,8,99,87,45,109]
i=len(list)
# Bubble sort: each outer pass floats the largest remaining element to the
# end of the still-unsorted prefix, so the inner range shrinks by one.
for b in range(1,i):
    i=i-1
    for a in range(0,i):
        if list[a+1]<list[a]:
            temp=list[a+1]
            list[a+1]=list[a]
            list[a]=temp
print u"冒泡排序输出:",list
# Print a simple rectangle of asterisks (top edge, 5 side rows, bottom edge).
print '*'*10
for i in range(5):
    print "* *"
print '*'*10
import sys
#sys.stdout.write(chr(1))
# Exercise: rabbit population after m months (Fibonacci-style growth model
# with three age cohorts).
temp=0  # rabbits mature enough to produce offspring this month
temp1=0  # rabbits one month away from producing
temp2=1  # rabbits two months away from producing (newborns)
m=12#int(raw_input(u"请输入月份:"))
for i in range(1,m+1):
    # Age every cohort by one month; the mature cohort breeds a new one.
    temp=temp+temp1
    temp22=temp2
    temp2=temp
    temp1=temp22
print "24个月后的兔子数量:",temp+temp1+temp2
# Print Fibonacci terms, inserting a blank line after every second step.
f1=1
f2=1
for i in range(1,24):
    #print "%12d%12d"%(f1,f1)
    if (i%2)==0:
        print ''
    f1=f1+f2
    f2=f1+f2
# Exercise: narcissistic (Armstrong) numbers -- 3-digit numbers equal to
# the sum of the cubes of their digits.
for i in range(1,10):
    for j in range(0,10):
        for k in range(0,10):
            if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):
                print int(str(i)+str(j)+str(k))
import sys
from sys import stdout
# Exercise: print the prime factorisation of n in the form "a*b*c".
n=45
print '数值:n=%d'%n
list=[]  # NOTE(review): shadows the builtin `list`; collects factor strings
for i in range(2,n+1):
    # Divide factor i out of n as many times as it evenly divides.
    while n!=0:
        if n%i==0:
            list.append(str(i))
            sys.stdout.write(str(i))
            sys.stdout.write("*")
            n=n/i
        else:
            break
    print "%d"%n
# Re-print the collected factors joined by '*' (without a trailing '*').
for i in range(0,len(list)):
    if i<len(list)-1:
        sys.stdout.write(list[i]+"*")
    else:
        sys.stdout.write(list[i])
# Exercise: a ball dropped from 100 m rebounds to half its previous height
# each bounce; accumulate the distance travelled over 10 bounces and print
# the final rebound height and the total.
h=100
sum=0  # NOTE(review): shadows the builtin `sum`
for i in range(1,11):
    if i==1:
        print ''
        sum=sum+h  # count the initial 100 m drop once
    h=h/2.0
    sum=sum+2*h  # each bounce adds an up-and-down of the new height
print h
print sum
|
normal
|
{
"blob_id": "fa07553477e3bb2ecbeb87bd1383a2194282579c",
"index": 4081,
"step-1": "#coding=UTF-8\nimport random\nimport random\nlist=[]\ns=0\nfor i in range(1,5):\n for j in range(1,5):\n for k in range(1,5):\n if i!=j and j<>k:\n list.append(str(i)+str(j)+str(k))\n s=s+1\nprint len(list)\nprint s\nif len(list)==s:\n print \"是相等的!\"\nelse:\n print \"不相等!\"\nprint list[random.randrange(1,len(list))]\n\n\nimport math\nfor n in range(1,1):\n i=math.sqrt(n+100)\n print i\n j=math.sqrt(n+268)\n print j\n if i/2.0==int(i/2) and j/2.0==int(j/2):\n print n\n break\n \nimport time\n#print help(time.strftime)\nprint time.strftime(\"%Y\")\n\n\n\nlist=[90,19,8,99,87,45,109]\nlist.sort()\nprint u\"sort排序输出:\",list\nlist=[90,19,8,99,87,45,109]\ni=len(list)\nfor b in range(1,i):\n i=i-1\n for a in range(0,i):\n if list[a+1]<list[a]:\n temp=list[a+1]\n list[a+1]=list[a] \n list[a]=temp\nprint u\"冒泡排序输出:\",list\n\n\n\n\nprint '*'*10\nfor i in range(5):\n print \"* *\"\nprint '*'*10\n\n\n\nimport sys\n#sys.stdout.write(chr(1))\n\n\n\n\n\ntemp=0#正常产仔的兔子\ntemp1=0#剩余一个月产仔的兔子\ntemp2=1#剩余2个月产仔的兔子\nm=12#int(raw_input(u\"请输入月份:\"))\nfor i in range(1,m+1):\n temp=temp+temp1\n temp22=temp2\n temp2=temp\n temp1=temp22\nprint \"24个月后的兔子数量:\",temp+temp1+temp2\n\nf1=1\nf2=1\nfor i in range(1,24): \n #print \"%12d%12d\"%(f1,f1)\n if (i%2)==0:\n print ''\n f1=f1+f2\n f2=f1+f2\n\nfor i in range(1,10):\n for j in range(0,10):\n for k in range(0,10):\n if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):\n print int(str(i)+str(j)+str(k))\n\nimport sys\nfrom sys import stdout\nn=45\nprint '数值:n=%d'%n\nlist=[]\nfor i in range(2,n+1):\n while n!=0:\n if n%i==0:\n list.append(str(i))\n sys.stdout.write(str(i))\n sys.stdout.write(\"*\")\n n=n/i\n else:\n break\n print \"%d\"%n\nfor i in range(0,len(list)):\n if i<len(list)-1:\n sys.stdout.write(list[i]+\"*\")\n else:\n sys.stdout.write(list[i])\n\nh=100\nsum=0\nfor i in range(1,11):\n if i==1:\n print ''\n sum=sum+h\n h=h/2.0\n sum=sum+2*h\nprint h\nprint sum\n\n\n\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ICrawlerLog:
<|reserved_special_token_0|>
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
指定保存日志的文件路径,日志级别,以及调用文件
将日志存入到指定的文件中
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ICrawlerLog:
level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging
.CRITICAL}
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
指定保存日志的文件路径,日志级别,以及调用文件
将日志存入到指定的文件中
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ICrawlerLog:
level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging
.CRITICAL}
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
指定保存日志的文件路径,日志级别,以及调用文件
将日志存入到指定的文件中
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
def log(name):
def wraaper(func):
def inner(*args, **kwargs):
log = ICrawlerLog(name).save
log.info('{}开始执行'.format(func))
try:
result = func(*args, **kwargs)
if result:
log.info('{}执行成功'.format(func))
return result
else:
log.error('{}执行后返回值为空'.format(func))
return None
except Exception as e:
log.error('{}程序异常执行失败,程序终止'.format(func))
log.error(e)
return False
return inner
return wraaper
<|reserved_special_token_1|>
from SpiderTools.tool import platform_system
from SpidersLog.file_handler import SafeFileHandler
from Env.parse_yaml import FileConfigParser
from Env import log_variable as lv
from staticparm import root_path
from SpiderTools.tool import get_username
import logging
import logging.handlers
import traceback
class ICrawlerLog:
level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging
.CRITICAL}
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
"""
指定保存日志的文件路径,日志级别,以及调用文件
将日志存入到指定的文件中
"""
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),
key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=
platform_system(), key='log')
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(
self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear()
if not self.logger.handlers:
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +
'%s %s %s %s %s ' % (group_code, job_code, jobinst_id,
fire_time, address_code) + '%(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
fh.close()
return self.logger
def log(name):
def wraaper(func):
def inner(*args, **kwargs):
log = ICrawlerLog(name).save
log.info('{}开始执行'.format(func))
try:
result = func(*args, **kwargs)
if result:
log.info('{}执行成功'.format(func))
return result
else:
log.error('{}执行后返回值为空'.format(func))
return None
except Exception as e:
log.error('{}程序异常执行失败,程序终止'.format(func))
log.error(e)
return False
return inner
return wraaper
<|reserved_special_token_1|>
# encoding: utf-8
from SpiderTools.tool import platform_system
from SpidersLog.file_handler import SafeFileHandler
from Env.parse_yaml import FileConfigParser
from Env import log_variable as lv
from staticparm import root_path
from SpiderTools.tool import get_username
import logging
import logging.handlers
import traceback
class ICrawlerLog:
level_relations = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'crit': logging.CRITICAL
} # 日志级别关系映射
def __init__(self, name, logger=None):
self.logger = logger
self.name = name
@property
def save(self, *args, **kwargs):
'''
指定保存日志的文件路径,日志级别,以及调用文件
将日志存入到指定的文件中
'''
jobinst_id = lv.get_jobinst_id()
job_code = lv.get_job_code()
fire_time = lv.get_fire_time()
group_code = lv.get_group_code()
address_code = lv.get_address_code()
# year = time.strftime('%Y', time.localtime()) # 获取完整年份
# month = time.strftime('%m', time.localtime()) # 获取月
# day = time.strftime('%d', time.localtime()) # 获取日
# 创建一个logger
self.logger = logging.getLogger(self.logger)
self.logger.setLevel(logging.INFO)
# 创建一个handler,用于写入日志文件
# self.log_time = time.strftime("%Y_%m_%d_")
if platform_system() == 'Linux':
log_path = FileConfigParser().get_path(server=platform_system(),key='log-cb')
if platform_system() == 'Windows':
log_path = root_path + FileConfigParser().get_path(server=platform_system(), key='log')
# log_path = './Logs/'
# log_path = '/home/ijep/domain/logs/python/'
# log_name = log_path + 'icrawlerspider.spider.%s-%s-%s.log' % (year, month, day)
if self.name == 'spider':
name = 'icrawlerspider.spider.log'
elif self.name == 'middleware':
name = 'icrawlerspider.middleware.log'
log_name = log_path + name
filename = self.logger.handlers[0].baseFilename.split('\\')[-1] if len(self.logger.handlers) > 0 else ''
if log_name.split('/')[-1] != filename:
self.logger.handlers.clear() # 多个不同文件名的情况下用这个
if not self.logger.handlers:
# 追加模式,按照日期来设置日志,handlers中TimedRotatingFileHandler就是按照日期来设置,RotatingFileHandler这个按照文件大小来设置
# fh = logging.handlers.TimedRotatingFileHandler(log_name, when='D', interval=1, encoding='utf-8')
fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')
# fh.setLevel(logging.INFO)
# 定义handler的输出格式
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' + '%s %s %s %s %s '
% (group_code, job_code, jobinst_id, fire_time, address_code) + '%(message)s')
# '%(filename)s->%(funcName)s line:%(lineno)d
fh.setFormatter(formatter)
# 给logger添加handler
self.logger.addHandler(fh)
# 添加下面一句,在记录日志之后移除句柄
# self.logger.info('记录数据')
# self.logger.removeHandler(fh)
# 关闭打开的文件
fh.close()
return self.logger
def log(name):
def wraaper(func):
def inner(*args, **kwargs): # 如果想返回result必须再包裹一层
log = ICrawlerLog(name).save
log.info("{}开始执行".format(func))
try:
result = func(*args, **kwargs) # 如果不是在类的函数里使用装饰器就可以这么写,如果这么写会报需要self入参(因为你是用类作为装饰器,函数就不会这样)
if result:
log.info("{}执行成功".format(func))
# log.info("结果是: %s" % result)
return result
else:
log.error("{}执行后返回值为空".format(func))
return None
except Exception as e:
# traceback.print_exc()
log.error("{}程序异常执行失败,程序终止".format(func))
log.error(e)
return False
return inner
return wraaper
|
flexible
|
{
"blob_id": "63001128d9cb934d6f9d57db668a43ba58f4ece3",
"index": 1679,
"step-1": "<mask token>\n\n\nclass ICrawlerLog:\n <mask token>\n\n def __init__(self, name, logger=None):\n self.logger = logger\n self.name = name\n\n @property\n def save(self, *args, **kwargs):\n \"\"\"\n 指定保存日志的文件路径,日志级别,以及调用文件\n 将日志存入到指定的文件中\n \"\"\"\n jobinst_id = lv.get_jobinst_id()\n job_code = lv.get_job_code()\n fire_time = lv.get_fire_time()\n group_code = lv.get_group_code()\n address_code = lv.get_address_code()\n self.logger = logging.getLogger(self.logger)\n self.logger.setLevel(logging.INFO)\n if platform_system() == 'Linux':\n log_path = FileConfigParser().get_path(server=platform_system(),\n key='log-cb')\n if platform_system() == 'Windows':\n log_path = root_path + FileConfigParser().get_path(server=\n platform_system(), key='log')\n if self.name == 'spider':\n name = 'icrawlerspider.spider.log'\n elif self.name == 'middleware':\n name = 'icrawlerspider.middleware.log'\n log_name = log_path + name\n filename = self.logger.handlers[0].baseFilename.split('\\\\')[-1] if len(\n self.logger.handlers) > 0 else ''\n if log_name.split('/')[-1] != filename:\n self.logger.handlers.clear()\n if not self.logger.handlers:\n fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +\n '%s %s %s %s %s ' % (group_code, job_code, jobinst_id,\n fire_time, address_code) + '%(message)s')\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n fh.close()\n return self.logger\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ICrawlerLog:\n level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,\n 'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging\n .CRITICAL}\n\n def __init__(self, name, logger=None):\n self.logger = logger\n self.name = name\n\n @property\n def save(self, *args, **kwargs):\n \"\"\"\n 指定保存日志的文件路径,日志级别,以及调用文件\n 将日志存入到指定的文件中\n \"\"\"\n jobinst_id = lv.get_jobinst_id()\n job_code = lv.get_job_code()\n fire_time = lv.get_fire_time()\n group_code = lv.get_group_code()\n address_code = lv.get_address_code()\n self.logger = logging.getLogger(self.logger)\n self.logger.setLevel(logging.INFO)\n if platform_system() == 'Linux':\n log_path = FileConfigParser().get_path(server=platform_system(),\n key='log-cb')\n if platform_system() == 'Windows':\n log_path = root_path + FileConfigParser().get_path(server=\n platform_system(), key='log')\n if self.name == 'spider':\n name = 'icrawlerspider.spider.log'\n elif self.name == 'middleware':\n name = 'icrawlerspider.middleware.log'\n log_name = log_path + name\n filename = self.logger.handlers[0].baseFilename.split('\\\\')[-1] if len(\n self.logger.handlers) > 0 else ''\n if log_name.split('/')[-1] != filename:\n self.logger.handlers.clear()\n if not self.logger.handlers:\n fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +\n '%s %s %s %s %s ' % (group_code, job_code, jobinst_id,\n fire_time, address_code) + '%(message)s')\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n fh.close()\n return self.logger\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ICrawlerLog:\n level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,\n 'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging\n .CRITICAL}\n\n def __init__(self, name, logger=None):\n self.logger = logger\n self.name = name\n\n @property\n def save(self, *args, **kwargs):\n \"\"\"\n 指定保存日志的文件路径,日志级别,以及调用文件\n 将日志存入到指定的文件中\n \"\"\"\n jobinst_id = lv.get_jobinst_id()\n job_code = lv.get_job_code()\n fire_time = lv.get_fire_time()\n group_code = lv.get_group_code()\n address_code = lv.get_address_code()\n self.logger = logging.getLogger(self.logger)\n self.logger.setLevel(logging.INFO)\n if platform_system() == 'Linux':\n log_path = FileConfigParser().get_path(server=platform_system(),\n key='log-cb')\n if platform_system() == 'Windows':\n log_path = root_path + FileConfigParser().get_path(server=\n platform_system(), key='log')\n if self.name == 'spider':\n name = 'icrawlerspider.spider.log'\n elif self.name == 'middleware':\n name = 'icrawlerspider.middleware.log'\n log_name = log_path + name\n filename = self.logger.handlers[0].baseFilename.split('\\\\')[-1] if len(\n self.logger.handlers) > 0 else ''\n if log_name.split('/')[-1] != filename:\n self.logger.handlers.clear()\n if not self.logger.handlers:\n fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +\n '%s %s %s %s %s ' % (group_code, job_code, jobinst_id,\n fire_time, address_code) + '%(message)s')\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n fh.close()\n return self.logger\n\n\ndef log(name):\n\n def wraaper(func):\n\n def inner(*args, **kwargs):\n log = ICrawlerLog(name).save\n log.info('{}开始执行'.format(func))\n try:\n result = func(*args, **kwargs)\n if result:\n log.info('{}执行成功'.format(func))\n return result\n else:\n log.error('{}执行后返回值为空'.format(func))\n return None\n except Exception as e:\n log.error('{}程序异常执行失败,程序终止'.format(func))\n log.error(e)\n return 
False\n return inner\n return wraaper\n",
"step-4": "from SpiderTools.tool import platform_system\nfrom SpidersLog.file_handler import SafeFileHandler\nfrom Env.parse_yaml import FileConfigParser\nfrom Env import log_variable as lv\nfrom staticparm import root_path\nfrom SpiderTools.tool import get_username\nimport logging\nimport logging.handlers\nimport traceback\n\n\nclass ICrawlerLog:\n level_relations = {'debug': logging.DEBUG, 'info': logging.INFO,\n 'warning': logging.WARNING, 'error': logging.ERROR, 'crit': logging\n .CRITICAL}\n\n def __init__(self, name, logger=None):\n self.logger = logger\n self.name = name\n\n @property\n def save(self, *args, **kwargs):\n \"\"\"\n 指定保存日志的文件路径,日志级别,以及调用文件\n 将日志存入到指定的文件中\n \"\"\"\n jobinst_id = lv.get_jobinst_id()\n job_code = lv.get_job_code()\n fire_time = lv.get_fire_time()\n group_code = lv.get_group_code()\n address_code = lv.get_address_code()\n self.logger = logging.getLogger(self.logger)\n self.logger.setLevel(logging.INFO)\n if platform_system() == 'Linux':\n log_path = FileConfigParser().get_path(server=platform_system(),\n key='log-cb')\n if platform_system() == 'Windows':\n log_path = root_path + FileConfigParser().get_path(server=\n platform_system(), key='log')\n if self.name == 'spider':\n name = 'icrawlerspider.spider.log'\n elif self.name == 'middleware':\n name = 'icrawlerspider.middleware.log'\n log_name = log_path + name\n filename = self.logger.handlers[0].baseFilename.split('\\\\')[-1] if len(\n self.logger.handlers) > 0 else ''\n if log_name.split('/')[-1] != filename:\n self.logger.handlers.clear()\n if not self.logger.handlers:\n fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' +\n '%s %s %s %s %s ' % (group_code, job_code, jobinst_id,\n fire_time, address_code) + '%(message)s')\n fh.setFormatter(formatter)\n self.logger.addHandler(fh)\n fh.close()\n return self.logger\n\n\ndef log(name):\n\n def wraaper(func):\n\n def inner(*args, **kwargs):\n log = 
ICrawlerLog(name).save\n log.info('{}开始执行'.format(func))\n try:\n result = func(*args, **kwargs)\n if result:\n log.info('{}执行成功'.format(func))\n return result\n else:\n log.error('{}执行后返回值为空'.format(func))\n return None\n except Exception as e:\n log.error('{}程序异常执行失败,程序终止'.format(func))\n log.error(e)\n return False\n return inner\n return wraaper\n",
"step-5": "# encoding: utf-8\nfrom SpiderTools.tool import platform_system\nfrom SpidersLog.file_handler import SafeFileHandler\nfrom Env.parse_yaml import FileConfigParser\nfrom Env import log_variable as lv\nfrom staticparm import root_path\nfrom SpiderTools.tool import get_username\nimport logging\nimport logging.handlers\nimport traceback\n\n\nclass ICrawlerLog:\n level_relations = {\n 'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'crit': logging.CRITICAL\n } # 日志级别关系映射\n\n def __init__(self, name, logger=None):\n self.logger = logger\n self.name = name\n\n @property\n def save(self, *args, **kwargs):\n '''\n 指定保存日志的文件路径,日志级别,以及调用文件\n 将日志存入到指定的文件中\n '''\n jobinst_id = lv.get_jobinst_id()\n job_code = lv.get_job_code()\n fire_time = lv.get_fire_time()\n group_code = lv.get_group_code()\n address_code = lv.get_address_code()\n\n # year = time.strftime('%Y', time.localtime()) # 获取完整年份\n # month = time.strftime('%m', time.localtime()) # 获取月\n # day = time.strftime('%d', time.localtime()) # 获取日\n\n # 创建一个logger\n self.logger = logging.getLogger(self.logger)\n self.logger.setLevel(logging.INFO)\n # 创建一个handler,用于写入日志文件\n # self.log_time = time.strftime(\"%Y_%m_%d_\")\n\n if platform_system() == 'Linux':\n log_path = FileConfigParser().get_path(server=platform_system(),key='log-cb')\n if platform_system() == 'Windows':\n log_path = root_path + FileConfigParser().get_path(server=platform_system(), key='log')\n # log_path = './Logs/'\n # log_path = '/home/ijep/domain/logs/python/'\n # log_name = log_path + 'icrawlerspider.spider.%s-%s-%s.log' % (year, month, day)\n if self.name == 'spider':\n name = 'icrawlerspider.spider.log'\n elif self.name == 'middleware':\n name = 'icrawlerspider.middleware.log'\n\n log_name = log_path + name\n\n filename = self.logger.handlers[0].baseFilename.split('\\\\')[-1] if len(self.logger.handlers) > 0 else ''\n\n if log_name.split('/')[-1] != filename:\n 
self.logger.handlers.clear() # 多个不同文件名的情况下用这个\n\n if not self.logger.handlers:\n # 追加模式,按照日期来设置日志,handlers中TimedRotatingFileHandler就是按照日期来设置,RotatingFileHandler这个按照文件大小来设置\n # fh = logging.handlers.TimedRotatingFileHandler(log_name, when='D', interval=1, encoding='utf-8')\n fh = SafeFileHandler(log_name, mode='a', encoding='utf-8')\n # fh.setLevel(logging.INFO)\n\n # 定义handler的输出格式\n formatter = logging.Formatter('[%(asctime)s][%(levelname)s] ' + '%s %s %s %s %s '\n % (group_code, job_code, jobinst_id, fire_time, address_code) + '%(message)s')\n # '%(filename)s->%(funcName)s line:%(lineno)d\n\n fh.setFormatter(formatter)\n\n # 给logger添加handler\n self.logger.addHandler(fh)\n\n # 添加下面一句,在记录日志之后移除句柄\n # self.logger.info('记录数据')\n # self.logger.removeHandler(fh)\n # 关闭打开的文件\n fh.close()\n return self.logger\n\n\ndef log(name):\n def wraaper(func):\n def inner(*args, **kwargs): # 如果想返回result必须再包裹一层\n log = ICrawlerLog(name).save\n log.info(\"{}开始执行\".format(func))\n try:\n result = func(*args, **kwargs) # 如果不是在类的函数里使用装饰器就可以这么写,如果这么写会报需要self入参(因为你是用类作为装饰器,函数就不会这样)\n if result:\n log.info(\"{}执行成功\".format(func))\n # log.info(\"结果是: %s\" % result)\n return result\n else:\n log.error(\"{}执行后返回值为空\".format(func))\n return None\n except Exception as e:\n # traceback.print_exc()\n log.error(\"{}程序异常执行失败,程序终止\".format(func))\n log.error(e)\n return False\n\n return inner\n\n return wraaper\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def all_subsets(ss, i):
return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))
<|reserved_special_token_0|>
def f1(i):
return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])
def f2(i):
ci = 0
for j in range(len(i)):
ci += pi[int(i[j]) - 1]
resultP = {}
resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[
1]) - 1] * max(0, ci - di[int(i[1]) - 1])
resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[
0]) - 1] * max(0, ci - di[int(i[0]) - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
return mino, v
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def supp(c, chaine):
res = []
if chaine[0] == c:
res = chaine[1:]
for j in range(len(chaine)):
if c == chaine[j]:
res = chaine[0:j] + chaine[j + 1:]
return res
for i in range(n):
pi.append(generation.generer(n)[0][i])
wi.append(generation.generer(n)[1][i])
di.append(generation.generer(n)[2][i])
with open('LesDonnesDyna.csv', 'w') as new_file:
csv_writer = csv.writer(new_file, delimiter='\t')
csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])
for i in range(1, n + 1):
csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])
<|reserved_special_token_0|>
def all_subsets(ss, i):
return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))
<|reserved_special_token_0|>
for subset in all_subsets(k, 2):
combinaison.append(subset)
def f1(i):
return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])
def f2(i):
ci = 0
for j in range(len(i)):
ci += pi[int(i[j]) - 1]
resultP = {}
resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[
1]) - 1] * max(0, ci - di[int(i[1]) - 1])
resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[
0]) - 1] * max(0, ci - di[int(i[0]) - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
return mino, v
<|reserved_special_token_0|>
for i in range(len(combinaison)):
etapeII[combinaison[i]] = f2(combinaison[i])
for i in range(3, len(k) + 1):
combinaison1 = []
for subset in all_subsets(k, i):
combinaison1.append(subset)
for t in range(len(combinaison1)):
resultP = {}
for h in range(len(combinaison1[t])):
ci = 0
for j in range(len(combinaison1[t])):
ci += pi[combinaison1[t][j] - 1]
r = supp(combinaison1[t][h], combinaison1[t])
resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])
] = etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, ci -
di[combinaison1[t][h] - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
etapeII[combinaison1[t]] = mino, v
<|reserved_special_token_0|>
with open('resultat.txt', 'w') as resultat:
resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\n')
resultat.write('-La séquence optimale est : \n')
resultat.write('-' * len(str(etapeII[k][1])) + '\n')
resultat.write('|' + str(etapeII[k][1]) + '|\n')
resultat.write('-' * len(str(etapeII[k][1])) + '\n')
resultat.write("Le temps d'execution est : " + str(fin - debut) +
' secondes.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pi = []
wi = []
di = []
n = input('How many tasks do you want to schedule ? \n')
k = tuple(range(1, n + 1))
def supp(c, chaine):
res = []
if chaine[0] == c:
res = chaine[1:]
for j in range(len(chaine)):
if c == chaine[j]:
res = chaine[0:j] + chaine[j + 1:]
return res
for i in range(n):
pi.append(generation.generer(n)[0][i])
wi.append(generation.generer(n)[1][i])
di.append(generation.generer(n)[2][i])
with open('LesDonnesDyna.csv', 'w') as new_file:
csv_writer = csv.writer(new_file, delimiter='\t')
csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])
for i in range(1, n + 1):
csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])
debut = time.time()
def all_subsets(ss, i):
return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))
combinaison = []
for subset in all_subsets(k, 2):
combinaison.append(subset)
def f1(i):
return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])
def f2(i):
ci = 0
for j in range(len(i)):
ci += pi[int(i[j]) - 1]
resultP = {}
resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[
1]) - 1] * max(0, ci - di[int(i[1]) - 1])
resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[
0]) - 1] * max(0, ci - di[int(i[0]) - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
return mino, v
etapeII = {}
for i in range(len(combinaison)):
etapeII[combinaison[i]] = f2(combinaison[i])
for i in range(3, len(k) + 1):
combinaison1 = []
for subset in all_subsets(k, i):
combinaison1.append(subset)
for t in range(len(combinaison1)):
resultP = {}
for h in range(len(combinaison1[t])):
ci = 0
for j in range(len(combinaison1[t])):
ci += pi[combinaison1[t][j] - 1]
r = supp(combinaison1[t][h], combinaison1[t])
resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])
] = etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, ci -
di[combinaison1[t][h] - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
etapeII[combinaison1[t]] = mino, v
fin = time.time()
with open('resultat.txt', 'w') as resultat:
resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\n')
resultat.write('-La séquence optimale est : \n')
resultat.write('-' * len(str(etapeII[k][1])) + '\n')
resultat.write('|' + str(etapeII[k][1]) + '|\n')
resultat.write('-' * len(str(etapeII[k][1])) + '\n')
resultat.write("Le temps d'execution est : " + str(fin - debut) +
' secondes.')
<|reserved_special_token_1|>
import csv
from itertools import chain, combinations
import generation
import time
pi = []
wi = []
di = []
n = input('How many tasks do you want to schedule ? \n')
k = tuple(range(1, n + 1))
def supp(c, chaine):
res = []
if chaine[0] == c:
res = chaine[1:]
for j in range(len(chaine)):
if c == chaine[j]:
res = chaine[0:j] + chaine[j + 1:]
return res
for i in range(n):
pi.append(generation.generer(n)[0][i])
wi.append(generation.generer(n)[1][i])
di.append(generation.generer(n)[2][i])
with open('LesDonnesDyna.csv', 'w') as new_file:
csv_writer = csv.writer(new_file, delimiter='\t')
csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])
for i in range(1, n + 1):
csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])
debut = time.time()
def all_subsets(ss, i):
return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))
combinaison = []
for subset in all_subsets(k, 2):
combinaison.append(subset)
def f1(i):
return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])
def f2(i):
ci = 0
for j in range(len(i)):
ci += pi[int(i[j]) - 1]
resultP = {}
resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[
1]) - 1] * max(0, ci - di[int(i[1]) - 1])
resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[
0]) - 1] * max(0, ci - di[int(i[0]) - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
return mino, v
etapeII = {}
for i in range(len(combinaison)):
etapeII[combinaison[i]] = f2(combinaison[i])
for i in range(3, len(k) + 1):
combinaison1 = []
for subset in all_subsets(k, i):
combinaison1.append(subset)
for t in range(len(combinaison1)):
resultP = {}
for h in range(len(combinaison1[t])):
ci = 0
for j in range(len(combinaison1[t])):
ci += pi[combinaison1[t][j] - 1]
r = supp(combinaison1[t][h], combinaison1[t])
resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])
] = etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, ci -
di[combinaison1[t][h] - 1])
mino = min(resultP.values())
n = resultP.values().index(mino)
v = resultP.keys()[n]
etapeII[combinaison1[t]] = mino, v
fin = time.time()
with open('resultat.txt', 'w') as resultat:
resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\n')
resultat.write('-La séquence optimale est : \n')
resultat.write('-' * len(str(etapeII[k][1])) + '\n')
resultat.write('|' + str(etapeII[k][1]) + '|\n')
resultat.write('-' * len(str(etapeII[k][1])) + '\n')
resultat.write("Le temps d'execution est : " + str(fin - debut) +
' secondes.')
<|reserved_special_token_1|>
import csv
from itertools import chain, combinations
import generation
import time
pi = []
wi = []
di = []
n = input("How many tasks do you want to schedule ? \n")
k=tuple(range(1,n+1))
#la fonction qui supprime un element dans l'ensemble des taches, elle facilite comment retrouver les sous taches de J
def supp(c,chaine):
res=[]
if chaine[0]==c:
res=chaine[1:]
for j in range(len(chaine)):
if c==chaine[j]:
res=chaine[0:j]+chaine[j+1:]
return res
# Cette boucle pour generer les donnees des taches en respectant les contraintes du poids
# chose qui existe deja dans la fonction generer
for i in range(n):
pi.append(generation.generer(n)[0][i])
wi.append(generation.generer(n)[1][i])
di.append(generation.generer(n)[2][i])
#enregistrer les donnees generes dans un fichier csv sous le nom LesDonnesDyna
with open("LesDonnesDyna.csv", "w") as new_file:
csv_writer = csv.writer(new_file, delimiter='\t')
csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])
for i in range(1, n + 1):
csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])
#pour generer les sequenses possibles
# i c'est le nombre de chaque sequence
debut = time.time()
def all_subsets(ss,i):
return chain(*map(lambda x: combinations(ss, x), range(i,i+1)))
#Combinaison est une liste qui stoque les differentes sous listes (sous taches)
combinaison=[]
for subset in all_subsets(k,2):
combinaison.append(subset)
#pour calculer la condition initiale qui est l'execution d'une seule tache
def f1(i):
return wi[i[0]-1]*max(0,(pi[i[0]-1]-di[i[0]-1]))
# Compute the optimal values for the 2-task combinations.
def f2(i):
    """Return (best value, best order label) for the 2-task tuple i.

    Both execution orders are evaluated; the label has the shape
    'first => last'.
    """
    ci = 0  # total processing time of the pair
    for j in range(len(i)):
        ci += pi[int(i[j]) - 1]
    resultP = {}
    resultP[str(i[0]) + " => " + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[1]) - 1] * max(0, (ci - di[int(i[1]) - 1]))
    resultP[str(i[1]) + " => " + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[0]) - 1] * max(0, (ci - di[int(i[0]) - 1]))
    # Portable arg-min over the dict: the original used
    # resultP.values().index(mino) / resultP.keys()[n], which only works
    # on Python 2 (dict views in Python 3 support neither .index nor
    # subscripting) and also shadowed the global n.
    v = min(resultP, key=resultP.get)
    mino = resultP[v]
    return mino, v
# Optimal (value, order) for every 2-task combination.
etapeII = {combo: f2(combo) for combo in combinaison}
# Main DP recurrence: extend the 2-task optima to subsets of size 3..n.
# For every subset, each member is tried as the last executed task.
for i in range(3, len(k) + 1):
    combinaison1 = []
    for subset in all_subsets(k, i):
        combinaison1.append(subset)
    for t in range(len(combinaison1)):
        resultP = {}
        # Total processing time of the subset; it does not depend on h,
        # so it is computed once instead of once per candidate task.
        ci = 0
        for j in range(len(combinaison1[t])):
            ci += pi[combinaison1[t][j] - 1]
        for h in range(len(combinaison1[t])):
            r = supp(combinaison1[t][h], combinaison1[t])
            resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])] = etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, (ci - (di[combinaison1[t][h] - 1])))
        # Portable arg-min: the original resultP.values().index(mino) /
        # resultP.keys()[n] idiom only works on Python 2 (dict views in
        # Python 3 support neither) and shadowed the global n.
        v = min(resultP, key=resultP.get)
        mino = resultP[v]
        etapeII[combinaison1[t]] = (mino, v)
fin = time.time()  # end of the timing started at debut

# Write the optimal value, the optimal sequence (boxed) and the elapsed
# time to resultat.txt.
with open('resultat.txt', 'w') as resultat:
    sequence = str(etapeII[k][1])
    border = '-' * len(sequence)
    resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\n')
    resultat.write('-La séquence optimale est : \n')
    resultat.write(border + '\n')
    resultat.write('|' + sequence + '|\n')
    resultat.write(border + '\n')
    resultat.write("Le temps d'execution est : " + str(fin - debut) + ' secondes.')
|
flexible
|
{
"blob_id": "ddab4d014c000dd96bad932adac75e4eec065483",
"index": 9644,
"step-1": "<mask token>\n\n\ndef all_subsets(ss, i):\n return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))\n\n\n<mask token>\n\n\ndef f1(i):\n return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])\n\n\ndef f2(i):\n ci = 0\n for j in range(len(i)):\n ci += pi[int(i[j]) - 1]\n resultP = {}\n resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[\n 1]) - 1] * max(0, ci - di[int(i[1]) - 1])\n resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[\n 0]) - 1] * max(0, ci - di[int(i[0]) - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n return mino, v\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef supp(c, chaine):\n res = []\n if chaine[0] == c:\n res = chaine[1:]\n for j in range(len(chaine)):\n if c == chaine[j]:\n res = chaine[0:j] + chaine[j + 1:]\n return res\n\n\nfor i in range(n):\n pi.append(generation.generer(n)[0][i])\n wi.append(generation.generer(n)[1][i])\n di.append(generation.generer(n)[2][i])\nwith open('LesDonnesDyna.csv', 'w') as new_file:\n csv_writer = csv.writer(new_file, delimiter='\\t')\n csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])\n for i in range(1, n + 1):\n csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])\n<mask token>\n\n\ndef all_subsets(ss, i):\n return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))\n\n\n<mask token>\nfor subset in all_subsets(k, 2):\n combinaison.append(subset)\n\n\ndef f1(i):\n return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])\n\n\ndef f2(i):\n ci = 0\n for j in range(len(i)):\n ci += pi[int(i[j]) - 1]\n resultP = {}\n resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[\n 1]) - 1] * max(0, ci - di[int(i[1]) - 1])\n resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[\n 0]) - 1] * max(0, ci - di[int(i[0]) - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n return mino, v\n\n\n<mask token>\nfor i in range(len(combinaison)):\n etapeII[combinaison[i]] = f2(combinaison[i])\nfor i in range(3, len(k) + 1):\n combinaison1 = []\n for subset in all_subsets(k, i):\n combinaison1.append(subset)\n for t in range(len(combinaison1)):\n resultP = {}\n for h in range(len(combinaison1[t])):\n ci = 0\n for j in range(len(combinaison1[t])):\n ci += pi[combinaison1[t][j] - 1]\n r = supp(combinaison1[t][h], combinaison1[t])\n resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])\n ] = etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, ci -\n di[combinaison1[t][h] - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n 
etapeII[combinaison1[t]] = mino, v\n<mask token>\nwith open('resultat.txt', 'w') as resultat:\n resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\\n')\n resultat.write('-La séquence optimale est : \\n')\n resultat.write('-' * len(str(etapeII[k][1])) + '\\n')\n resultat.write('|' + str(etapeII[k][1]) + '|\\n')\n resultat.write('-' * len(str(etapeII[k][1])) + '\\n')\n resultat.write(\"Le temps d'execution est : \" + str(fin - debut) +\n ' secondes.')\n",
"step-3": "<mask token>\npi = []\nwi = []\ndi = []\nn = input('How many tasks do you want to schedule ? \\n')\nk = tuple(range(1, n + 1))\n\n\ndef supp(c, chaine):\n res = []\n if chaine[0] == c:\n res = chaine[1:]\n for j in range(len(chaine)):\n if c == chaine[j]:\n res = chaine[0:j] + chaine[j + 1:]\n return res\n\n\nfor i in range(n):\n pi.append(generation.generer(n)[0][i])\n wi.append(generation.generer(n)[1][i])\n di.append(generation.generer(n)[2][i])\nwith open('LesDonnesDyna.csv', 'w') as new_file:\n csv_writer = csv.writer(new_file, delimiter='\\t')\n csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])\n for i in range(1, n + 1):\n csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])\ndebut = time.time()\n\n\ndef all_subsets(ss, i):\n return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))\n\n\ncombinaison = []\nfor subset in all_subsets(k, 2):\n combinaison.append(subset)\n\n\ndef f1(i):\n return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])\n\n\ndef f2(i):\n ci = 0\n for j in range(len(i)):\n ci += pi[int(i[j]) - 1]\n resultP = {}\n resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[\n 1]) - 1] * max(0, ci - di[int(i[1]) - 1])\n resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[\n 0]) - 1] * max(0, ci - di[int(i[0]) - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n return mino, v\n\n\netapeII = {}\nfor i in range(len(combinaison)):\n etapeII[combinaison[i]] = f2(combinaison[i])\nfor i in range(3, len(k) + 1):\n combinaison1 = []\n for subset in all_subsets(k, i):\n combinaison1.append(subset)\n for t in range(len(combinaison1)):\n resultP = {}\n for h in range(len(combinaison1[t])):\n ci = 0\n for j in range(len(combinaison1[t])):\n ci += pi[combinaison1[t][j] - 1]\n r = supp(combinaison1[t][h], combinaison1[t])\n resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])\n ] = etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, ci -\n 
di[combinaison1[t][h] - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n etapeII[combinaison1[t]] = mino, v\nfin = time.time()\nwith open('resultat.txt', 'w') as resultat:\n resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\\n')\n resultat.write('-La séquence optimale est : \\n')\n resultat.write('-' * len(str(etapeII[k][1])) + '\\n')\n resultat.write('|' + str(etapeII[k][1]) + '|\\n')\n resultat.write('-' * len(str(etapeII[k][1])) + '\\n')\n resultat.write(\"Le temps d'execution est : \" + str(fin - debut) +\n ' secondes.')\n",
"step-4": "import csv\nfrom itertools import chain, combinations\nimport generation\nimport time\npi = []\nwi = []\ndi = []\nn = input('How many tasks do you want to schedule ? \\n')\nk = tuple(range(1, n + 1))\n\n\ndef supp(c, chaine):\n res = []\n if chaine[0] == c:\n res = chaine[1:]\n for j in range(len(chaine)):\n if c == chaine[j]:\n res = chaine[0:j] + chaine[j + 1:]\n return res\n\n\nfor i in range(n):\n pi.append(generation.generer(n)[0][i])\n wi.append(generation.generer(n)[1][i])\n di.append(generation.generer(n)[2][i])\nwith open('LesDonnesDyna.csv', 'w') as new_file:\n csv_writer = csv.writer(new_file, delimiter='\\t')\n csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])\n for i in range(1, n + 1):\n csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])\ndebut = time.time()\n\n\ndef all_subsets(ss, i):\n return chain(*map(lambda x: combinations(ss, x), range(i, i + 1)))\n\n\ncombinaison = []\nfor subset in all_subsets(k, 2):\n combinaison.append(subset)\n\n\ndef f1(i):\n return wi[i[0] - 1] * max(0, pi[i[0] - 1] - di[i[0] - 1])\n\n\ndef f2(i):\n ci = 0\n for j in range(len(i)):\n ci += pi[int(i[j]) - 1]\n resultP = {}\n resultP[str(i[0]) + ' => ' + str(i[1])] = f1(supp(i[1], i)) + wi[int(i[\n 1]) - 1] * max(0, ci - di[int(i[1]) - 1])\n resultP[str(i[1]) + ' => ' + str(i[0])] = f1(supp(i[0], i)) + wi[int(i[\n 0]) - 1] * max(0, ci - di[int(i[0]) - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n return mino, v\n\n\netapeII = {}\nfor i in range(len(combinaison)):\n etapeII[combinaison[i]] = f2(combinaison[i])\nfor i in range(3, len(k) + 1):\n combinaison1 = []\n for subset in all_subsets(k, i):\n combinaison1.append(subset)\n for t in range(len(combinaison1)):\n resultP = {}\n for h in range(len(combinaison1[t])):\n ci = 0\n for j in range(len(combinaison1[t])):\n ci += pi[combinaison1[t][j] - 1]\n r = supp(combinaison1[t][h], combinaison1[t])\n resultP[etapeII[r][1] + ' => ' + str(combinaison1[t][h])\n ] = 
etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, ci -\n di[combinaison1[t][h] - 1])\n mino = min(resultP.values())\n n = resultP.values().index(mino)\n v = resultP.keys()[n]\n etapeII[combinaison1[t]] = mino, v\nfin = time.time()\nwith open('resultat.txt', 'w') as resultat:\n resultat.write('-la valeur optimal est :' + str(etapeII[k][0]) + '\\n')\n resultat.write('-La séquence optimale est : \\n')\n resultat.write('-' * len(str(etapeII[k][1])) + '\\n')\n resultat.write('|' + str(etapeII[k][1]) + '|\\n')\n resultat.write('-' * len(str(etapeII[k][1])) + '\\n')\n resultat.write(\"Le temps d'execution est : \" + str(fin - debut) +\n ' secondes.')\n",
"step-5": "import csv\r\nfrom itertools import chain, combinations\r\nimport generation\r\nimport time\r\npi = []\r\nwi = []\r\ndi = []\r\nn = input(\"How many tasks do you want to schedule ? \\n\")\r\n\r\nk=tuple(range(1,n+1))\r\n#la fonction qui supprime un element dans l'ensemble des taches, elle facilite comment retrouver les sous taches de J\r\ndef supp(c,chaine):\r\n res=[]\r\n if chaine[0]==c:\r\n res=chaine[1:]\r\n for j in range(len(chaine)):\r\n if c==chaine[j]:\r\n res=chaine[0:j]+chaine[j+1:]\r\n return res\r\n\r\n# Cette boucle pour generer les donnees des taches en respectant les contraintes du poids\r\n# chose qui existe deja dans la fonction generer\r\nfor i in range(n):\r\n pi.append(generation.generer(n)[0][i])\r\n wi.append(generation.generer(n)[1][i])\r\n di.append(generation.generer(n)[2][i])\r\n#enregistrer les donnees generes dans un fichier csv sous le nom LesDonnesDyna\r\nwith open(\"LesDonnesDyna.csv\", \"w\") as new_file:\r\n csv_writer = csv.writer(new_file, delimiter='\\t')\r\n csv_writer.writerow(['i', 'Pi', 'Wi', 'Di'])\r\n for i in range(1, n + 1):\r\n csv_writer.writerow([i, pi[i - 1], wi[i - 1], di[i - 1]])\r\n\r\n#pour generer les sequenses possibles\r\n# i c'est le nombre de chaque sequence\r\ndebut = time.time()\r\ndef all_subsets(ss,i):\r\n return chain(*map(lambda x: combinations(ss, x), range(i,i+1)))\r\n#Combinaison est une liste qui stoque les differentes sous listes (sous taches)\r\ncombinaison=[]\r\nfor subset in all_subsets(k,2):\r\n combinaison.append(subset)\r\n\r\n#pour calculer la condition initiale qui est l'execution d'une seule tache\r\ndef f1(i):\r\n return wi[i[0]-1]*max(0,(pi[i[0]-1]-di[i[0]-1]))\r\n\r\n#pour calculer les valeurs optimales de 2 combinaisons\r\ndef f2(i):\r\n ci=0\r\n for j in range(len(i)):\r\n ci+=pi[int(i[j])-1]\r\n resultP={}\r\n resultP[str(i[0])+\" => \"+str(i[1])]=f1(supp(i[1],i))+wi[int(i[1])-1]*max(0,(ci-di[int(i[1])-1]))\r\n resultP[str(i[1])+\" => 
\"+str(i[0])]=f1(supp(i[0],i))+wi[int(i[0])-1]*max(0,(ci-di[int(i[0])-1]))\r\n #min=min_dic(resultP)\r\n mino=min(resultP.values())\r\n n=resultP.values().index(mino)\r\n v=resultP.keys()[n]\r\n #chemin.append(v)\r\n return mino,v\r\n\r\n\r\netapeII={}\r\n\r\nfor i in range(len(combinaison)):\r\n etapeII[combinaison[i]]=f2(combinaison[i])\r\n\r\nfor i in range(3,len(k)+1):\r\n combinaison1=[]\r\n for subset in all_subsets(k,i):\r\n combinaison1.append(subset)\r\n for t in range(len(combinaison1)):\r\n resultP={}\r\n for h in range(len(combinaison1[t])):\r\n ci=0\r\n for j in range(len(combinaison1[t])):\r\n ci+=pi[combinaison1[t][j] - 1]\r\n r=supp(combinaison1[t][h], combinaison1[t])\r\n resultP[etapeII[r][1] +' => ' + str(combinaison1[t][h])]= etapeII[r][0] + wi[combinaison1[t][h] - 1] * max(0, (ci - (di[combinaison1[t][h] - 1])))\r\n mino=min(resultP.values())\r\n n=resultP.values().index(mino)\r\n v=resultP.keys()[n]\r\n etapeII[combinaison1[t]]=(mino, v)\r\nfin = time.time()\r\n\r\n\r\nwith open('resultat.txt','w') as resultat:\r\n resultat.write(\"-la valeur optimal est :\"+str(etapeII[k][0])+\"\\n\")\r\n\r\n resultat.write(\"-La séquence optimale est : \\n\")\r\n resultat.write(\"-\"*len(str(etapeII[k][1])) +\"\\n\")\r\n resultat.write('|'+str(etapeII[k][1])+'|\\n')\r\n resultat.write(\"-\"*len(str(etapeII[k][1])) +\"\\n\")\r\n resultat.write(\"Le temps d'execution est : \"+str(fin - debut)+\" secondes.\")",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# Get Facebook's bAbi dataset
from utils import maybe_download
from shutil import rmtree
import os
import tarfile
def get_babi_en(get_10k=False):
    """Download and unpack Facebook's bAbI tasks into datasets/babi.

    get_10k: if True, use the English 10k-examples-per-task variant
    instead of the default 1k variant.
    """
    data_dir = "datasets/tasks_1-20_v1-2/en/"
    if get_10k:
        data_dir = "datasets/tasks_1-20_v1-2/en-10k/"

    maybe_download('https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz', 'datasets', 11745123)
    # Context manager guarantees the archive is closed even on error;
    # 'archive' avoids shadowing the builtin 'file' (Python 2).
    with tarfile.open("datasets/babi_tasks_1-20_v1-2.tar.gz", "r:gz") as archive:
        archive.extractall("datasets")
    print("Some housekeeping...")
    if not os.path.exists("datasets/babi"):
        os.makedirs("datasets/babi")
    for path, dirnames, filenames in os.walk(data_dir):
        for name in filenames:
            # Join with the directory actually being walked: the original
            # joined data_dir, which only works while en/ stays flat.
            os.rename(os.path.join(path, name), os.path.join("datasets/babi", name))
    os.remove("datasets/babi_tasks_1-20_v1-2.tar.gz")
    rmtree("datasets/tasks_1-20_v1-2")
    print("Finished.")
|
normal
|
{
"blob_id": "7a4d04bd60b5f5555982af372145f9f4bcd83ca2",
"index": 8194,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_babi_en(get_10k=False):\n data_dir = 'datasets/tasks_1-20_v1-2/en/'\n if get_10k == True:\n data_dir = 'datasets/tasks_1-20_v1-2/en-10k/'\n maybe_download(\n 'https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz',\n 'datasets', 11745123)\n file = tarfile.open('datasets/babi_tasks_1-20_v1-2.tar.gz', 'r:gz')\n file.extractall('datasets')\n file.close()\n print('Some housekeeping...')\n if not os.path.exists('datasets/babi'):\n os.makedirs('datasets/babi')\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\n 'datasets/babi', file))\n os.remove('datasets/babi_tasks_1-20_v1-2.tar.gz')\n rmtree('datasets/tasks_1-20_v1-2')\n print('Finished.')\n",
"step-3": "from utils import maybe_download\nfrom shutil import rmtree\nimport os\nimport tarfile\n\n\ndef get_babi_en(get_10k=False):\n data_dir = 'datasets/tasks_1-20_v1-2/en/'\n if get_10k == True:\n data_dir = 'datasets/tasks_1-20_v1-2/en-10k/'\n maybe_download(\n 'https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz',\n 'datasets', 11745123)\n file = tarfile.open('datasets/babi_tasks_1-20_v1-2.tar.gz', 'r:gz')\n file.extractall('datasets')\n file.close()\n print('Some housekeeping...')\n if not os.path.exists('datasets/babi'):\n os.makedirs('datasets/babi')\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\n 'datasets/babi', file))\n os.remove('datasets/babi_tasks_1-20_v1-2.tar.gz')\n rmtree('datasets/tasks_1-20_v1-2')\n print('Finished.')\n",
"step-4": "# Get Facebook's bAbi dataset\nfrom utils import maybe_download\nfrom shutil import rmtree\nimport os\nimport tarfile\n\ndef get_babi_en(get_10k=False):\n data_dir = \"datasets/tasks_1-20_v1-2/en/\"\n if get_10k == True:\n data_dir = \"datasets/tasks_1-20_v1-2/en-10k/\"\n \n maybe_download('https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz', 'datasets', 11745123)\n file = tarfile.open(\"datasets/babi_tasks_1-20_v1-2.tar.gz\", \"r:gz\")\n file.extractall(\"datasets\")\n file.close()\n print(\"Some housekeeping...\")\n if not os.path.exists(\"datasets/babi\"):\n os.makedirs(\"datasets/babi\")\n for path, dir, files in os.walk(data_dir):\n for file in files:\n os.rename(os.path.join(data_dir, file), os.path.join(\"datasets/babi\", file)) \n os.remove(\"datasets/babi_tasks_1-20_v1-2.tar.gz\")\n rmtree(\"datasets/tasks_1-20_v1-2\")\n print(\"Finished.\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def create_user(open_ldap, smtp, entries):
    """Insert the user via LDAP and, on success, email the account info.

    Returns True on success, False when the LDAP insert reports failure,
    and None when either step raises (the error is printed, not raised).
    """
    try:
        if not open_ldap.ldap_insert(entries):
            return False
        smtp.send_email(entries)
        return True
    except Exception as e:
        print('ERROR - ', e)
        return
<|reserved_special_token_0|>
def main():
    """Command-line entry point: parse args, run, print the outcome."""
    print(run(parse_args()))
    return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_user(open_ldap, smtp, entries):
    """Insert the user via LDAP and, on success, email the account info.

    Returns True on success, False when the LDAP insert reports failure,
    and None when either step raises (the error is printed, not raised).
    """
    try:
        if not open_ldap.ldap_insert(entries):
            return False
        smtp.send_email(entries)
        return True
    except Exception as e:
        print('ERROR - ', e)
        return
def run(args):
    """Create one LDAP user per csv row and report how many succeeded.

    Builds the OpenLDAP and SMTP helpers from the parsed command-line
    arguments, then feeds every csv row (augmented with a random
    password) to create_user.  Returns an error string when a required
    csv header is missing, otherwise a summary with the user count.
    """
    open_ldap = OpenLdap(args.user, args.password, args.address)
    smtp = SmtpServer(args.smtp_host, args.port, args.email,
                      args.email_password)
    entries = {}
    count = 0
    for row in parse_csv(args.file):
        try:
            for field in ('name', 'lastname', 'email'):
                entries[field] = row[field]
        except KeyError as e:
            return "ERROR - Missing '{}' csv header".format(e)
        entries['password'] = random_password()
        if create_user(open_ldap, smtp, entries):
            count += 1
    return 'INFO - Finished. Total of {} user(s) created'.format(count)
def main():
    """Command-line entry point: parse args, run, print the outcome."""
    print(run(parse_args()))
    return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_user(open_ldap, smtp, entries):
    """Insert the user via LDAP and, on success, email the account info.

    Returns True on success, False when the LDAP insert reports failure,
    and None when either step raises (the error is printed, not raised).
    """
    try:
        if not open_ldap.ldap_insert(entries):
            return False
        smtp.send_email(entries)
        return True
    except Exception as e:
        print('ERROR - ', e)
        return
def run(args):
    """Create one LDAP user per csv row and report how many succeeded.

    Builds the OpenLDAP and SMTP helpers from the parsed command-line
    arguments, then feeds every csv row (augmented with a random
    password) to create_user.  Returns an error string when a required
    csv header is missing, otherwise a summary with the user count.
    """
    open_ldap = OpenLdap(args.user, args.password, args.address)
    smtp = SmtpServer(args.smtp_host, args.port, args.email,
                      args.email_password)
    entries = {}
    count = 0
    for row in parse_csv(args.file):
        try:
            for field in ('name', 'lastname', 'email'):
                entries[field] = row[field]
        except KeyError as e:
            return "ERROR - Missing '{}' csv header".format(e)
        entries['password'] = random_password()
        if create_user(open_ldap, smtp, entries):
            count += 1
    return 'INFO - Finished. Total of {} user(s) created'.format(count)
def main():
    """Command-line entry point: parse args, run, print the outcome."""
    print(run(parse_args()))
    return 0
# Run only when executed as a script; propagate main()'s exit status.
if __name__ == '__main__':
    sys.exit(main())
<|reserved_special_token_1|>
import sys
from arguments_parser import parse_args
from open_ldap import OpenLdap
from csv_parser import parse_csv, random_password
from smtp_mail import SmtpServer
def create_user(open_ldap, smtp, entries):
    """Insert the user via LDAP and, on success, email the account info.

    Returns True on success, False when the LDAP insert reports failure,
    and None when either step raises (the error is printed, not raised).
    """
    try:
        if not open_ldap.ldap_insert(entries):
            return False
        smtp.send_email(entries)
        return True
    except Exception as e:
        print('ERROR - ', e)
        return
def run(args):
    """Create one LDAP user per csv row and report how many succeeded.

    Builds the OpenLDAP and SMTP helpers from the parsed command-line
    arguments, then feeds every csv row (augmented with a random
    password) to create_user.  Returns an error string when a required
    csv header is missing, otherwise a summary with the user count.
    """
    open_ldap = OpenLdap(args.user, args.password, args.address)
    smtp = SmtpServer(args.smtp_host, args.port, args.email,
                      args.email_password)
    entries = {}
    count = 0
    for row in parse_csv(args.file):
        try:
            for field in ('name', 'lastname', 'email'):
                entries[field] = row[field]
        except KeyError as e:
            return "ERROR - Missing '{}' csv header".format(e)
        entries['password'] = random_password()
        if create_user(open_ldap, smtp, entries):
            count += 1
    return 'INFO - Finished. Total of {} user(s) created'.format(count)
def main():
    """Command-line entry point: parse args, run, print the outcome."""
    print(run(parse_args()))
    return 0
# Run only when executed as a script; propagate main()'s exit status.
if __name__ == '__main__':
    sys.exit(main())
<|reserved_special_token_1|>
import sys
from arguments_parser import parse_args
from open_ldap import OpenLdap
from csv_parser import parse_csv, random_password
from smtp_mail import SmtpServer
def create_user(open_ldap, smtp, entries):
    """Insert the user via LDAP and, on success, email the account info.

    Returns True on success, False when the LDAP insert reports failure,
    and None when either step raises (the error is printed, not raised).
    """
    try:
        if not open_ldap.ldap_insert(entries):
            return False
        smtp.send_email(entries)
        return True
    except Exception as e:
        print('ERROR - ', e)
        return
def run(args):
    """Create one LDAP user per csv row and report how many succeeded.

    Builds the OpenLDAP and SMTP helpers from the parsed command-line
    arguments, then feeds every csv row (augmented with a random
    password) to create_user.  Returns an error string when a required
    csv header is missing, otherwise a summary with the user count.
    """
    open_ldap = OpenLdap(args.user, args.password, args.address)
    smtp = SmtpServer(args.smtp_host, args.port, args.email,
                      args.email_password)
    entries = {}
    count = 0
    for row in parse_csv(args.file):
        try:
            for field in ('name', 'lastname', 'email'):
                entries[field] = row[field]
        except KeyError as e:
            return "ERROR - Missing '{}' csv header".format(e)
        entries['password'] = random_password()
        if create_user(open_ldap, smtp, entries):
            count += 1
    return 'INFO - Finished. Total of {} user(s) created'.format(count)
def main():
    """Command-line entry point: parse args, run, print the outcome."""
    print(run(parse_args()))
    return 0
if __name__ == "__main__":
sys.exit(main())
|
flexible
|
{
"blob_id": "4f0a0089ad128edca3052da58a4c71f935592e25",
"index": 4499,
"step-1": "<mask token>\n\n\ndef create_user(open_ldap, smtp, entries):\n \"\"\"\n If the 'ldap_insert' returns True, then\n the email will be send with the account info.\n \"\"\"\n try:\n if open_ldap.ldap_insert(entries):\n smtp.send_email(entries)\n return True\n else:\n return False\n except Exception as e:\n print('ERROR - ', e)\n return\n\n\n<mask token>\n\n\ndef main():\n args = parse_args()\n print(run(args))\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_user(open_ldap, smtp, entries):\n \"\"\"\n If the 'ldap_insert' returns True, then\n the email will be send with the account info.\n \"\"\"\n try:\n if open_ldap.ldap_insert(entries):\n smtp.send_email(entries)\n return True\n else:\n return False\n except Exception as e:\n print('ERROR - ', e)\n return\n\n\ndef run(args):\n \"\"\"\n Creates the OpenLDAP and SMTP\n objects and iterates over the .csv file.\n Calls the create_user function and check the\n result (if 'true' the count will be increased).\n Returns the total count of users created.\n \"\"\"\n open_ldap = OpenLdap(args.user, args.password, args.address)\n smtp = SmtpServer(args.smtp_host, args.port, args.email, args.\n email_password)\n entries = {}\n count = 0\n for row in parse_csv(args.file):\n try:\n entries['name'] = row['name']\n entries['lastname'] = row['lastname']\n entries['email'] = row['email']\n except KeyError as e:\n return \"ERROR - Missing '{}' csv header\".format(e)\n entries['password'] = random_password()\n if create_user(open_ldap, smtp, entries):\n count += 1\n return 'INFO - Finished. Total of {} user(s) created'.format(count)\n\n\ndef main():\n args = parse_args()\n print(run(args))\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_user(open_ldap, smtp, entries):\n \"\"\"\n If the 'ldap_insert' returns True, then\n the email will be send with the account info.\n \"\"\"\n try:\n if open_ldap.ldap_insert(entries):\n smtp.send_email(entries)\n return True\n else:\n return False\n except Exception as e:\n print('ERROR - ', e)\n return\n\n\ndef run(args):\n \"\"\"\n Creates the OpenLDAP and SMTP\n objects and iterates over the .csv file.\n Calls the create_user function and check the\n result (if 'true' the count will be increased).\n Returns the total count of users created.\n \"\"\"\n open_ldap = OpenLdap(args.user, args.password, args.address)\n smtp = SmtpServer(args.smtp_host, args.port, args.email, args.\n email_password)\n entries = {}\n count = 0\n for row in parse_csv(args.file):\n try:\n entries['name'] = row['name']\n entries['lastname'] = row['lastname']\n entries['email'] = row['email']\n except KeyError as e:\n return \"ERROR - Missing '{}' csv header\".format(e)\n entries['password'] = random_password()\n if create_user(open_ldap, smtp, entries):\n count += 1\n return 'INFO - Finished. Total of {} user(s) created'.format(count)\n\n\ndef main():\n args = parse_args()\n print(run(args))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import sys\nfrom arguments_parser import parse_args\nfrom open_ldap import OpenLdap\nfrom csv_parser import parse_csv, random_password\nfrom smtp_mail import SmtpServer\n\n\ndef create_user(open_ldap, smtp, entries):\n \"\"\"\n If the 'ldap_insert' returns True, then\n the email will be send with the account info.\n \"\"\"\n try:\n if open_ldap.ldap_insert(entries):\n smtp.send_email(entries)\n return True\n else:\n return False\n except Exception as e:\n print('ERROR - ', e)\n return\n\n\ndef run(args):\n \"\"\"\n Creates the OpenLDAP and SMTP\n objects and iterates over the .csv file.\n Calls the create_user function and check the\n result (if 'true' the count will be increased).\n Returns the total count of users created.\n \"\"\"\n open_ldap = OpenLdap(args.user, args.password, args.address)\n smtp = SmtpServer(args.smtp_host, args.port, args.email, args.\n email_password)\n entries = {}\n count = 0\n for row in parse_csv(args.file):\n try:\n entries['name'] = row['name']\n entries['lastname'] = row['lastname']\n entries['email'] = row['email']\n except KeyError as e:\n return \"ERROR - Missing '{}' csv header\".format(e)\n entries['password'] = random_password()\n if create_user(open_ldap, smtp, entries):\n count += 1\n return 'INFO - Finished. Total of {} user(s) created'.format(count)\n\n\ndef main():\n args = parse_args()\n print(run(args))\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "import sys\nfrom arguments_parser import parse_args\nfrom open_ldap import OpenLdap\nfrom csv_parser import parse_csv, random_password\nfrom smtp_mail import SmtpServer\n\n\ndef create_user(open_ldap, smtp, entries):\n \"\"\"\n If the 'ldap_insert' returns True, then\n the email will be send with the account info.\n \"\"\"\n try:\n if open_ldap.ldap_insert(entries):\n smtp.send_email(entries)\n return True\n else:\n return False\n except Exception as e:\n print('ERROR - ', e)\n return\n\n\ndef run(args):\n \"\"\"\n Creates the OpenLDAP and SMTP\n objects and iterates over the .csv file.\n Calls the create_user function and check the\n result (if 'true' the count will be increased).\n Returns the total count of users created.\n \"\"\"\n open_ldap = OpenLdap(args.user,\n args.password,\n args.address)\n smtp = SmtpServer(args.smtp_host,\n args.port,\n args.email,\n args.email_password)\n entries = {}\n count = 0\n for row in parse_csv(args.file):\n try:\n entries['name'] = row['name']\n entries['lastname'] = row['lastname']\n entries['email'] = row['email']\n except KeyError as e:\n return \"ERROR - Missing '{}' csv header\".format(e)\n entries['password'] = random_password()\n if create_user(open_ldap, smtp, entries):\n count += 1\n return \"INFO - Finished. Total of {} user(s) created\".format(count)\n\n\ndef main():\n args = parse_args()\n print(run(args))\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def process_all(folder):
    """Apply logical_units to every OpenITI text file under folder.

    Maintenance/annotation directories are pruned from the walk; only
    file names shaped like 0000Author.Book.Version-ara<digit> are
    processed.
    """
    pruned = ('OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech')
    name_re = re.compile(r'^\d{4}\w+\.\w+\.\w+-ara\d$')
    for root, dirs, files in os.walk(folder):
        # In-place assignment so os.walk never descends into pruned dirs.
        dirs[:] = [d for d in dirs if d not in pruned]
        for fname in files:
            if name_re.search(fname):
                logical_units(os.path.join(root, fname))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def logical_units(file):
    """Insert numbered logical-unit ids into an OpenITI text file.

    Every structural marker token (a non-word run containing a newline
    followed by '#'), except page markers, gets a zero-padded running
    Arabic-token count appended after its last '#'.  The annotated text
    is written to <file>_logical.  Files that already contain such ids,
    or that lack the metadata splitter, are reported and left untouched.
    """
    # A token made exclusively of Arabic letters / digits / diacritics.
    ar_ra = re.compile(
        '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'
        )
    with open(file, 'r', encoding='utf8') as f1:
        book = f1.read()
        if splitter in book:
            # Already-processed texts contain newline-#digits-# ids.
            log_ids = re.findall('\n#\\d+#', book)
            if len(log_ids) > 0:
                print(
                    '\tthe text already have %d logical units of this length' %
                    len(log_ids))
                pass
            else:
                new_data = []
                head = book.split(splitter)[0]
                text = book.split(splitter)[1]
                token_count = 0
                # Alternating word / non-word tokens of the body text.
                data = re.findall('\\w+|\\W+', text)
                # Pad width so the ids sort lexicographically.
                word_len = len(str(len(data)))
                data_len = len(data)
                for i in range(0, data_len):
                    if '\n#' in data[i]:
                        # NOTE(review): assumes a token always follows a
                        # marker — a marker that is the very last token
                        # would raise IndexError on data[i + 1].
                        if 'Page' in data[i + 1]:
                            # Page markers keep their original form.
                            new_data.append(data[i])
                        else:
                            # Insert '#<count>' after the last '#'.
                            last = data[i].rfind('#')
                            token_cnt_str = str(token_count + 1)
                            if len(token_cnt_str) < word_len:
                                tmp_cnt = token_cnt_str.zfill(word_len)
                            else:
                                tmp_cnt = token_cnt_str
                            tmp = data[i][:last] + '#' + tmp_cnt + data[i][last
                                :]
                            new_data.append(tmp)
                    elif ar_token_cnt(ar_ra, data[i]):
                        # Arabic token: advance the running count.
                        token_count += 1
                        new_data.append(data[i])
                    else:
                        new_data.append(data[i])
                log_text = ''.join(new_data)
                log_text = head + splitter + log_text
                with open(file + '_logical', 'w', encoding='utf8') as f:
                    f.write(log_text)
        else:
            print('The file is missing the splitter!')
            print(file)
def ar_token_cnt(ar_ra, text):
    """Count how many tokens of text fully match the compiled regex ar_ra."""
    tokens = re.findall('\\w+|\\W+', text)
    return sum(1 for token in tokens if ar_ra.search(token) is not None)
def process_all(folder):
    """Apply logical_units to every OpenITI text file under folder.

    Maintenance/annotation directories are pruned from the walk; only
    file names shaped like 0000Author.Book.Version-ara<digit> are
    processed.
    """
    pruned = ('OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech')
    name_re = re.compile(r'^\d{4}\w+\.\w+\.\w+-ara\d$')
    for root, dirs, files in os.walk(folder):
        # In-place assignment so os.walk never descends into pruned dirs.
        dirs[:] = [d for d in dirs if d not in pruned]
        for fname in files:
            if name_re.search(fname):
                logical_units(os.path.join(root, fname))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Marker separating the OpenITI metadata header from the text body.
splitter = '#META#Header#End#'
def logical_units(file):
    """Insert numbered logical-unit ids into an OpenITI text file.

    Every structural marker token (a non-word run containing a newline
    followed by '#'), except page markers, gets a zero-padded running
    Arabic-token count appended after its last '#'.  The annotated text
    is written to <file>_logical.  Files that already contain such ids,
    or that lack the metadata splitter, are reported and left untouched.
    """
    # A token made exclusively of Arabic letters / digits / diacritics.
    ar_ra = re.compile(
        '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'
        )
    with open(file, 'r', encoding='utf8') as f1:
        book = f1.read()
        if splitter in book:
            # Already-processed texts contain newline-#digits-# ids.
            log_ids = re.findall('\n#\\d+#', book)
            if len(log_ids) > 0:
                print(
                    '\tthe text already have %d logical units of this length' %
                    len(log_ids))
                pass
            else:
                new_data = []
                head = book.split(splitter)[0]
                text = book.split(splitter)[1]
                token_count = 0
                # Alternating word / non-word tokens of the body text.
                data = re.findall('\\w+|\\W+', text)
                # Pad width so the ids sort lexicographically.
                word_len = len(str(len(data)))
                data_len = len(data)
                for i in range(0, data_len):
                    if '\n#' in data[i]:
                        # NOTE(review): assumes a token always follows a
                        # marker — a marker that is the very last token
                        # would raise IndexError on data[i + 1].
                        if 'Page' in data[i + 1]:
                            # Page markers keep their original form.
                            new_data.append(data[i])
                        else:
                            # Insert '#<count>' after the last '#'.
                            last = data[i].rfind('#')
                            token_cnt_str = str(token_count + 1)
                            if len(token_cnt_str) < word_len:
                                tmp_cnt = token_cnt_str.zfill(word_len)
                            else:
                                tmp_cnt = token_cnt_str
                            tmp = data[i][:last] + '#' + tmp_cnt + data[i][last
                                :]
                            new_data.append(tmp)
                    elif ar_token_cnt(ar_ra, data[i]):
                        # Arabic token: advance the running count.
                        token_count += 1
                        new_data.append(data[i])
                    else:
                        new_data.append(data[i])
                log_text = ''.join(new_data)
                log_text = head + splitter + log_text
                with open(file + '_logical', 'w', encoding='utf8') as f:
                    f.write(log_text)
        else:
            print('The file is missing the splitter!')
            print(file)
def ar_token_cnt(ar_ra, text):
    """Count how many tokens of text fully match the compiled regex ar_ra."""
    tokens = re.findall('\\w+|\\W+', text)
    return sum(1 for token in tokens if ar_ra.search(token) is not None)
def process_all(folder):
    """Apply logical_units to every OpenITI text file under folder.

    Maintenance/annotation directories are pruned from the walk; only
    file names shaped like 0000Author.Book.Version-ara<digit> are
    processed.
    """
    pruned = ('OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech')
    name_re = re.compile(r'^\d{4}\w+\.\w+\.\w+-ara\d$')
    for root, dirs, files in os.walk(folder):
        # In-place assignment so os.walk never descends into pruned dirs.
        dirs[:] = [d for d in dirs if d not in pruned]
        for fname in files:
            if name_re.search(fname):
                logical_units(os.path.join(root, fname))
<|reserved_special_token_1|>
# Inserting logical unit ids for splitting texts into logical chunks.
import re
import os

# Marker separating the OpenITI metadata header from the text body.
splitter = '#META#Header#End#'


def logical_units(file):
    """Insert numbered logical-unit ids into an OpenITI text file.

    Every structural marker token (a non-word run containing a newline
    followed by '#'), except page markers ('Page' in the next token),
    gets a zero-padded running Arabic-token count appended after its
    last '#'.  The annotated text is written to <file>_logical.  Files
    that already contain such ids, or that lack the metadata splitter,
    are reported and left untouched.
    """
    # A token made exclusively of Arabic letters / digits / diacritics.
    ar_ra = re.compile(
        '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'
        )
    with open(file, 'r', encoding='utf8') as f1:
        book = f1.read()
        if splitter not in book:
            print('The file is missing the splitter!')
            print(file)
            return
        # Already-processed texts contain newline-#digits-# ids.
        log_ids = re.findall('\n#\\d+#', book)
        if len(log_ids) > 0:
            print('\tthe text already have %d logical units of this length' %
                  len(log_ids))
            return
        new_data = []
        head = book.split(splitter)[0]
        text = book.split(splitter)[1]
        token_count = 0
        # Alternating word / non-word tokens of the body text.
        data = re.findall('\\w+|\\W+', text)
        # Pad width so the ids sort lexicographically.
        word_len = len(str(len(data)))
        data_len = len(data)
        for i in range(0, data_len):
            if '\n#' in data[i]:
                # Bounds guard: the original read data[i + 1]
                # unconditionally and raised IndexError when a marker
                # was the very last token of the text.
                if i + 1 < data_len and 'Page' in data[i + 1]:
                    # Page markers keep their original form.
                    new_data.append(data[i])
                else:
                    # Insert '#<count>' after the last '#' of the marker.
                    last = data[i].rfind('#')
                    token_cnt_str = str(token_count + 1)
                    if len(token_cnt_str) < word_len:
                        tmp_cnt = token_cnt_str.zfill(word_len)
                    else:
                        tmp_cnt = token_cnt_str
                    tmp = data[i][:last] + '#' + tmp_cnt + data[i][last:]
                    new_data.append(tmp)
            elif ar_token_cnt(ar_ra, data[i]):
                # Arabic token: advance the running count.
                token_count += 1
                new_data.append(data[i])
            else:
                new_data.append(data[i])
        log_text = head + splitter + ''.join(new_data)
        with open(file + '_logical', 'w', encoding='utf8') as f:
            f.write(log_text)


def ar_token_cnt(ar_ra, text):
    """Count the tokens of text that fully match the compiled regex ar_ra."""
    return sum(ar_ra.search(t) is not None for t in re.findall('\\w+|\\W+',
        text))
def process_all(folder):
exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']
for root, dirs, files in os.walk(folder):
dirs[:] = [d for d in dirs if d not in exclude]
for file in files:
if re.search('^\\d{4}\\w+\\.\\w+\\.\\w+-ara\\d$', file):
logical_units(os.path.join(root, file))
<|reserved_special_token_1|>
# inserting logical unit ids for splitting texts into logical chunks
import re
import os
# Marker that separates the OpenITI metadata header from the text body.
splitter = "#META#Header#End#"
def logical_units(file):
    """Insert running logical-unit ids into an OpenITI text file.

    The text after the metadata header is split into word / non-word
    tokens.  Every structural marker (a token containing "\\n#") that is
    not followed by a Page marker gets the current Arabic-token count,
    zero-padded, appended after its last '#'.  The annotated text is
    written to ``<file>_logical``; files that already carry logical ids,
    or that lack the metadata splitter, are left untouched.
    """
    # Tokens made up exclusively of Arabic letters / digits / diacritics.
    ar_ra = re.compile("^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$")

    with open(file, "r", encoding="utf8") as f1:
        book = f1.read()

    # splitter test
    if splitter in book:
        # raw string: "\d" in a plain literal is an invalid escape sequence
        log_ids = re.findall(r"\n#\d+#", book)
        if len(log_ids) > 0:
            print("\tthe text already have %d logical units of this length" % len(log_ids))
        else:
            # insert logical unit ids
            new_data = []
            head = book.split(splitter)[0]
            text = book.split(splitter)[1]
            token_count = 0

            data = re.findall(r"\w+|\W+", text)
            # width used to zero-pad ids so they sort lexicographically
            word_len = len(str(len(data)))
            data_len = len(data)

            for i in range(0, data_len):
                if "\n#" in data[i]:
                    # bound check fixes an IndexError when the marker is
                    # the very last token of the text
                    if i + 1 < data_len and "Page" in data[i + 1]:
                        new_data.append(data[i])
                    else:
                        last = data[i].rfind("#")
                        # zfill is a no-op on already-wide strings, so no
                        # explicit length check is needed
                        tmp_cnt = str(token_count + 1).zfill(word_len)
                        new_data.append(data[i][:last] + "#" + tmp_cnt + data[i][last:])
                elif ar_token_cnt(ar_ra, data[i]):  # purely Arabic token
                    token_count += 1
                    new_data.append(data[i])
                else:
                    new_data.append(data[i])

            log_text = head + splitter + "".join(new_data)

            with open(file + "_logical", "w", encoding="utf8") as f:
                f.write(log_text)

    else:
        print("The file is missing the splitter!")
        print(file)
def ar_token_cnt(ar_ra, text):
    """Return how many word/non-word tokens of *text* fully match *ar_ra*."""
    count = 0
    for token in re.findall(r"\w+|\W+", text):
        if ar_ra.search(token) is not None:
            count += 1
    return count
# process all texts in OpenITI
def process_all(folder):
    """Walk *folder* and run logical_units() on every OpenITI text file.

    Repository service directories are pruned from the walk.  Text files
    are recognised by the OpenITI URI pattern ``NNNNAuthor.Book.Version-araN``.
    """
    exclude = ["OpenITI.github.io", "Annotation", "_maintenance", "i.mech"]
    for root, dirs, files in os.walk(folder):
        # prune excluded directories in place so os.walk skips them
        dirs[:] = [d for d in dirs if d not in exclude]
        for file in files:
            # raw string: "\d"/"\w" in plain literals are invalid escapes
            if re.search(r"^\d{4}\w+\.\w+\.\w+-ara\d$", file):
                logical_units(os.path.join(root, file))
# /media/rostam/Seagate Backup Plus Drive
# process_all("/home/rostam/projs/KITAB/test")
# print("Done!")
|
flexible
|
{
"blob_id": "5c001303962315afe2512eb307376f6f7a883cf9",
"index": 6831,
"step-1": "<mask token>\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-2": "<mask token>\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-3": "<mask token>\nsplitter = '#META#Header#End#'\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-4": "import re\nimport os\nsplitter = '#META#Header#End#'\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-5": "# inserting logical unit ids for splitting texts into logical chunks\n\nimport re\nimport os\n\nsplitter = \"#META#Header#End#\"\n\n\ndef logical_units(file):\n ar_ra = re.compile(\"^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$\")\n\n with open(file, \"r\", encoding=\"utf8\") as f1:\n book = f1.read()\n\n # splitter test\n if splitter in book:\n # logical units\n log_ids = re.findall(\"\\n#\\d+#\", book)\n if len(log_ids) > 0:\n print(\"\\tthe text already have %d logical units of this length\" % len(log_ids))\n pass\n else:\n # insert logical unit ids\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n\n data = re.findall(r\"\\w+|\\W+\", text)\n word_len = len(str(len(data)))\n data_len = len(data)\n\n for i in range(0, data_len):\n if \"\\n#\" in data[i]:\n if \"Page\" in data[i + 1]:# or ar_token_cnt(ar_ra, data[i + 1]) <= 0:\n new_data.append(data[i])\n else:\n last = data[i].rfind(\"#\")\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + \"#\" + tmp_cnt + data[i][last:]\n new_data.append(tmp)\n\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n\n log_text = \"\".join(new_data)\n log_text = head + splitter + log_text\n\n with open(file + \"_logical\", \"w\", encoding=\"utf8\") as f:\n f.write(log_text)\n\n else:\n print(\"The file is missing the splitter!\")\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall(r\"\\w+|\\W+\", text))\n\n\n# process all texts in OpenITI\n\n\ndef process_all(folder):\n exclude = ([\"OpenITI.github.io\", \"Annotation\", \"_maintenance\", \"i.mech\"])\n for root, dirs, files in os.walk(folder):\n # print(\"root: \",root)\n dirs[:] = [d for d in dirs if d not in exclude]\n # print(\"dir: \",dirs)\n for 
file in files:\n if re.search(\"^\\d{4}\\w+\\.\\w+\\.\\w+-ara\\d$\", file):\n logical_units(os.path.join(root, file))\n # return\n # input()\n\n\n# /media/rostam/Seagate Backup Plus Drive\n# process_all(\"/home/rostam/projs/KITAB/test\")\n\n# print(\"Done!\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from helper import *
async def main(URL, buy_time):
    """Log in to mi.com, wait until the sale time, then add the item to cart.

    URL      -- product page to buy from
    buy_time -- sale start time string, e.g. "2020-02-06 12:55:50"
    """
    browser, page = await get_window()

    # 30 s window for the user to scan the login QR code
    await page.goto('https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180')
    await asyncio.sleep(30)

    # 10 s for the user to pick a product variant
    await page.goto(URL)
    await asyncio.sleep(10)

    await sleep_time(buy_time)
    old_url = page.url

    # Add to cart; retry until the button is clickable.
    # Bug fix: the counter used to be reset inside the loop, so the retry
    # message always showed 0 — initialise it once before the loop.
    index = 0
    while True:
        try:
            print(f'重试 {index}')
            # click the "add to cart" button
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:  # narrow from bare except: don't swallow CancelledError
            index += 1
            await asyncio.sleep(CLICK_FREQUENCY)

    # wait for the page to navigate away from the product page
    while True:
        if page.url != old_url:
            break
        await asyncio.sleep(CLICK_FREQUENCY)

    # click "go to cart"; retry until clickable
    while True:
        try:
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            await asyncio.sleep(CLICK_FREQUENCY)

    # leave time to complete the payment manually
    await asyncio.sleep(100)
    await close_window(browser)
if __name__ == '__main__':
    # Interactive entry point: ask for the product link and sale time.
    target_url = input('宝贝链接:\n')
    sale_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\n')
    asyncio.run(main(target_url, sale_time))
|
normal
|
{
"blob_id": "1e87f625fb7bd9f9bf4233229332c909702954a5",
"index": 4334,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n await page.goto(\n 'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'\n )\n await asyncio.sleep(30)\n await page.goto(URL)\n await asyncio.sleep(10)\n await sleep_time(buy_time)\n old_url = page.url\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n try:\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n await asyncio.sleep(100)\n await close_window(browser)\n\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))\n",
"step-3": "from helper import *\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n await page.goto(\n 'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'\n )\n await asyncio.sleep(30)\n await page.goto(URL)\n await asyncio.sleep(10)\n await sleep_time(buy_time)\n old_url = page.url\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n try:\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n await asyncio.sleep(100)\n await close_window(browser)\n\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))\n",
"step-4": "from helper import *\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n # 30s登陆时间\n await page.goto('https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180')\n await asyncio.sleep(30)\n\n # 选款式时间10s\n await page.goto(URL)\n await asyncio.sleep(10)\n\n await sleep_time(buy_time)\n old_url = page.url\n\n #加入购物车\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n # 找到“加入购物车”,点击\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n\n # 等待页面跳转\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n\n while True:\n try:\n # 找到“进入购物车”,点击\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n # 付款\n await asyncio.sleep(100)\n await close_window(browser)\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def test_index_with_url():
with Client(app.app) as client:
response = client.http.get('/?url=https://google.com')
assert response.status_code == HTTPStatus.MOVED_PERMANENTLY
assert response.headers['Location'] is not None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_index_with_url():
with Client(app.app) as client:
response = client.http.get('/?url=https://google.com')
assert response.status_code == HTTPStatus.MOVED_PERMANENTLY
assert response.headers['Location'] is not None
<|reserved_special_token_0|>
def test_link_received_by_sns():
with Client(app.app) as client:
with open('sns_message.txt') as f:
event = client.events.generate_sns_event(message=f.read())
with open('/tmp/event.json', 'w') as f:
import json
f.write(json.dumps(event))
response = client.lambda_.invoke('handle_link_visit', event)
assert response.payload['message'] == 'link visited'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_index_with_url():
with Client(app.app) as client:
response = client.http.get('/?url=https://google.com')
assert response.status_code == HTTPStatus.MOVED_PERMANENTLY
assert response.headers['Location'] is not None
def test_index_without_url():
with Client(app.app) as client:
response = client.http.get('/')
assert response.body == b'Invalid or missing url'
def test_link_received_by_sns():
with Client(app.app) as client:
with open('sns_message.txt') as f:
event = client.events.generate_sns_event(message=f.read())
with open('/tmp/event.json', 'w') as f:
import json
f.write(json.dumps(event))
response = client.lambda_.invoke('handle_link_visit', event)
assert response.payload['message'] == 'link visited'
<|reserved_special_token_1|>
from http import HTTPStatus
import app
from chalice.test import Client
def test_index_with_url():
with Client(app.app) as client:
response = client.http.get('/?url=https://google.com')
assert response.status_code == HTTPStatus.MOVED_PERMANENTLY
assert response.headers['Location'] is not None
def test_index_without_url():
with Client(app.app) as client:
response = client.http.get('/')
assert response.body == b'Invalid or missing url'
def test_link_received_by_sns():
with Client(app.app) as client:
with open('sns_message.txt') as f:
event = client.events.generate_sns_event(message=f.read())
with open('/tmp/event.json', 'w') as f:
import json
f.write(json.dumps(event))
response = client.lambda_.invoke('handle_link_visit', event)
assert response.payload['message'] == 'link visited'
<|reserved_special_token_1|>
from http import HTTPStatus
#from pytest_chalice.handlers import RequestHandler
import app
from chalice.test import Client
def test_index_with_url():
    """GET / with a url param should 301-redirect to a shortened location."""
    with Client(app.app) as client:
        resp = client.http.get('/?url=https://google.com')
        assert resp.status_code == HTTPStatus.MOVED_PERMANENTLY
        assert resp.headers['Location'] is not None
def test_index_without_url():
    """GET / without a url param should return an error message body."""
    with Client(app.app) as client:
        resp = client.http.get('/')
        assert resp.body == b'Invalid or missing url'
def test_link_received_by_sns():
    """Invoking the SNS-triggered lambda with a link message marks it visited."""
    import json
    with Client(app.app) as client:
        with open('sns_message.txt') as f:
            sns_event = client.events.generate_sns_event(message=f.read())
        # dump the generated event so it can be inspected manually
        with open('/tmp/event.json', 'w') as out:
            json.dump(sns_event, out)
        resp = client.lambda_.invoke('handle_link_visit', sns_event)
        assert resp.payload['message'] == 'link visited'
|
flexible
|
{
"blob_id": "e7e9a53d4c41448521b324d51641a46827faa692",
"index": 2607,
"step-1": "<mask token>\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\n<mask token>\n\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'\n",
"step-3": "<mask token>\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\ndef test_index_without_url():\n with Client(app.app) as client:\n response = client.http.get('/')\n assert response.body == b'Invalid or missing url'\n\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'\n",
"step-4": "from http import HTTPStatus\nimport app\nfrom chalice.test import Client\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\ndef test_index_without_url():\n with Client(app.app) as client:\n response = client.http.get('/')\n assert response.body == b'Invalid or missing url'\n\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'\n",
"step-5": "from http import HTTPStatus\n#from pytest_chalice.handlers import RequestHandler\nimport app\nfrom chalice.test import Client\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\ndef test_index_without_url():\n with Client(app.app) as client:\n response = client.http.get('/')\n assert response.body == b'Invalid or missing url'\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
def detectPeriod(data):
    """Return all time-duration expressions found in *data*.

    Recognises Chinese durations such as "一個小時", "30分鐘" or "兩個半小時":
    a number part (digits or Chinese numerals) optionally followed by
    hour, minute and second units.

    Bug fix: the character classes were previously wrapped in a second
    pair of brackets ("[[...]]"), which broke the pattern, and the
    matches were never returned.
    """
    numWord = "0-9,一二三四五六七八九十兩半"  # characters allowed in the number part
    hourWord = "小時鐘頭"
    minWord = "分鐘"
    secWord = "秒鐘"

    timePat = (
        "[" + numWord + "]+點?\\.?[" + numWord + "]*個?半?"
        "[" + hourWord + "]*半?又?[" + numWord + "]*"
        "[" + minWord + "]*又?[" + numWord + "]*[" + secWord + "]*"
    )
    return re.findall(timePat, data)
def main():
    # Smoke test: the detector should find the duration "一個小時".
    detectPeriod("我要去游泳一個小時")
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "397686964acbf640a5463a3a7095d85832545d9e",
"index": 6462,
"step-1": "<mask token>\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef detectPeriod(data):\n numWord = '[0-9,一二三四五六七八九十兩半]'\n hourWord = '小時鐘頭'\n minWord = '分鐘'\n secWord = '秒鐘'\n timePat = ('[' + numWord + ']+點?\\\\.?[' + numWord + ']*個?半?[' + hourWord +\n ']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +\n secWord + ']*')\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef detectPeriod(data):\n numWord = '[0-9,一二三四五六七八九十兩半]'\n hourWord = '小時鐘頭'\n minWord = '分鐘'\n secWord = '秒鐘'\n timePat = ('[' + numWord + ']+點?\\\\.?[' + numWord + ']*個?半?[' + hourWord +\n ']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +\n secWord + ']*')\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import re\n\n\ndef detectPeriod(data):\n numWord = '[0-9,一二三四五六七八九十兩半]'\n hourWord = '小時鐘頭'\n minWord = '分鐘'\n secWord = '秒鐘'\n timePat = ('[' + numWord + ']+點?\\\\.?[' + numWord + ']*個?半?[' + hourWord +\n ']*半?又?[' + numWord + ']*[' + minWord + ']*又?[' + numWord + ']*[' +\n secWord + ']*')\n\n\ndef main():\n detectPeriod('我要去游泳一個小時')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import re\n\n\ndef detectPeriod(data):\n \n numWord = \"[0-9,一二三四五六七八九十兩半]\"\n hourWord = \"小時鐘頭\"\n minWord = \"分鐘\"\n secWord = \"秒鐘\"\n\n\n timePat = \"[\"+numWord+\"]+點?\\.?[\"+numWord+\"]*個?半?[\"+hourWord+\"]*半?又?[\"+numWord+\"]*[\"+minWord+\"]*又?[\"+numWord+\"]*[\"+secWord+\"]*\"\n\n\n\n\ndef main():\n detectPeriod(\"我要去游泳一個小時\")\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(1, n + 1):
for j in range(1, n + 1):
if i == j:
graph[i][j] = 0
for _ in range(m):
a, b = map(int, Read().split())
graph[a][b] = 1
for k in range(1, n + 1):
for i in range(1, n + 1):
for j in range(1, n + 1):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
<|reserved_special_token_0|>
for i in range(1, n + 1):
count = 0
for j in range(1, n + 1):
if graph[i][j] != INF or graph[j][i] != INF:
count += 1
if count == n:
result += 1
print(result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Read = stdin.readline
INF = int(1000000000.0)
n, m = map(int, Read().split())
graph = [([INF] * (n + 1)) for _ in range(n + 1)]
for i in range(1, n + 1):
for j in range(1, n + 1):
if i == j:
graph[i][j] = 0
for _ in range(m):
a, b = map(int, Read().split())
graph[a][b] = 1
for k in range(1, n + 1):
for i in range(1, n + 1):
for j in range(1, n + 1):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
result = 0
for i in range(1, n + 1):
count = 0
for j in range(1, n + 1):
if graph[i][j] != INF or graph[j][i] != INF:
count += 1
if count == n:
result += 1
print(result)
<|reserved_special_token_1|>
from sys import stdin
Read = stdin.readline
INF = int(1000000000.0)
n, m = map(int, Read().split())
graph = [([INF] * (n + 1)) for _ in range(n + 1)]
for i in range(1, n + 1):
for j in range(1, n + 1):
if i == j:
graph[i][j] = 0
for _ in range(m):
a, b = map(int, Read().split())
graph[a][b] = 1
for k in range(1, n + 1):
for i in range(1, n + 1):
for j in range(1, n + 1):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
result = 0
for i in range(1, n + 1):
count = 0
for j in range(1, n + 1):
if graph[i][j] != INF or graph[j][i] != INF:
count += 1
if count == n:
result += 1
print(result)
<|reserved_special_token_1|>
from sys import stdin
Read = stdin.readline
# Effectively-infinite distance for unreachable vertex pairs.
INF = int(1e9)
# n vertices, m directed "a precedes b" comparison edges (BOJ 2458-style).
n, m = map(int, Read().split())
# graph[i][j]: shortest path length i -> j; vertices are 1-indexed.
graph = [[INF] * (n+1) for _ in range(n+1)]
for i in range(1, n+1):
    for j in range(1, n+1):
        if i == j:
            graph[i][j] = 0
# Each input edge a -> b is a direct comparison with distance 1.
for _ in range(m):
    a, b = map(int, Read().split())
    graph[a][b] = 1
# Floyd-Warshall: afterwards graph[i][j] < INF iff j is reachable from i.
for k in range(1, n+1):
    for i in range(1, n+1):
        for j in range(1, n+1):
            graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
# A vertex's rank is fully determined exactly when it is comparable
# (reachable in at least one direction) with every vertex, itself included.
result = 0
for i in range(1, n+1):
    count = 0
    for j in range(1, n+1):
        if graph[i][j] != INF or graph[j][i] != INF:
            count += 1
    if count == n:
        result += 1
print(result)
|
flexible
|
{
"blob_id": "6ec39aa712c8abe610418e410883ff168d73126d",
"index": 3292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\n<mask token>\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n",
"step-3": "<mask token>\nRead = stdin.readline\nINF = int(1000000000.0)\nn, m = map(int, Read().split())\ngraph = [([INF] * (n + 1)) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\nresult = 0\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n",
"step-4": "from sys import stdin\nRead = stdin.readline\nINF = int(1000000000.0)\nn, m = map(int, Read().split())\ngraph = [([INF] * (n + 1)) for _ in range(n + 1)]\nfor i in range(1, n + 1):\n for j in range(1, n + 1):\n if i == j:\n graph[i][j] = 0\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\nfor k in range(1, n + 1):\n for i in range(1, n + 1):\n for j in range(1, n + 1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\nresult = 0\nfor i in range(1, n + 1):\n count = 0\n for j in range(1, n + 1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n if count == n:\n result += 1\nprint(result)\n",
"step-5": "from sys import stdin\nRead = stdin.readline\nINF = int(1e9)\n\nn, m = map(int, Read().split())\ngraph = [[INF] * (n+1) for _ in range(n+1)]\n\nfor i in range(1, n+1):\n for j in range(1, n+1):\n if i == j:\n graph[i][j] = 0\n\nfor _ in range(m):\n a, b = map(int, Read().split())\n graph[a][b] = 1\n\nfor k in range(1, n+1):\n for i in range(1, n+1):\n for j in range(1, n+1):\n graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])\n\nresult = 0\nfor i in range(1, n+1):\n count = 0\n for j in range(1, n+1):\n if graph[i][j] != INF or graph[j][i] != INF:\n count += 1\n\n if count == n:\n result += 1\n\nprint(result)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LoginPageTests(RegistrationBaseTestCase):
    """UI tests for the WhatsApp Web login (QR-code) page."""
    def test_can_open_whatsapp_login_page(self):
        # Page title and QR-scan instruction text should both be present.
        self.assertTrue(self.login_page.is_title_matches())
        self.assertTrue(self.login_page.is_instruction_title_matches())
    def test_checkbox_remember_me_is_checked_by_default(self):
        # "Keep me signed in" is expected to start out checked.
        self.assertTrue(self.login_page.is_remember_me_selected())
    def test_can_uncheck_checkbox_remember_me(self):
        self.login_page.remember_me = False
        self.assertFalse(self.login_page.is_remember_me_selected())
    def test_can_uncheck_and_check_again_checkbox_remember_me(self):
        # Toggle off and back on; the checkbox must follow both changes.
        self.login_page.remember_me = False
        self.assertFalse(self.login_page.is_remember_me_selected())
        self.login_page.remember_me = True
        self.assertTrue(self.login_page.is_remember_me_selected())
    def test_can_manually_login_successfully(self):
        base_page = BasePage(self.driver)
        base_page.load()
        # NOTE(review): fixed sleep gives the tester time to scan the QR
        # code manually — an explicit wait would be more robust.
        time.sleep(8)
        # After login the main chat UI panes should be available...
        self.assertTrue(base_page.is_title_matches())
        self.assertTrue(base_page.is_welcome_page_available())
        self.assertTrue(base_page.is_nav_bar_page_available())
        self.assertTrue(base_page.is_search_page_available())
        self.assertTrue(base_page.is_pane_page_available())
        # ...but no chat conversation is open yet.
        self.assertFalse(base_page.is_chat_page_available())
class LogoutTest(RegistrationBaseTestCase):
    """UI test for logging out via the header menu after a login."""
    def test_can_logout_successfully_after_login(self):
        header_page = HeaderPage(self.driver)
        # NOTE(review): fixed sleep waits for the session to load — an
        # explicit wait on a page element would be more robust.
        time.sleep(8)
        self.assertTrue(header_page.is_welcome_page_available())
        self.assertTrue(header_page.is_nav_bar_page_available())
        header_page.logout()
        # After logout the QR login page should be displayed again.
        self.assertTrue(self.login_page.is_title_matches())
        self.assertTrue(self.login_page.is_instruction_title_matches())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegistrationBaseTestCase(TestCase):
<|reserved_special_token_0|>
def tearDown(self):
self.driver.close()
class LoginPageTests(RegistrationBaseTestCase):
def test_can_open_whatsapp_login_page(self):
self.assertTrue(self.login_page.is_title_matches())
self.assertTrue(self.login_page.is_instruction_title_matches())
def test_checkbox_remember_me_is_checked_by_default(self):
self.assertTrue(self.login_page.is_remember_me_selected())
def test_can_uncheck_checkbox_remember_me(self):
self.login_page.remember_me = False
self.assertFalse(self.login_page.is_remember_me_selected())
def test_can_uncheck_and_check_again_checkbox_remember_me(self):
self.login_page.remember_me = False
self.assertFalse(self.login_page.is_remember_me_selected())
self.login_page.remember_me = True
self.assertTrue(self.login_page.is_remember_me_selected())
def test_can_manually_login_successfully(self):
base_page = BasePage(self.driver)
base_page.load()
time.sleep(8)
self.assertTrue(base_page.is_title_matches())
self.assertTrue(base_page.is_welcome_page_available())
self.assertTrue(base_page.is_nav_bar_page_available())
self.assertTrue(base_page.is_search_page_available())
self.assertTrue(base_page.is_pane_page_available())
self.assertFalse(base_page.is_chat_page_available())
class LogoutTest(RegistrationBaseTestCase):
def test_can_logout_successfully_after_login(self):
header_page = HeaderPage(self.driver)
time.sleep(8)
self.assertTrue(header_page.is_welcome_page_available())
self.assertTrue(header_page.is_nav_bar_page_available())
header_page.logout()
self.assertTrue(self.login_page.is_title_matches())
self.assertTrue(self.login_page.is_instruction_title_matches())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegistrationBaseTestCase(TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.maximize_window()
self.login_page = LoginPage(self.driver)
self.login_page.load()
def tearDown(self):
self.driver.close()
class LoginPageTests(RegistrationBaseTestCase):
def test_can_open_whatsapp_login_page(self):
self.assertTrue(self.login_page.is_title_matches())
self.assertTrue(self.login_page.is_instruction_title_matches())
def test_checkbox_remember_me_is_checked_by_default(self):
self.assertTrue(self.login_page.is_remember_me_selected())
def test_can_uncheck_checkbox_remember_me(self):
self.login_page.remember_me = False
self.assertFalse(self.login_page.is_remember_me_selected())
def test_can_uncheck_and_check_again_checkbox_remember_me(self):
self.login_page.remember_me = False
self.assertFalse(self.login_page.is_remember_me_selected())
self.login_page.remember_me = True
self.assertTrue(self.login_page.is_remember_me_selected())
def test_can_manually_login_successfully(self):
base_page = BasePage(self.driver)
base_page.load()
time.sleep(8)
self.assertTrue(base_page.is_title_matches())
self.assertTrue(base_page.is_welcome_page_available())
self.assertTrue(base_page.is_nav_bar_page_available())
self.assertTrue(base_page.is_search_page_available())
self.assertTrue(base_page.is_pane_page_available())
self.assertFalse(base_page.is_chat_page_available())
class LogoutTest(RegistrationBaseTestCase):
def test_can_logout_successfully_after_login(self):
header_page = HeaderPage(self.driver)
time.sleep(8)
self.assertTrue(header_page.is_welcome_page_available())
self.assertTrue(header_page.is_nav_bar_page_available())
header_page.logout()
self.assertTrue(self.login_page.is_title_matches())
self.assertTrue(self.login_page.is_instruction_title_matches())
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import time
import unittest
from unittest import TestCase
from selenium import webdriver
from simon.accounts.pages import LoginPage
from simon.header.pages import HeaderPage
from simon.pages import BasePage
class RegistrationBaseTestCase(TestCase):
    """Shared fixture: opens a fresh Firefox session on the WhatsApp login page.

    Subclasses inherit a ready-to-use ``self.driver`` and ``self.login_page``.
    """

    def setUp(self):
        # A brand-new browser per test keeps the tests independent.
        self.driver = webdriver.Firefox()
        self.driver.maximize_window()
        self.login_page = LoginPage(self.driver)
        self.login_page.load()

    def tearDown(self):
        # quit() (not close()) ends the whole WebDriver session; close() only
        # closes the current window and can leak the browser process.
        self.driver.quit()
class LoginPageTests(RegistrationBaseTestCase):
    """UI checks for the WhatsApp Web login (QR-code) page."""

    def test_can_open_whatsapp_login_page(self):
        """The page title and the login instructions are displayed."""
        self.assertTrue(self.login_page.is_title_matches())
        self.assertTrue(self.login_page.is_instruction_title_matches())

    def test_checkbox_remember_me_is_checked_by_default(self):
        """The 'keep me signed in' checkbox starts checked."""
        self.assertTrue(self.login_page.is_remember_me_selected())

    def test_can_uncheck_checkbox_remember_me(self):
        """Unchecking the checkbox is reflected by the page object."""
        self.login_page.remember_me = False
        self.assertFalse(self.login_page.is_remember_me_selected())

    def test_can_uncheck_and_check_again_checkbox_remember_me(self):
        """The checkbox can be toggled off and then back on."""
        self.login_page.remember_me = False
        self.assertFalse(self.login_page.is_remember_me_selected())
        self.login_page.remember_me = True
        self.assertTrue(self.login_page.is_remember_me_selected())

    def test_can_manually_login_successfully(self):
        """After a manual QR-code scan the main chat UI becomes available."""
        base_page = BasePage(self.driver)
        base_page.load()
        # Manual step: 8-second window for the tester to scan the QR code.
        time.sleep(8)
        self.assertTrue(base_page.is_title_matches())
        self.assertTrue(base_page.is_welcome_page_available())
        self.assertTrue(base_page.is_nav_bar_page_available())
        self.assertTrue(base_page.is_search_page_available())
        self.assertTrue(base_page.is_pane_page_available())
        # The chat pane only appears after a conversation has been opened.
        self.assertFalse(base_page.is_chat_page_available())
class LogoutTest(RegistrationBaseTestCase):
    """Checks that logging out returns the user to the login (QR) page."""

    def test_can_logout_successfully_after_login(self):
        """After a manual login, the header's logout action shows the login page again."""
        header_page = HeaderPage(self.driver)
        # Manual step: 8-second window for the tester to scan the QR code.
        time.sleep(8)
        self.assertTrue(header_page.is_welcome_page_available())
        self.assertTrue(header_page.is_nav_bar_page_available())
        header_page.logout()
        # Back on the login page: title and instructions visible again.
        self.assertTrue(self.login_page.is_title_matches())
        self.assertTrue(self.login_page.is_instruction_title_matches())
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import time
import unittest
from unittest import TestCase
from selenium import webdriver
from simon.accounts.pages import LoginPage
from simon.header.pages import HeaderPage
from simon.pages import BasePage
class RegistrationBaseTestCase(TestCase):
    """Shared fixture: opens a fresh Firefox session on the WhatsApp login page.

    Subclasses inherit a ready-to-use ``self.driver`` and ``self.login_page``.
    """

    def setUp(self):
        # A brand-new browser per test keeps the tests independent.
        self.driver = webdriver.Firefox()
        self.driver.maximize_window()
        self.login_page = LoginPage(self.driver)
        self.login_page.load()

    def tearDown(self):
        # quit() (not close()) ends the whole WebDriver session; close() only
        # closes the current window and can leak the browser process.
        self.driver.quit()
class LoginPageTests(RegistrationBaseTestCase):
    """UI checks for the WhatsApp Web login (QR-code) page."""

    def test_can_open_whatsapp_login_page(self):
        """The page title and the login instructions are displayed."""
        self.assertTrue(self.login_page.is_title_matches())
        self.assertTrue(self.login_page.is_instruction_title_matches())

    def test_checkbox_remember_me_is_checked_by_default(self):
        """The 'keep me signed in' checkbox starts checked."""
        self.assertTrue(self.login_page.is_remember_me_selected())

    def test_can_uncheck_checkbox_remember_me(self):
        """Unchecking the checkbox is reflected by the page object."""
        self.login_page.remember_me = False
        self.assertFalse(self.login_page.is_remember_me_selected())

    def test_can_uncheck_and_check_again_checkbox_remember_me(self):
        """The checkbox can be toggled off and then back on."""
        self.login_page.remember_me = False
        self.assertFalse(self.login_page.is_remember_me_selected())
        self.login_page.remember_me = True
        self.assertTrue(self.login_page.is_remember_me_selected())

    def test_can_manually_login_successfully(self):
        """After a manual QR-code scan the main chat UI becomes available."""
        base_page = BasePage(self.driver)
        base_page.load()
        # Manual step: 8-second window for the tester to scan the QR code.
        time.sleep(8)
        self.assertTrue(base_page.is_title_matches())
        self.assertTrue(base_page.is_welcome_page_available())
        self.assertTrue(base_page.is_nav_bar_page_available())
        self.assertTrue(base_page.is_search_page_available())
        self.assertTrue(base_page.is_pane_page_available())
        # The chat pane only appears after a conversation has been opened.
        self.assertFalse(base_page.is_chat_page_available())
class LogoutTest(RegistrationBaseTestCase):
    """Checks that logging out returns the user to the login (QR) page."""

    def test_can_logout_successfully_after_login(self):
        """After a manual login, the header's logout action shows the login page again."""
        header_page = HeaderPage(self.driver)
        # Manual step: 8-second window for the tester to scan the QR code.
        time.sleep(8)
        self.assertTrue(header_page.is_welcome_page_available())
        self.assertTrue(header_page.is_nav_bar_page_available())
        header_page.logout()
        # Back on the login page: title and instructions visible again.
        self.assertTrue(self.login_page.is_title_matches())
        self.assertTrue(self.login_page.is_instruction_title_matches())
if __name__ == "__main__":
unittest.main()
|
flexible
|
{
"blob_id": "380a28958fc6d1b403b29ede229860bf5f709572",
"index": 2550,
"step-1": "<mask token>\n\n\nclass LoginPageTests(RegistrationBaseTestCase):\n\n def test_can_open_whatsapp_login_page(self):\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n def test_checkbox_remember_me_is_checked_by_default(self):\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_and_check_again_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n self.login_page.remember_me = True\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_manually_login_successfully(self):\n base_page = BasePage(self.driver)\n base_page.load()\n time.sleep(8)\n self.assertTrue(base_page.is_title_matches())\n self.assertTrue(base_page.is_welcome_page_available())\n self.assertTrue(base_page.is_nav_bar_page_available())\n self.assertTrue(base_page.is_search_page_available())\n self.assertTrue(base_page.is_pane_page_available())\n self.assertFalse(base_page.is_chat_page_available())\n\n\nclass LogoutTest(RegistrationBaseTestCase):\n\n def test_can_logout_successfully_after_login(self):\n header_page = HeaderPage(self.driver)\n time.sleep(8)\n self.assertTrue(header_page.is_welcome_page_available())\n self.assertTrue(header_page.is_nav_bar_page_available())\n header_page.logout()\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RegistrationBaseTestCase(TestCase):\n <mask token>\n\n def tearDown(self):\n self.driver.close()\n\n\nclass LoginPageTests(RegistrationBaseTestCase):\n\n def test_can_open_whatsapp_login_page(self):\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n def test_checkbox_remember_me_is_checked_by_default(self):\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_and_check_again_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n self.login_page.remember_me = True\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_manually_login_successfully(self):\n base_page = BasePage(self.driver)\n base_page.load()\n time.sleep(8)\n self.assertTrue(base_page.is_title_matches())\n self.assertTrue(base_page.is_welcome_page_available())\n self.assertTrue(base_page.is_nav_bar_page_available())\n self.assertTrue(base_page.is_search_page_available())\n self.assertTrue(base_page.is_pane_page_available())\n self.assertFalse(base_page.is_chat_page_available())\n\n\nclass LogoutTest(RegistrationBaseTestCase):\n\n def test_can_logout_successfully_after_login(self):\n header_page = HeaderPage(self.driver)\n time.sleep(8)\n self.assertTrue(header_page.is_welcome_page_available())\n self.assertTrue(header_page.is_nav_bar_page_available())\n header_page.logout()\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RegistrationBaseTestCase(TestCase):\n\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.maximize_window()\n self.login_page = LoginPage(self.driver)\n self.login_page.load()\n\n def tearDown(self):\n self.driver.close()\n\n\nclass LoginPageTests(RegistrationBaseTestCase):\n\n def test_can_open_whatsapp_login_page(self):\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n def test_checkbox_remember_me_is_checked_by_default(self):\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_and_check_again_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n self.login_page.remember_me = True\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_manually_login_successfully(self):\n base_page = BasePage(self.driver)\n base_page.load()\n time.sleep(8)\n self.assertTrue(base_page.is_title_matches())\n self.assertTrue(base_page.is_welcome_page_available())\n self.assertTrue(base_page.is_nav_bar_page_available())\n self.assertTrue(base_page.is_search_page_available())\n self.assertTrue(base_page.is_pane_page_available())\n self.assertFalse(base_page.is_chat_page_available())\n\n\nclass LogoutTest(RegistrationBaseTestCase):\n\n def test_can_logout_successfully_after_login(self):\n header_page = HeaderPage(self.driver)\n time.sleep(8)\n self.assertTrue(header_page.is_welcome_page_available())\n self.assertTrue(header_page.is_nav_bar_page_available())\n header_page.logout()\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import time\nimport unittest\nfrom unittest import TestCase\nfrom selenium import webdriver\nfrom simon.accounts.pages import LoginPage\nfrom simon.header.pages import HeaderPage\nfrom simon.pages import BasePage\n\n\nclass RegistrationBaseTestCase(TestCase):\n\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.maximize_window()\n self.login_page = LoginPage(self.driver)\n self.login_page.load()\n\n def tearDown(self):\n self.driver.close()\n\n\nclass LoginPageTests(RegistrationBaseTestCase):\n\n def test_can_open_whatsapp_login_page(self):\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n def test_checkbox_remember_me_is_checked_by_default(self):\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_and_check_again_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n self.login_page.remember_me = True\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_manually_login_successfully(self):\n base_page = BasePage(self.driver)\n base_page.load()\n time.sleep(8)\n self.assertTrue(base_page.is_title_matches())\n self.assertTrue(base_page.is_welcome_page_available())\n self.assertTrue(base_page.is_nav_bar_page_available())\n self.assertTrue(base_page.is_search_page_available())\n self.assertTrue(base_page.is_pane_page_available())\n self.assertFalse(base_page.is_chat_page_available())\n\n\nclass LogoutTest(RegistrationBaseTestCase):\n\n def test_can_logout_successfully_after_login(self):\n header_page = HeaderPage(self.driver)\n time.sleep(8)\n self.assertTrue(header_page.is_welcome_page_available())\n self.assertTrue(header_page.is_nav_bar_page_available())\n header_page.logout()\n 
self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import time\nimport unittest\nfrom unittest import TestCase\n\nfrom selenium import webdriver\n\nfrom simon.accounts.pages import LoginPage\nfrom simon.header.pages import HeaderPage\nfrom simon.pages import BasePage\n\n\nclass RegistrationBaseTestCase(TestCase):\n def setUp(self):\n self.driver = webdriver.Firefox()\n self.driver.maximize_window()\n self.login_page = LoginPage(self.driver)\n self.login_page.load()\n\n def tearDown(self):\n self.driver.close()\n\n\nclass LoginPageTests(RegistrationBaseTestCase):\n def test_can_open_whatsapp_login_page(self):\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n def test_checkbox_remember_me_is_checked_by_default(self):\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n\n def test_can_uncheck_and_check_again_checkbox_remember_me(self):\n self.login_page.remember_me = False\n self.assertFalse(self.login_page.is_remember_me_selected())\n self.login_page.remember_me = True\n self.assertTrue(self.login_page.is_remember_me_selected())\n\n def test_can_manually_login_successfully(self):\n base_page = BasePage(self.driver)\n base_page.load()\n # time for you to read QR code and access whatsapp\n time.sleep(8)\n self.assertTrue(base_page.is_title_matches())\n self.assertTrue(base_page.is_welcome_page_available())\n self.assertTrue(base_page.is_nav_bar_page_available())\n self.assertTrue(base_page.is_search_page_available())\n self.assertTrue(base_page.is_pane_page_available())\n # chat is only available after you click on a person to open the chat\n self.assertFalse(base_page.is_chat_page_available())\n\n\nclass LogoutTest(RegistrationBaseTestCase):\n def test_can_logout_successfully_after_login(self):\n header_page = HeaderPage(self.driver)\n # time for you to read QR code and 
access whatsapp\n time.sleep(8)\n self.assertTrue(header_page.is_welcome_page_available())\n self.assertTrue(header_page.is_nav_bar_page_available())\n\n header_page.logout()\n self.assertTrue(self.login_page.is_title_matches())\n self.assertTrue(self.login_page.is_instruction_title_matches())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
import src.integralimage as II
import src.adaboost as AB
import src.utils as UT
import numpy as np
if __name__ == "__main__":
    # Dataset layout: separate folders of positive (face) and negative images.
    pos_training_path = 'dataset-1/trainset/faces'
    neg_training_path = 'dataset-1/trainset/non-faces'
    pos_testing_path = 'dataset-1/testset/faces'
    neg_testing_path = 'dataset-1/testset/non-faces'
    print('Loading training faces..')
    faces_train = UT.load_images(pos_training_path)
    # Integral images allow constant-time rectangle sums for Haar features.
    faces_train_int = list(map(II.to_integral, faces_train))
    print('..done. ' + str(len(faces_train)) + ' faces loaded.\n\nLoading non faces..')
    non_faces_train = UT.load_images(neg_training_path)
    non_faces_train_int = list(map(II.to_integral, non_faces_train))
    print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\n')
    # Number of boosting rounds: one weak classifier is selected per round.
    num_classifiers = 5
    # For performance reasons the Haar-feature window sizes are restricted.
    min_feature_height = 6
    max_feature_height = 8
    min_feature_width = 6
    max_feature_width = 8
    # Run AdaBoost over the restricted feature pool on the training set.
    classifiers = AB.learn(faces_train_int, non_faces_train_int, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width)
    # Dump the selected weak classifiers (type, position, size, threshold).
    for n in range(len(classifiers)):
        print(classifiers[n].type, classifiers[n].top_left, classifiers[n].width, classifiers[n].height, classifiers[n].threshold)
    print('Loading test faces')
    faces_test = UT.load_images(pos_testing_path)
    faces_test_int = list(map(II.to_integral, faces_test))
    print(str(len(faces_test)) + ' faces loaded.\n\nLoading test non faces..')
    non_faces_test = UT.load_images(neg_testing_path)
    non_faces_test_int = list(map(II.to_integral, non_faces_test))
    print(str(len(non_faces_test)) + ' non faces loaded.\n')
    print('Testing selected classifiers..')
    correct_faces = 0
    correct_non_faces = 0
    # count_rate returns true positives, false negatives, false positives
    # and true negatives for the strong (combined) classifier.
    correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int, non_faces_test_int, classifiers)
    print('..done.\n\nResult:\n Faces: ' + str(correct_faces) + '/' + str(len(faces_test))
          + ' (' + str((float(correct_faces) / len(faces_test)) * 100) + '%)\n non-Faces: '
          + str(correct_non_faces) + '/' + str(len(non_faces_test)) + ' ('
          + str((float(correct_non_faces) / len(non_faces_test)) * 100) + '%)')
    print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test))
          + ' (' + str((float(FN) / len(faces_test)) * 100) + '%)\n False Positive Rate: '
          + str(FP) + '/' + str(len(non_faces_test)) + ' ('
          + str((float(FP) / len(non_faces_test)) * 100) + '%)')
|
normal
|
{
"blob_id": "3f4f60ff315c8e7e4637a84629894012ed13280e",
"index": 3163,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pos_training_path = 'dataset-1/trainset/faces'\n neg_training_path = 'dataset-1/trainset/non-faces'\n pos_testing_path = 'dataset-1/testset/faces'\n neg_testing_path = 'dataset-1/testset/non-faces'\n print('Loading training faces..')\n faces_train = UT.load_images(pos_training_path)\n faces_train_int = list(map(II.to_integral, faces_train))\n print('..done. ' + str(len(faces_train)) +\n ' faces loaded.\\n\\nLoading non faces..')\n non_faces_train = UT.load_images(neg_training_path)\n non_faces_train_int = list(map(II.to_integral, non_faces_train))\n print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\\n')\n num_classifiers = 5\n min_feature_height = 6\n max_feature_height = 8\n min_feature_width = 6\n max_feature_width = 8\n classifiers = AB.learn(faces_train_int, non_faces_train_int,\n num_classifiers, min_feature_height, max_feature_height,\n min_feature_width, max_feature_width)\n for n in range(len(classifiers)):\n print(classifiers[n].type, classifiers[n].top_left, classifiers[n].\n width, classifiers[n].height, classifiers[n].threshold)\n print('Loading test faces')\n faces_test = UT.load_images(pos_testing_path)\n faces_test_int = list(map(II.to_integral, faces_test))\n print(str(len(faces_test)) + ' faces loaded.\\n\\nLoading test non faces..')\n non_faces_test = UT.load_images(neg_testing_path)\n non_faces_test_int = list(map(II.to_integral, non_faces_test))\n print(str(len(non_faces_test)) + ' non faces loaded.\\n')\n print('Testing selected classifiers..')\n correct_faces = 0\n correct_non_faces = 0\n correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int,\n non_faces_test_int, classifiers)\n print('..done.\\n\\nResult:\\n Faces: ' + str(correct_faces) + '/' +\n str(len(faces_test)) + ' (' + str(float(correct_faces) / len(\n faces_test) * 100) + '%)\\n non-Faces: ' + str(correct_non_faces) +\n '/' + str(len(non_faces_test)) + ' (' + str(float(\n correct_non_faces) / 
len(non_faces_test) * 100) + '%)')\n print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test)) +\n ' (' + str(float(FN) / len(faces_test) * 100) +\n \"\"\"%)\n False Positive Rate: \"\"\" + str(FP) + '/' + str(len(\n non_faces_test)) + ' (' + str(float(FP) / len(non_faces_test) * \n 100) + '%)')\n",
"step-3": "import src.integralimage as II\nimport src.adaboost as AB\nimport src.utils as UT\nimport numpy as np\nif __name__ == '__main__':\n pos_training_path = 'dataset-1/trainset/faces'\n neg_training_path = 'dataset-1/trainset/non-faces'\n pos_testing_path = 'dataset-1/testset/faces'\n neg_testing_path = 'dataset-1/testset/non-faces'\n print('Loading training faces..')\n faces_train = UT.load_images(pos_training_path)\n faces_train_int = list(map(II.to_integral, faces_train))\n print('..done. ' + str(len(faces_train)) +\n ' faces loaded.\\n\\nLoading non faces..')\n non_faces_train = UT.load_images(neg_training_path)\n non_faces_train_int = list(map(II.to_integral, non_faces_train))\n print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\\n')\n num_classifiers = 5\n min_feature_height = 6\n max_feature_height = 8\n min_feature_width = 6\n max_feature_width = 8\n classifiers = AB.learn(faces_train_int, non_faces_train_int,\n num_classifiers, min_feature_height, max_feature_height,\n min_feature_width, max_feature_width)\n for n in range(len(classifiers)):\n print(classifiers[n].type, classifiers[n].top_left, classifiers[n].\n width, classifiers[n].height, classifiers[n].threshold)\n print('Loading test faces')\n faces_test = UT.load_images(pos_testing_path)\n faces_test_int = list(map(II.to_integral, faces_test))\n print(str(len(faces_test)) + ' faces loaded.\\n\\nLoading test non faces..')\n non_faces_test = UT.load_images(neg_testing_path)\n non_faces_test_int = list(map(II.to_integral, non_faces_test))\n print(str(len(non_faces_test)) + ' non faces loaded.\\n')\n print('Testing selected classifiers..')\n correct_faces = 0\n correct_non_faces = 0\n correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int,\n non_faces_test_int, classifiers)\n print('..done.\\n\\nResult:\\n Faces: ' + str(correct_faces) + '/' +\n str(len(faces_test)) + ' (' + str(float(correct_faces) / len(\n faces_test) * 100) + '%)\\n non-Faces: ' + 
str(correct_non_faces) +\n '/' + str(len(non_faces_test)) + ' (' + str(float(\n correct_non_faces) / len(non_faces_test) * 100) + '%)')\n print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test)) +\n ' (' + str(float(FN) / len(faces_test) * 100) +\n \"\"\"%)\n False Positive Rate: \"\"\" + str(FP) + '/' + str(len(\n non_faces_test)) + ' (' + str(float(FP) / len(non_faces_test) * \n 100) + '%)')\n",
"step-4": "import src.integralimage as II\nimport src.adaboost as AB\nimport src.utils as UT\nimport numpy as np \n\nif __name__ == \"__main__\":\n pos_training_path = 'dataset-1/trainset/faces'\n neg_training_path = 'dataset-1/trainset/non-faces'\n pos_testing_path = 'dataset-1/testset/faces'\n neg_testing_path = 'dataset-1/testset/non-faces'\n\n print('Loading training faces..')\n faces_train = UT.load_images(pos_training_path)\n faces_train_int = list(map(II.to_integral, faces_train))\n print('..done. ' + str(len(faces_train)) + ' faces loaded.\\n\\nLoading non faces..')\n non_faces_train = UT.load_images(neg_training_path)\n non_faces_train_int = list(map(II.to_integral, non_faces_train))\n print('..done. ' + str(len(non_faces_train)) + ' non faces loaded.\\n')\n\n #number of rounds: default is 5\n num_classifiers = 5\n # For performance reasons restricting feature size\n min_feature_height = 6\n max_feature_height = 8\n min_feature_width = 6\n max_feature_width = 8\n \n #learn algorithm\n classifiers = AB.learn(faces_train_int, non_faces_train_int, num_classifiers, min_feature_height, max_feature_height, min_feature_width, max_feature_width)\n for n in range(len(classifiers)):\n print(classifiers[n].type, classifiers[n].top_left, classifiers[n].width, classifiers[n].height, classifiers[n].threshold)\n\n print('Loading test faces')\n faces_test = UT.load_images(pos_testing_path)\n faces_test_int = list(map(II.to_integral, faces_test))\n print(str(len(faces_test)) + ' faces loaded.\\n\\nLoading test non faces..')\n non_faces_test = UT.load_images(neg_testing_path)\n non_faces_test_int = list(map(II.to_integral, non_faces_test))\n print(str(len(non_faces_test)) + ' non faces loaded.\\n')\n \n print('Testing selected classifiers..')\n correct_faces = 0\n correct_non_faces = 0\n correct_faces, FN, FP, correct_non_faces = UT.count_rate(faces_test_int, non_faces_test_int, classifiers)\n\n print('..done.\\n\\nResult:\\n Faces: ' + str(correct_faces) + '/' + 
str(len(faces_test))\n + ' (' + str((float(correct_faces) / len(faces_test)) * 100) + '%)\\n non-Faces: '\n + str(correct_non_faces) + '/' + str(len(non_faces_test)) + ' ('\n + str((float(correct_non_faces) / len(non_faces_test)) * 100) + '%)')\n print('False Negative Rate: ' + str(FN) + '/' + str(len(faces_test))\n + ' (' + str((float(FN) / len(faces_test)) * 100) + '%)\\n False Positive Rate: '\n + str(FP) + '/' + str(len(non_faces_test)) + ' ('\n + str((float(FP) / len(non_faces_test)) * 100) + '%)')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def training(dict):
model = {}
model['µ'] = {}
model['sigma'] = {}
for x in dict:
model['µ'][x] = {}
model['sigma'][x] = {}
for y in dict[x]:
model['µ'][x][y] = {}
model['sigma'][x][y] = {}
doc = tr.load(dict[x][y])
phi_l = doc[0]
g_l = doc[1]
t_l = doc[2]
dphi_l = tr.delta(phi_l, t_l)
dg_l = tr.delta(g_l, t_l)
dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)
d_distance = tr.delta_distance(phi_l, g_l)
model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']
model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']
model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[
'standard_deviation']
model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'
]
model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']
model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']
model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[
'standard_deviation']
model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[
'standard_deviation']
with open('model.sauv', 'wb') as model_sauv_file:
pk.dump(model, model_sauv_file)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def training(dict):
model = {}
model['µ'] = {}
model['sigma'] = {}
for x in dict:
model['µ'][x] = {}
model['sigma'][x] = {}
for y in dict[x]:
model['µ'][x][y] = {}
model['sigma'][x][y] = {}
doc = tr.load(dict[x][y])
phi_l = doc[0]
g_l = doc[1]
t_l = doc[2]
dphi_l = tr.delta(phi_l, t_l)
dg_l = tr.delta(g_l, t_l)
dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)
d_distance = tr.delta_distance(phi_l, g_l)
model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']
model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']
model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[
'standard_deviation']
model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'
]
model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']
model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']
model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[
'standard_deviation']
model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[
'standard_deviation']
with open('model.sauv', 'wb') as model_sauv_file:
pk.dump(model, model_sauv_file)
return model
training(md.model())
<|reserved_special_token_1|>
import traitement as tr
import pickle as pk
import model as md
def training(dict):
    """Build a statistical motion model from the training trajectories.

    For every speed class ``x`` and heading class ``y`` in *dict* (a nested
    mapping to track-file names), loads the recorded track, derives the
    per-time-step variations of latitude (``phi``), longitude (``g``),
    heading and travelled distance, and stores their mean (under ``'µ'``)
    and standard deviation (under ``'sigma'``).

    The resulting nested dict is pickled to ``model.sauv`` and returned.
    """
    model = {'µ': {}, 'sigma': {}}
    for x in dict:  # loop over speed classes
        model['µ'][x] = {}
        model['sigma'][x] = {}
        for y in dict[x]:  # loop over heading classes
            model['µ'][x][y] = {}
            model['sigma'][x][y] = {}
            # Track file: [latitudes, longitudes, timestamps].
            doc = tr.load(dict[x][y])
            phi_l = doc[0]
            g_l = doc[1]
            t_l = doc[2]
            # Per-time-step variation of each feature.
            dphi_l = tr.delta(phi_l, t_l)
            dg_l = tr.delta(g_l, t_l)
            dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)
            d_distance = tr.delta_distance(phi_l, g_l)
            model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']
            model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']
            model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[
                'standard_deviation']
            # Bug fix: the deviation must be taken on the *variation* dg_l,
            # consistently with its mean and with every other feature
            # (the original mistakenly used the raw series g_l here).
            model['sigma'][x][y]['g'] = tr.parameters(dg_l)[
                'standard_deviation']
            model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']
            model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']
            model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[
                'standard_deviation']
            model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[
                'standard_deviation']
    # Persist the model for the real-time detector to reload later.
    with open('model.sauv', 'wb') as model_sauv_file:
        pk.dump(model, model_sauv_file)
    return model
training(md.model())
<|reserved_special_token_1|>
#------------------------------------------------------------------------
#
# @Author : EV2 CHEVALLIER
#
# @Date : 16.09.20
# @Location : École Navale / Chaire de Cyberdéfense des systèmes navals
# @Project : Projet de Fin d'Études
# @Subject : # Real time detection of cyber anomalies upon a NMEA network by using machine learning methods
#
#------------------------------------------------------------------------
# @Title : Training
#------------------------------------------------------------------------
# @Description : # This program gets the training dataset, extracts the interesting features (mean and standard deviation of the variations of latitude,
#                  longitude, heading and distance),
#                  puts them in a Python dictionary and saves it to a binary file with the pickle module.
#------------------------------------------------------------------------
import traitement as tr
import pickle as pk
import model as md
def training(dict):
    """Build the statistical motion model from the training dataset.

    For every (speed, heading) bucket of *dict*, load the recorded track,
    derive the per-step variations of latitude, longitude, heading and
    travelled distance, and store their mean ('µ') and standard deviation
    ('sigma').  The resulting model is pickled to 'model.sauv'.

    Parameters
    ----------
    dict : dict
        Nested mapping speed -> heading -> path of the recorded JSON file.

    Returns
    -------
    dict
        ``model['µ'|'sigma'][speed][heading][feature]`` where feature is
        one of 'phi', 'g', 'heading', 'distance'.
    """
    model = {}
    model["µ"] = {}
    model["sigma"] = {}
    for x in dict:  # loop over speed buckets
        model["µ"][x] = {}
        model["sigma"][x] = {}
        for y in dict[x]:  # loop over heading buckets
            model["µ"][x][y] = {}
            model["sigma"][x][y] = {}
            doc = tr.load(dict[x][y])  # open the JSON file
            phi_l = doc[0]  # latitude samples
            g_l = doc[1]    # longitude samples
            t_l = doc[2]    # timestamps
            # Compute the per-step differences of each feature.
            dphi_l = tr.delta(phi_l, t_l)
            dg_l = tr.delta(g_l, t_l)
            dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)
            d_distance = tr.delta_distance(phi_l, g_l)
            # Statistical values of the features: variation of latitude,
            # longitude, heading and distance.
            model["µ"][x][y]["phi"] = tr.parameters(dphi_l)["mean"]
            model["µ"][x][y]["g"] = tr.parameters(dg_l)["mean"]
            model["sigma"][x][y]["phi"] = tr.parameters(dphi_l)["standard_deviation"]
            # Bug fix: was tr.parameters(g_l) — the sigma must be taken on
            # the delta series dg_l, consistent with the mean above.
            model["sigma"][x][y]["g"] = tr.parameters(dg_l)["standard_deviation"]
            model["µ"][x][y]["heading"] = tr.parameters(dheading_l)["mean"]
            model["µ"][x][y]["distance"] = tr.parameters(d_distance)["mean"]
            model["sigma"][x][y]["heading"] = tr.parameters(dheading_l)["standard_deviation"]
            model["sigma"][x][y]["distance"] = tr.parameters(d_distance)["standard_deviation"]
    with open('model.sauv', 'wb') as model_sauv_file:
        pk.dump(model, model_sauv_file)  # save the model in a binary file
    return model
training(md.model())
|
flexible
|
{
"blob_id": "6726c8f1b3ef9a0df74c25c1921203af3aaacb12",
"index": 8758,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef training(dict):\n model = {}\n model['µ'] = {}\n model['sigma'] = {}\n for x in dict:\n model['µ'][x] = {}\n model['sigma'][x] = {}\n for y in dict[x]:\n model['µ'][x][y] = {}\n model['sigma'][x][y] = {}\n doc = tr.load(dict[x][y])\n phi_l = doc[0]\n g_l = doc[1]\n t_l = doc[2]\n dphi_l = tr.delta(phi_l, t_l)\n dg_l = tr.delta(g_l, t_l)\n dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)\n d_distance = tr.delta_distance(phi_l, g_l)\n model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']\n model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']\n model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[\n 'standard_deviation']\n model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'\n ]\n model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']\n model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']\n model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[\n 'standard_deviation']\n model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[\n 'standard_deviation']\n with open('model.sauv', 'wb') as model_sauv_file:\n pk.dump(model, model_sauv_file)\n return model\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef training(dict):\n model = {}\n model['µ'] = {}\n model['sigma'] = {}\n for x in dict:\n model['µ'][x] = {}\n model['sigma'][x] = {}\n for y in dict[x]:\n model['µ'][x][y] = {}\n model['sigma'][x][y] = {}\n doc = tr.load(dict[x][y])\n phi_l = doc[0]\n g_l = doc[1]\n t_l = doc[2]\n dphi_l = tr.delta(phi_l, t_l)\n dg_l = tr.delta(g_l, t_l)\n dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)\n d_distance = tr.delta_distance(phi_l, g_l)\n model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']\n model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']\n model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[\n 'standard_deviation']\n model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'\n ]\n model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']\n model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']\n model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[\n 'standard_deviation']\n model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[\n 'standard_deviation']\n with open('model.sauv', 'wb') as model_sauv_file:\n pk.dump(model, model_sauv_file)\n return model\n\n\ntraining(md.model())\n",
"step-4": "import traitement as tr\nimport pickle as pk\nimport model as md\n\n\ndef training(dict):\n model = {}\n model['µ'] = {}\n model['sigma'] = {}\n for x in dict:\n model['µ'][x] = {}\n model['sigma'][x] = {}\n for y in dict[x]:\n model['µ'][x][y] = {}\n model['sigma'][x][y] = {}\n doc = tr.load(dict[x][y])\n phi_l = doc[0]\n g_l = doc[1]\n t_l = doc[2]\n dphi_l = tr.delta(phi_l, t_l)\n dg_l = tr.delta(g_l, t_l)\n dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)\n d_distance = tr.delta_distance(phi_l, g_l)\n model['µ'][x][y]['phi'] = tr.parameters(dphi_l)['mean']\n model['µ'][x][y]['g'] = tr.parameters(dg_l)['mean']\n model['sigma'][x][y]['phi'] = tr.parameters(dphi_l)[\n 'standard_deviation']\n model['sigma'][x][y]['g'] = tr.parameters(g_l)['standard_deviation'\n ]\n model['µ'][x][y]['heading'] = tr.parameters(dheading_l)['mean']\n model['µ'][x][y]['distance'] = tr.parameters(d_distance)['mean']\n model['sigma'][x][y]['heading'] = tr.parameters(dheading_l)[\n 'standard_deviation']\n model['sigma'][x][y]['distance'] = tr.parameters(d_distance)[\n 'standard_deviation']\n with open('model.sauv', 'wb') as model_sauv_file:\n pk.dump(model, model_sauv_file)\n return model\n\n\ntraining(md.model())\n",
"step-5": "#------------------------------------------------------------------------\n#\n# @Author : EV2 CHEVALLIER \n#\n# @Date : 16.09.20\n# @Location : École Navale / Chaire de Cyberdéfense des systèmes navals\n# @Project : Projet de Fin d'Études\n# @Subject : # Real time detection of cyber anomalies upon a NMEA network by using machine learning methods\n#\n#------------------------------------------------------------------------\n# @Title : Training\n#------------------------------------------------------------------------\n# @Description : # This programm get the training dataset, extract the interesting features ( mean and standard deviation of variations of latitude, \n# longitude, heading and distance )\n# and put it in a python dictionnary and save it in a binary file with the pickle module.\n\n#------------------------------------------------------------------------\n\n\nimport traitement as tr\nimport pickle as pk\nimport model as md\n\ndef training(dict):\n\n\n model={}\n model[\"µ\"]={}\n model[\"sigma\"]={}\n\n for x in dict: # loop with speed\n model[\"µ\"][x]={}\n model[\"sigma\"][x]={}\n\n for y in dict[x]: # loop with heading\n\n model[\"µ\"][x][y] = {}\n model[\"sigma\"][x][y] = {}\n\n doc=tr.load(dict[x][y]) # open the json file\n\n phi_l=doc[0]\n g_l=doc[1] # get a list of phi,g,t\n t_l=doc[2]\n\n dphi_l=tr.delta(phi_l,t_l) # compute the differences\n dg_l=tr.delta(g_l,t_l)\n dheading_l=tr.delta(tr.heading(phi_l,g_l),t_l)\n d_distance=tr.delta_distance(phi_l,g_l)\n\n# we build a model with the statistical values of the features : variation of latitude, longitude, heading and distance\n\n model[\"µ\"][x][y][\"phi\"] = tr.parameters(dphi_l)[\"mean\"]\n model[\"µ\"][x][y][\"g\"] = tr.parameters(dg_l)[\"mean\"] # met à jour le modele\n\n model[\"sigma\"][x][y][\"phi\"] = tr.parameters(dphi_l)[\"standard_deviation\"]\n model[\"sigma\"][x][y][\"g\"] = tr.parameters(g_l)[\"standard_deviation\"]\n\n\n model[\"µ\"][x][y][\"heading\"] = 
tr.parameters(dheading_l)[\"mean\"]\n model[\"µ\"][x][y][\"distance\"] = tr.parameters(d_distance)[\"mean\"]\n\n model[\"sigma\"][x][y][\"heading\"] = tr.parameters(dheading_l)[\"standard_deviation\"]\n model[\"sigma\"][x][y][\"distance\"] = tr.parameters(d_distance)[\"standard_deviation\"]\n\n with open('model.sauv','wb' ) as model_sauv_file: \n pk.dump(model, model_sauv_file) # save the model in a binary file\n\n return model\n\ntraining(md.model())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Decision-tree regression of salary on position level, plotted at high resolution."""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.tree import DecisionTreeRegressor

# Load the dataset: feature = position level (middle columns), target = salary
# (last column).
salaries = pd.read_csv('Position_Salaries.csv')
X = salaries.iloc[:, 1:-1].values
y = salaries.iloc[:, salaries.shape[1] - 1].values

# Fit the decision-tree regressor (fixed seed for reproducibility).
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)

# Predict the salary for an intermediate level of 6.5.
query_level = np.reshape([6.5], (-1, 1))
y_pred = regressor.predict(query_level)

# Visualize: sample the fitted step function on a fine grid so the tree's
# piecewise-constant shape is visible.
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
|
normal
|
{
"blob_id": "c8565e1b5659dd0908aabf91e07738a798dc3232",
"index": 1366,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregressor.fit(X, y)\n<mask token>\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1] - 1].values\n<mask token>\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X, y)\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1] - 1].values\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state=0)\nregressor.fit(X, y)\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color='red')\nplt.plot(X_grid, regressor.predict(X_grid), color='blue')\nplt.scatter(6.5, y_pred, color='green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:-1].values\ny = dataset.iloc[:, dataset.shape[1]-1].values\n\n#Fitting the Decision Tree Regression\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X, y)\n\n#Predicting a new result\ny_pred = regressor.predict(np.reshape([6.5], (-1, 1)))\n\n#Visualizing the results\nX_grid = np.arange(min(X), max(X), 0.1)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.scatter(6.5, y_pred, color = 'green')\nplt.title('Salary vs Title')\nplt.xlabel('Title')\nplt.ylabel('Salary')\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Packaging script for the `champ` channel-modeling library."""
from setuptools import setup, find_packages

# Third-party packages required at runtime.
INSTALL_REQUIRES = [
    'numpy',
    'scipy',
    'matplotlib',
    'mpltools',
    'scikit-rf',
]

setup(
    name='champ',
    version='0.0.1',
    description='Channel modeling in Python',
    url='https://github.com/sgherbst/champ',
    author='Steven Herbst',
    author_email='sherbst@stanford.edu',
    packages=['champ'],
    include_package_data=True,
    zip_safe=False,
    install_requires=INSTALL_REQUIRES,
)
|
normal
|
{
"blob_id": "885fd32c9520dfdc2becd6b1a3d0c0f5f5397112",
"index": 7449,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='champ', version='0.0.1', description=\n 'Channel modeling in Python', url='https://github.com/sgherbst/champ',\n author='Steven Herbst', author_email='sherbst@stanford.edu', packages=[\n 'champ'], include_package_data=True, zip_safe=False, install_requires=[\n 'numpy', 'scipy', 'matplotlib', 'mpltools', 'scikit-rf'])\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='champ', version='0.0.1', description=\n 'Channel modeling in Python', url='https://github.com/sgherbst/champ',\n author='Steven Herbst', author_email='sherbst@stanford.edu', packages=[\n 'champ'], include_package_data=True, zip_safe=False, install_requires=[\n 'numpy', 'scipy', 'matplotlib', 'mpltools', 'scikit-rf'])\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(\n name=\"champ\",\n version=\"0.0.1\",\n description='Channel modeling in Python',\n url='https://github.com/sgherbst/champ',\n author='Steven Herbst',\n author_email='sherbst@stanford.edu',\n packages=['champ'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'numpy',\n 'scipy',\n 'matplotlib',\n 'mpltools',\n 'scikit-rf'\n ]\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .lasot import Lasot
from .got10k import Got10k
from .tracking_net import TrackingNet
from .imagenetvid import ImagenetVID
from .imagenetdet import ImagenetDET
from .coco_seq import MSCOCOSeq
from .vot import VOT
from .youtube_vos import YoutubeVOS
from .youtube_bb import YoutubeBB
|
flexible
|
{
"blob_id": "e12ca2c4592a629ce78cae7211fedaf02352a603",
"index": 4700,
"step-1": "<mask token>\n",
"step-2": "from .lasot import Lasot\nfrom .got10k import Got10k\nfrom .tracking_net import TrackingNet\nfrom .imagenetvid import ImagenetVID\nfrom .imagenetdet import ImagenetDET\nfrom .coco_seq import MSCOCOSeq\nfrom .vot import VOT\nfrom .youtube_vos import YoutubeVOS\nfrom .youtube_bb import YoutubeBB\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):
    """Zwicker-loudness calculation for stationary signals

    Calculates the acoustic loudness according to Zwicker method for
    stationary signals.
    Normative reference:
        ISO 532:1975 (method B)
        DIN 45631:1991
        ISO 532-1:2017 (method 1)
    The code is based on BASIC program published in "Program for
    calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
    and H.Fastl, J.A.S.J (E) 12, 1 (1991).
    Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
    ISO 226:1987 equal loudness contours (instead of ISO 226:2003)

    Parameters
    ----------
    signal : numpy.array or DataTime object
        Signal time values [Pa]
    fs : float, optional
        Sampling frequency, can be omitted if the input is a DataTime
        object. Default to None
    field_type : str
        Type of soundfield corresponding to spec_third ("free" by
        default or "diffuse").
    is_sdt_output : Bool, optional
        If True, the outputs are returned as SciDataTool objects.
        Default to False

    Outputs
    -------
    N : float or numpy.array
        The overall loudness array [sones], size (Ntime,).
    N_specific : numpy.ndarray or DataFreq object
        The specific loudness array [sones/bark], size (Nbark, Ntime).
    bark_axis: numpy.array
        The Bark axis array, size (Nbark,).
    """
    # Manage SciDataTool input: recover fs from the time axis and extract
    # the raw sample array before processing.
    if DataTime is not None and isinstance(signal, DataTime):
        time = signal.get_along('time')['time']
        fs = 1 / (time[1] - time[0])
        signal = signal.get_along('time')[signal.symbol]
    # Third-octave band spectrum, converted to dB (ref 20 µPa).
    spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
    spec_third = amp2db(spec_third, ref=2e-05)
    # Main loudness per critical band.
    Nm = _main_loudness(spec_third, field_type)
    # Specific loudness pattern and overall loudness by attaching slopes
    # towards higher frequencies.
    N, N_specific = _calc_slopes(Nm)
    # Bark axis: 0.1 to 24 Bark in 0.1-Bark steps.
    bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
    # Optionally wrap the specific loudness in SciDataTool objects.
    if is_sdt_output:
        if DataLinspace is None:
            raise RuntimeError(
                "In order to handle Data objects you need the 'SciDataTool' package."
                )
        else:
            bark_data = DataLinspace(name='Critical band rate', unit='Bark',
                initial=0, final=24, number=int(24 / 0.1), include_endpoint
                =True)
            N_specific = DataFreq(name=
                'Specific loudness (Zwicker method for stationnary signal)',
                symbol="N'_{zwst}", axes=[bark_data], values=N_specific,
                unit='sone/Bark')
    return N, N_specific, bark_axis
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):
    """Compute Zwicker loudness for a stationary signal.

    Implements ISO 532:1975 (method B) / DIN 45631:1991 /
    ISO 532-1:2017 (method 1), following the BASIC program published by
    E. Zwicker and H. Fastl, J.A.S.J (E) 12, 1 (1991).  Per normative
    continuity, ISO 226:1987 equal-loudness contours are used.

    Parameters
    ----------
    signal : numpy.array or DataTime object
        Time samples of the sound pressure [Pa].
    fs : float, optional
        Sampling frequency; may be omitted for a DataTime input.
    field_type : str
        Sound field type, "free" (default) or "diffuse".
    is_sdt_output : bool, optional
        Return SciDataTool objects when True. Default False.

    Returns
    -------
    N : float or numpy.array
        Overall loudness [sones], size (Ntime,).
    N_specific : numpy.ndarray or DataFreq object
        Specific loudness [sones/bark], size (Nbark, Ntime).
    bark_axis : numpy.array
        Bark axis, size (Nbark,).
    """
    # Unwrap a SciDataTool DataTime input into raw samples + sampling rate.
    if DataTime is not None and isinstance(signal, DataTime):
        time_axis = signal.get_along('time')['time']
        fs = 1 / (time_axis[1] - time_axis[0])
        signal = signal.get_along('time')[signal.symbol]

    # Third-octave band levels in dB SPL (reference 20 µPa).
    spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
    spec_third = amp2db(spec_third, ref=2e-05)

    # Main loudness per critical band, then slope integration into the
    # specific and overall loudness.
    main_n = _main_loudness(spec_third, field_type)
    N, N_specific = _calc_slopes(main_n)

    # Bark axis: 0.1 .. 24 Bark in 0.1-Bark steps.
    n_bands = int(24 / 0.1)
    bark_axis = np.linspace(0.1, 24, n_bands)

    if is_sdt_output:
        if DataLinspace is None:
            raise RuntimeError(
                "In order to handle Data objects you need the 'SciDataTool' package."
            )
        bark_data = DataLinspace(
            name='Critical band rate',
            unit='Bark',
            initial=0,
            final=24,
            number=n_bands,
            include_endpoint=True,
        )
        N_specific = DataFreq(
            name='Specific loudness (Zwicker method for stationnary signal)',
            symbol="N'_{zwst}",
            axes=[bark_data],
            values=N_specific,
            unit='sone/Bark',
        )

    return N, N_specific, bark_axis
<|reserved_special_token_1|>
import numpy as np
from mosqito.sound_level_meter import noct_spectrum
from mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness
from mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes
from mosqito.utils.conversion import amp2db
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):
    """Zwicker-loudness calculation for stationary signals

    Calculates the acoustic loudness according to Zwicker method for
    stationary signals.
    Normative reference:
        ISO 532:1975 (method B)
        DIN 45631:1991
        ISO 532-1:2017 (method 1)
    The code is based on BASIC program published in "Program for
    calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
    and H.Fastl, J.A.S.J (E) 12, 1 (1991).
    Note that due to normative continuity, as defined in the
    preceding standards, the method is in accordance with
    ISO 226:1987 equal loudness contours (instead of ISO 226:2003)

    Parameters
    ----------
    signal : numpy.array or DataTime object
        Signal time values [Pa]
    fs : float, optional
        Sampling frequency, can be omitted if the input is a DataTime
        object. Default to None
    field_type : str
        Type of soundfield corresponding to spec_third ("free" by
        default or "diffuse").
    is_sdt_output : Bool, optional
        If True, the outputs are returned as SciDataTool objects.
        Default to False

    Outputs
    -------
    N : float or numpy.array
        The overall loudness array [sones], size (Ntime,).
    N_specific : numpy.ndarray or DataFreq object
        The specific loudness array [sones/bark], size (Nbark, Ntime).
    bark_axis: numpy.array
        The Bark axis array, size (Nbark,).
    """
    # Manage SciDataTool input: derive fs from the time axis and extract
    # the raw samples.
    if DataTime is not None and isinstance(signal, DataTime):
        time = signal.get_along('time')['time']
        fs = 1 / (time[1] - time[0])
        signal = signal.get_along('time')[signal.symbol]
    # Third-octave band spectrum, converted to dB (ref 20 µPa).
    spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
    spec_third = amp2db(spec_third, ref=2e-05)
    # Main loudness per critical band.
    Nm = _main_loudness(spec_third, field_type)
    # Specific loudness pattern and overall loudness via slope attachment
    # towards higher frequencies.
    N, N_specific = _calc_slopes(Nm)
    # Bark axis: 0.1 to 24 Bark in 0.1-Bark steps.
    bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
    # Optionally wrap the specific loudness in SciDataTool objects.
    if is_sdt_output:
        if DataLinspace is None:
            raise RuntimeError(
                "In order to handle Data objects you need the 'SciDataTool' package."
                )
        else:
            bark_data = DataLinspace(name='Critical band rate', unit='Bark',
                initial=0, final=24, number=int(24 / 0.1), include_endpoint
                =True)
            N_specific = DataFreq(name=
                'Specific loudness (Zwicker method for stationnary signal)',
                symbol="N'_{zwst}", axes=[bark_data], values=N_specific,
                unit='sone/Bark')
    return N, N_specific, bark_axis
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Third party imports
import numpy as np
# Local application imports
from mosqito.sound_level_meter import noct_spectrum
from mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness
from mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes
from mosqito.utils.conversion import amp2db
# Optional package import
try:
from SciDataTool import DataTime, DataLinspace, DataFreq
except ImportError:
DataTime = None
DataLinspace = None
DataFreq = None
def loudness_zwst(signal, fs=None, field_type="free", is_sdt_output=False):
"""Zwicker-loudness calculation for stationary signals
Calculates the acoustic loudness according to Zwicker method for
stationary signals.
Normatice reference:
ISO 532:1975 (method B)
DIN 45631:1991
ISO 532-1:2017 (method 1)
The code is based on BASIC program published in "Program for
calculating loudness according to DIN 45631 (ISO 532B)", E.Zwicker
and H.Fastl, J.A.S.J (E) 12, 1 (1991).
Note that due to normative continuity, as defined in the
preceeding standards, the method is in accordance with
ISO 226:1987 equal loudness contours (instead of ISO 226:2003)
Parameters
----------
signal : numpy.array or DataTime object
Signal time values [Pa]
fs : float, optional
Sampling frequency, can be omitted if the input is a DataTime
object. Default to None
field_type : str
Type of soundfield corresponding to spec_third ("free" by
default or "diffuse").
is_sdt_output : Bool, optional
If True, the outputs are returned as SciDataTool objects.
Default to False
Outputs
-------
N : float or numpy.array
The overall loudness array [sones], size (Ntime,).
N_specific : numpy.ndarray or DataFreq object
The specific loudness array [sones/bark], size (Nbark, Ntime).
bark_axis: numpy.array
The Bark axis array, size (Nbark,).
"""
# Manage SciDataTool input type
if DataTime is not None and isinstance(signal, DataTime):
time = signal.get_along("time")["time"]
fs = 1 / (time[1] - time[0])
signal = signal.get_along("time")[signal.symbol]
# Compute third octave band spectrum
spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)
# Compute dB values
spec_third = amp2db(spec_third, ref=2e-5)
# Compute main loudness
Nm = _main_loudness(spec_third, field_type)
# Computation of specific loudness pattern and integration of overall
# loudness by attaching slopes towards higher frequencies
N, N_specific = _calc_slopes(Nm)
# Define Bark axis
bark_axis = np.linspace(0.1, 24, int(24 / 0.1))
# Manage SciDataTool output type
if is_sdt_output:
if DataLinspace is None:
raise RuntimeError(
"In order to handle Data objects you need the 'SciDataTool' package."
)
else:
bark_data = DataLinspace(
name="Critical band rate",
unit="Bark",
initial=0,
final=24,
number=int(24 / 0.1),
include_endpoint=True,
)
N_specific = DataFreq(
name="Specific loudness (Zwicker method for stationnary signal)",
symbol="N'_{zwst}",
axes=[bark_data],
values=N_specific,
unit="sone/Bark",
)
return N, N_specific, bark_axis
|
flexible
|
{
"blob_id": "75716aaaca63f8ca6d32c885021c1dc0f9a12dac",
"index": 793,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n 
else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-3": "<mask token>\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. 
Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-4": "import numpy as np\nfrom mosqito.sound_level_meter import noct_spectrum\nfrom mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness\nfrom mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes\nfrom mosqito.utils.conversion import amp2db\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type='free', is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. 
Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along('time')['time']\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along('time')[signal.symbol]\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n spec_third = amp2db(spec_third, ref=2e-05)\n Nm = _main_loudness(spec_third, field_type)\n N, N_specific = _calc_slopes(Nm)\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(name='Critical band rate', unit='Bark',\n initial=0, final=24, number=int(24 / 0.1), include_endpoint\n =True)\n N_specific = DataFreq(name=\n 'Specific loudness (Zwicker method for stationnary signal)',\n symbol=\"N'_{zwst}\", axes=[bark_data], values=N_specific,\n unit='sone/Bark')\n return N, N_specific, bark_axis\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Third party imports\nimport numpy as np\n\n# Local application imports\nfrom mosqito.sound_level_meter import noct_spectrum\nfrom mosqito.sq_metrics.loudness.loudness_zwst._main_loudness import _main_loudness\nfrom mosqito.sq_metrics.loudness.loudness_zwst._calc_slopes import _calc_slopes\nfrom mosqito.utils.conversion import amp2db\n\n# Optional package import\ntry:\n from SciDataTool import DataTime, DataLinspace, DataFreq\nexcept ImportError:\n DataTime = None\n DataLinspace = None\n DataFreq = None\n\n\ndef loudness_zwst(signal, fs=None, field_type=\"free\", is_sdt_output=False):\n \"\"\"Zwicker-loudness calculation for stationary signals\n\n Calculates the acoustic loudness according to Zwicker method for\n stationary signals.\n Normatice reference:\n ISO 532:1975 (method B)\n DIN 45631:1991\n ISO 532-1:2017 (method 1)\n The code is based on BASIC program published in \"Program for\n calculating loudness according to DIN 45631 (ISO 532B)\", E.Zwicker\n and H.Fastl, J.A.S.J (E) 12, 1 (1991).\n Note that due to normative continuity, as defined in the\n preceeding standards, the method is in accordance with\n ISO 226:1987 equal loudness contours (instead of ISO 226:2003)\n\n Parameters\n ----------\n signal : numpy.array or DataTime object\n Signal time values [Pa]\n fs : float, optional\n Sampling frequency, can be omitted if the input is a DataTime\n object. 
Default to None\n field_type : str\n Type of soundfield corresponding to spec_third (\"free\" by\n default or \"diffuse\").\n is_sdt_output : Bool, optional\n If True, the outputs are returned as SciDataTool objects.\n Default to False\n\n Outputs\n -------\n N : float or numpy.array\n The overall loudness array [sones], size (Ntime,).\n N_specific : numpy.ndarray or DataFreq object\n The specific loudness array [sones/bark], size (Nbark, Ntime).\n bark_axis: numpy.array\n The Bark axis array, size (Nbark,).\n \"\"\"\n\n # Manage SciDataTool input type\n if DataTime is not None and isinstance(signal, DataTime):\n time = signal.get_along(\"time\")[\"time\"]\n fs = 1 / (time[1] - time[0])\n signal = signal.get_along(\"time\")[signal.symbol]\n\n # Compute third octave band spectrum\n spec_third, _ = noct_spectrum(signal, fs, fmin=24, fmax=12600)\n\n # Compute dB values\n spec_third = amp2db(spec_third, ref=2e-5)\n\n # Compute main loudness\n Nm = _main_loudness(spec_third, field_type)\n\n # Computation of specific loudness pattern and integration of overall\n # loudness by attaching slopes towards higher frequencies\n N, N_specific = _calc_slopes(Nm)\n\n # Define Bark axis\n bark_axis = np.linspace(0.1, 24, int(24 / 0.1))\n\n # Manage SciDataTool output type\n if is_sdt_output:\n if DataLinspace is None:\n raise RuntimeError(\n \"In order to handle Data objects you need the 'SciDataTool' package.\"\n )\n else:\n bark_data = DataLinspace(\n name=\"Critical band rate\",\n unit=\"Bark\",\n initial=0,\n final=24,\n number=int(24 / 0.1),\n include_endpoint=True,\n )\n N_specific = DataFreq(\n name=\"Specific loudness (Zwicker method for stationnary signal)\",\n symbol=\"N'_{zwst}\",\n axes=[bark_data],\n values=N_specific,\n unit=\"sone/Bark\",\n )\n\n return N, N_specific, bark_axis\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import time
import json
import pygame
from pygame.locals import *
import urllib.request
from pygame.color import THECOLORS
pygame.init()
# Colour constants as RGB lists.  NOTE(review): "Brack" is a typo for
# "Black" but is used throughout the file, so the name is kept.
Brack=[0,0,0]
White=[255,255,255]
Green=[0,255,0]
Red=[255,0,0]
Gray=[169,169,169]
# Per-process button captions: "开 始" = start (idle), "结 束" = finish.
button_text=["开 始","开 始","开 始","开 始","开 始"]
# REST endpoints that trigger mixer process 0..4.
line=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']
# REST endpoints that move the carrier to station 0..4.
line0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']
# Row v = sensor readings displayed after the carrier moves to station v
# (0 at the occupied station, 1 elsewhere); used at the bottom of the
# mouse handler to redraw the station widgets without re-polling.
CGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]
# Current fill colour of each process button.
color=[Green,Green,Green,Green,Green]
# Mode label: "手动状态:" = manual mode, "自动状态:" = automatic mode.
button_text0="手动状态:"
# Station button captions ("工位N" = station N).
button_text1=["工位0","工位1","工位2","工位3","工位4"]
Num=['0','1','2','3','4']
# X coordinate of the carrier image for each of the five stations.
B0=[452,522,592,662,732]
screen = pygame.display.set_mode((1240,768),FULLSCREEN,32)
screen.fill(Brack)
# White control-panel background.
pygame.draw.rect(screen,White,[420,134,400,500],0)
# Process row labels ("工 序 X:" = process step X).
text=["工 序 甲:","工 序 乙:","工 序 丙:","工 序 丁:","工 序 戊:"]
# Fonts at three sizes (title / labels / button captions).
text_0=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",22)
text_1=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",18)
text_2=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",15)
# Panel title ("操 作 界 面" = operation interface).
text_fmt0=text_0.render("操 作 界 面",2,Brack)
screen.blit(text_fmt0,(545,140))
pygame.display.update()
def Process(num,x,y,button_text,color):
    """Draw one process row: its label plus a coloured action button.

    num is the row index into the module-level ``text`` labels; (x, y) is
    the button's top-left corner; button_text is the caption to show and
    color the button's fill colour.
    """
    # Row label sits to the left of the button.
    label_surface = text_1.render(text[num], 1, Brack)
    screen.blit(label_surface, (x - 127, y))
    # Button: dark outline, coloured fill, then the caption on top.
    pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
    pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
    caption_surface = text_2.render(button_text, 1, Brack)
    screen.blit(caption_surface, (x + 13, y + 3))
    pygame.display.update()
def Station(num,x,y,a):
    """Draw one station widget: button, sensor icon and sensor reading.

    num indexes the station caption in ``button_text1``; (x, y) is the
    button's top-left corner; ``a`` is the sensor value whose digit is
    rendered below the icon.
    """
    # Station button: dark outline, green fill, caption on top.
    pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
    pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
    name_surface = text_2.render(button_text1[num], 1, Brack)
    screen.blit(name_surface, (x + 9, y + 4))
    # Sensor icon, scaled and placed 80 px below the button.
    sensor_icon = pygame.image.load('cgq.jpg')
    sensor_icon = pygame.transform.smoothscale(sensor_icon, (52, 50))
    screen.blit(sensor_icon, (x, y + 80))
    # Sensor reading rendered as a single digit beneath the icon.
    reading_surface = text_1.render(Num[a], 1, Brack)
    screen.blit(reading_surface, (x + 20, 610))
    pygame.display.update()
if __name__ == '__main__':
    # Main GUI loop: every ~1.5 s redraw the panel, poll the carrier
    # status service, advance the automatic sequence, and dispatch
    # keyboard/mouse events.
    while True:
        time.sleep(1.5)
        # Mode label area plus the "切 换" (switch-mode) button.
        pygame.draw.rect(screen,White,[506,440,85,28],0)
        pygame.draw.rect(screen,Brack,[597,440,65,28],2)
        pygame.draw.rect(screen,Green,[599,442,62,25],0)
        button1=text_1.render("切 换",1,Brack)
        screen.blit(button1,(611,444))
        button=text_1.render(button_text0,1,Brack)
        screen.blit(button,(506,444))
        # One entry per process button: [index, x, y, caption, colour].
        B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]
        # While no process is running (all captions are the idle "开 始"),
        # refresh sensor values and carrier position from the REST service.
        if button_text==["开 始","开 始","开 始","开 始","开 始"]:
            response2=urllib.request.urlopen('http://localhost:5000/carrier/status')
            html2=response2.read()
            text2=json.loads(html2)
            a=text2['sensors']
            b=text2['pos']
        # Station widgets: [index, x, y, sensor value].
        # NOTE(review): 'a' and 'b' are only assigned inside the branch
        # above; the code relies on that branch being taken on the first
        # iteration (button_text starts all-idle) — confirm this holds.
        C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]
        # Clear the carrier strip and redraw the car at its current slot.
        pygame.draw.rect(screen,White,[420,525,400,50],0)
        pygame.draw.rect(screen,White,[420,615,400,30],0)
        img=pygame.image.load('car.jpg')
        img=pygame.transform.smoothscale(img,(52,50))
        screen.blit(img,(B0[b],525))
        if button_text0=="手动状态:":
            # Manual mode: reset any finished ("结 束") button to idle.
            for t in range(5):
                if button_text[t]=="结 束":
                    button_text[t]="开 始"
                    color[t]=Green
        elif button_text0=="自动状态:":
            # Automatic mode: when step k is flagged "结 束", fire the
            # mixer endpoint for step k and arm step k+1; the final step
            # simply returns to idle, ending the sequence.
            if button_text[0]=="结 束":
                response0=urllib.request.urlopen(line[0])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[0]="开 始"
                button_text[1]="结 束"
            elif button_text[1]=="结 束":
                response0=urllib.request.urlopen(line[1])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[1]="开 始"
                button_text[2]="结 束"
            elif button_text[2]=="结 束":
                response0=urllib.request.urlopen(line[2])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[2]="开 始"
                button_text[3]="结 束"
            elif button_text[3]=="结 束":
                response0=urllib.request.urlopen(line[3])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[3]="开 始"
                button_text[4]="结 束"
            elif button_text[4]=="结 束":
                response0=urllib.request.urlopen(line[4])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[4]="开 始"
        # Repaint every process button and station widget.
        for i in B:
            Process(i[0],i[1],i[2],i[3],i[4])
        for v in C:
            Station(v[0],v[1],v[2],v[3])
        # Event handling: ESC or window close quits; left mouse button
        # toggles mode, starts a process, or moves the carrier.
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    exit()
            elif event.type == QUIT:
                exit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                pressed_array = pygame.mouse.get_pressed()
                pos = pygame.mouse.get_pos()
                for index in range(len(pressed_array)):
                    if pressed_array[index]:
                        # index 0 is the left mouse button.
                        if index==0:
                            # Click on the mode-switch button: toggle
                            # manual/automatic, but only while idle.
                            # Entering automatic mode arms step 0.
                            if 597<=pos[0]<=662 and 440<=pos[1]<=468:
                                if button_text0=="自动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
                                    button_text0="手动状态:"
                                    color=[Green,Green,Green,Green,Green]
                                elif button_text0=="手动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
                                    button_text0="自动状态:"
                                    button_text[0]="结 束"
                                    color=[Gray,Gray,Gray,Gray,Gray]
                            # Click on a process button (manual mode,
                            # all idle): mark it running and fire the
                            # corresponding mixer endpoint.
                            for i in B:
                                if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:
                                    if button_text==["开 始","开 始","开 始","开 始","开 始"] and button_text0=="手动状态:":
                                        color[i[0]]=Red
                                        button_text[i[0]]="结 束"
                                        response1=urllib.request.urlopen(line[i[0]])
                                        html1=response1.read()
                                        text1=json.loads(html1)
                                        print(text1)
                            # Click on a station button: command the
                            # carrier to that station, redraw the car at
                            # the returned position, and rebuild the
                            # station widgets from the CGQ pattern.
                            # NOTE(review): C is reassigned while the
                            # surrounding 'for v in C' loop iterates the
                            # old list — confirm this is intentional.
                            for v in C:
                                if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:
                                    response3=urllib.request.urlopen(line0[v[0]])
                                    html3=response3.read()
                                    text3=json.loads(html3)
                                    pygame.draw.rect(screen,White,[420,525,400,50],0)
                                    pygame.draw.rect(screen,White,[420,615,400,30],0)
                                    img=pygame.image.load('car.jpg')
                                    img=pygame.transform.smoothscale(img,(52,50))
                                    screen.blit(img,(B0[int(text3)],525))
                                    C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]
                                    for f in C:
                                        Station(f[0],f[1],f[2],f[3])
        pygame.display.update()
|
normal
|
{
"blob_id": "609071fc3af1b526fbd4555ced2376f56ae0f3c3",
"index": 2174,
"step-1": "<mask token>\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\n<mask token>\n",
"step-2": "<mask token>\npygame.init()\n<mask token>\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\n<mask token>\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n 
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, 
Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n",
"step-3": "<mask token>\npygame.init()\nBrack = [0, 0, 0]\nWhite = [255, 255, 255]\nGreen = [0, 255, 0]\nRed = [255, 0, 0]\nGray = [169, 169, 169]\nbutton_text = ['开 始', '开 始', '开 始', '开 始', '开 始']\nline = ['http://localhost:5050/mixer/000',\n 'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',\n 'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']\nline0 = ['http://localhost:5000/carrier/moveto/0',\n 'http://localhost:5000/carrier/moveto/1',\n 'http://localhost:5000/carrier/moveto/2',\n 'http://localhost:5000/carrier/moveto/3',\n 'http://localhost:5000/carrier/moveto/4']\nCGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],\n [1, 1, 1, 1, 0]]\ncolor = [Green, Green, Green, Green, Green]\nbutton_text0 = '手动状态:'\nbutton_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']\nNum = ['0', '1', '2', '3', '4']\nB0 = [452, 522, 592, 662, 732]\nscreen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\ntext = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']\ntext_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)\ntext_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)\ntext_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)\ntext_fmt0 = text_0.render('操 作 界 面', 2, Brack)\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, 
Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 
始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, 
White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n",
"step-4": "import time\nimport json\nimport pygame\nfrom pygame.locals import *\nimport urllib.request\nfrom pygame.color import THECOLORS\npygame.init()\nBrack = [0, 0, 0]\nWhite = [255, 255, 255]\nGreen = [0, 255, 0]\nRed = [255, 0, 0]\nGray = [169, 169, 169]\nbutton_text = ['开 始', '开 始', '开 始', '开 始', '开 始']\nline = ['http://localhost:5050/mixer/000',\n 'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',\n 'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']\nline0 = ['http://localhost:5000/carrier/moveto/0',\n 'http://localhost:5000/carrier/moveto/1',\n 'http://localhost:5000/carrier/moveto/2',\n 'http://localhost:5000/carrier/moveto/3',\n 'http://localhost:5000/carrier/moveto/4']\nCGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],\n [1, 1, 1, 1, 0]]\ncolor = [Green, Green, Green, Green, Green]\nbutton_text0 = '手动状态:'\nbutton_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']\nNum = ['0', '1', '2', '3', '4']\nB0 = [452, 522, 592, 662, 732]\nscreen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\ntext = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']\ntext_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)\ntext_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)\ntext_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)\ntext_fmt0 = text_0.render('操 作 界 面', 2, Brack)\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 
28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = 
urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = 
urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n",
"step-5": "import time\nimport json\nimport pygame\nfrom pygame.locals import *\nimport urllib.request\nfrom pygame.color import THECOLORS\npygame.init()\nBrack=[0,0,0]\nWhite=[255,255,255]\nGreen=[0,255,0]\nRed=[255,0,0]\nGray=[169,169,169]\nbutton_text=[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]\nline=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']\nline0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']\nCGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]\ncolor=[Green,Green,Green,Green,Green]\nbutton_text0=\"手动状态:\"\nbutton_text1=[\"工位0\",\"工位1\",\"工位2\",\"工位3\",\"工位4\"]\nNum=['0','1','2','3','4']\nB0=[452,522,592,662,732]\nscreen = pygame.display.set_mode((1240,768),FULLSCREEN,32)\nscreen.fill(Brack)\npygame.draw.rect(screen,White,[420,134,400,500],0)\ntext=[\"工 序 甲:\",\"工 序 乙:\",\"工 序 丙:\",\"工 序 丁:\",\"工 序 戊:\"]\ntext_0=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",22)\ntext_1=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",18)\ntext_2=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",15)\ntext_fmt0=text_0.render(\"操 作 界 面\",2,Brack)\nscreen.blit(text_fmt0,(545,140))\npygame.display.update()\ndef Process(num,x,y,button_text,color):\n text_fmt1=text_1.render(text[num],1,Brack)\n screen.blit(text_fmt1,(x-127,y))\n pygame.draw.rect(screen,Brack,[x,y,60,25],2)\n pygame.draw.rect(screen,color,[x+2,y+2,57,22],0)\n button=text_2.render(button_text,1,Brack)\n screen.blit(button,(x+13,y+3))\n pygame.display.update()\ndef Station(num,x,y,a):\n pygame.draw.rect(screen,Brack,[x,y,55,28],2)\n pygame.draw.rect(screen,Green,[x+2,y+2,52,25],0)\n button=text_2.render(button_text1[num],1,Brack)\n screen.blit(button,(x+9,y+4))\n 
img=pygame.image.load('cgq.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(x,y+80))\n button=text_1.render(Num[a],1,Brack)\n screen.blit(button,(x+20,610))\n pygame.display.update()\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen,White,[506,440,85,28],0)\n pygame.draw.rect(screen,Brack,[597,440,65,28],2)\n pygame.draw.rect(screen,Green,[599,442,62,25],0)\n button1=text_1.render(\"切 换\",1,Brack)\n screen.blit(button1,(611,444))\n button=text_1.render(button_text0,1,Brack)\n screen.blit(button,(506,444))\n B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]\n if button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n response2=urllib.request.urlopen('http://localhost:5000/carrier/status')\n html2=response2.read()\n text2=json.loads(html2)\n a=text2['sensors']\n b=text2['pos']\n C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]\n pygame.draw.rect(screen,White,[420,525,400,50],0)\n pygame.draw.rect(screen,White,[420,615,400,30],0)\n img=pygame.image.load('car.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(B0[b],525))\n if button_text0==\"手动状态:\":\n for t in range(5):\n if button_text[t]==\"结 束\":\n button_text[t]=\"开 始\"\n color[t]=Green\n elif button_text0==\"自动状态:\":\n if button_text[0]==\"结 束\":\n response0=urllib.request.urlopen(line[0])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[0]=\"开 始\"\n button_text[1]=\"结 束\"\n elif button_text[1]==\"结 束\":\n response0=urllib.request.urlopen(line[1])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[1]=\"开 始\"\n button_text[2]=\"结 束\"\n elif button_text[2]==\"结 束\":\n response0=urllib.request.urlopen(line[2])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[2]=\"开 始\"\n 
button_text[3]=\"结 束\"\n elif button_text[3]==\"结 束\":\n response0=urllib.request.urlopen(line[3])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[3]=\"开 始\"\n button_text[4]=\"结 束\"\n elif button_text[4]==\"结 束\":\n response0=urllib.request.urlopen(line[4])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[4]=\"开 始\"\n for i in B:\n Process(i[0],i[1],i[2],i[3],i[4])\n for v in C:\n Station(v[0],v[1],v[2],v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index==0:\n if 597<=pos[0]<=662 and 440<=pos[1]<=468:\n if button_text0==\"自动状态:\" and button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n button_text0=\"手动状态:\"\n color=[Green,Green,Green,Green,Green]\n elif button_text0==\"手动状态:\" and button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n button_text0=\"自动状态:\"\n button_text[0]=\"结 束\"\n color=[Gray,Gray,Gray,Gray,Gray]\n for i in B:\n if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:\n if button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"] and button_text0==\"手动状态:\":\n color[i[0]]=Red\n button_text[i[0]]=\"结 束\"\n response1=urllib.request.urlopen(line[i[0]])\n html1=response1.read()\n text1=json.loads(html1)\n print(text1)\n for v in C:\n if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:\n response3=urllib.request.urlopen(line0[v[0]])\n html3=response3.read()\n text3=json.loads(html3)\n pygame.draw.rect(screen,White,[420,525,400,50],0)\n pygame.draw.rect(screen,White,[420,615,400,30],0)\n img=pygame.image.load('car.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(B0[int(text3)],525))\n 
C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]\n for f in C:\n Station(f[0],f[1],f[2],f[3])\n pygame.display.update()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import iris
import numpy as np
import matplotlib.pyplot as plt
import glob
import iris.analysis.cartography
import iris.coord_categorisation
import iris.analysis
import time
def my_callback(cube, field, filename):
    """Iris load callback: strip forecast coordinates from each cube.

    The time coordinate previously appeared under two different names,
    which stopped the loaded cubes from merging; removing the forecast
    reference/period coordinates works around that.
    """
    for coord_name in ('forecast_reference_time', 'forecast_period'):
        cube.remove_coord(coord_name)
directory = '/data/data1/ph290/hadgem2es_co2/n_atlantic_co2/'
output_directory = ('/home/ph290/data1/hadgem2es_co2/global_avg/')
runs = glob.glob(directory+'//?????')
run_names = []
run_global_means = []
run_date = []
for i,run in enumerate(runs):
print i
run_name = run.split('/')[7]
run_names.append(run_name)
cube = iris.load_cube(run+'/*.pp',iris.AttributeConstraint(STASH='m02s30i249'),callback=my_callback)
if not cube.coord('longitude').has_bounds():
cube.coord('longitude').guess_bounds()
if not cube.coord('latitude').has_bounds():
cube.coord('latitude').guess_bounds()
grid_areas = iris.analysis.cartography.area_weights(cube)
time_mean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)
run_global_means.append(time_mean.data)
coord = cube.coord('time')
year = np.array([coord.units.num2date(value).year for value in coord.points])
run_date.append(year)
np.savetxt(output_directory + run_name + '.txt',np.vstack((year,time_mean.data)).T,delimiter=',')
fig = plt.figure()
for i,data in enumerate(run_global_means):
plt.plot(run_date[i],i,data,'k')
plt.xlabel('year')
plt.ylabel('air-sea CO$_2$ flux')
plt.show()
|
normal
|
{
"blob_id": "6ea651e27620d0f26f7364e6d9d57e733b158d77",
"index": 6466,
"step-1": "import iris\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport iris.analysis.cartography\nimport iris.coord_categorisation\nimport iris.analysis\nimport time\n\ndef my_callback(cube, field, filename):\n cube.remove_coord('forecast_reference_time')\n cube.remove_coord('forecast_period')\n #the cubes were not merging properly before, because the time coordinate appeard to have teo different names... I think this may work\n\ndirectory = '/data/data1/ph290/hadgem2es_co2/n_atlantic_co2/'\noutput_directory = ('/home/ph290/data1/hadgem2es_co2/global_avg/')\n\nruns = glob.glob(directory+'//?????')\n\nrun_names = []\nrun_global_means = []\nrun_date = []\n\nfor i,run in enumerate(runs):\n print i\n run_name = run.split('/')[7]\n run_names.append(run_name)\n cube = iris.load_cube(run+'/*.pp',iris.AttributeConstraint(STASH='m02s30i249'),callback=my_callback)\n if not cube.coord('longitude').has_bounds():\n cube.coord('longitude').guess_bounds()\n if not cube.coord('latitude').has_bounds():\n cube.coord('latitude').guess_bounds()\n grid_areas = iris.analysis.cartography.area_weights(cube)\n time_mean = cube.collapsed(['longitude', 'latitude'], iris.analysis.MEAN, weights=grid_areas)\n run_global_means.append(time_mean.data)\n coord = cube.coord('time')\n year = np.array([coord.units.num2date(value).year for value in coord.points])\n run_date.append(year)\n np.savetxt(output_directory + run_name + '.txt',np.vstack((year,time_mean.data)).T,delimiter=',')\n\nfig = plt.figure()\nfor i,data in enumerate(run_global_means):\n plt.plot(run_date[i],i,data,'k')\n plt.xlabel('year')\n plt.ylabel('air-sea CO$_2$ flux')\n\nplt.show()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
:Author :weijinlong
:Time: :2020/1/10 17:22
:File :graph.py
:content:
"""
import tensorflow as tf
from .base import TFLayer
class TFModel(TFLayer):

    def build_model(self):
        raise NotImplementedError

    def add_outputs(self, *args, **kwargs):
        """Register the model's output tensors.

        Positional tensors are keyed by their own graph name with the
        ":<index>" suffix stripped; keyword tensors are keyed by the
        given keyword.

        :param args: output tensors
        :param kwargs: explicitly named output tensors
        :return: None
        """
        def checked_name(tensor):
            # Reject anything that is not a tf.Tensor before reading .name.
            assert isinstance(tensor, tf.Tensor), "function add_outputs parameter's value must be tf.Tensor"
            return tensor.name

        outputs = {name.split(':')[0]: name for name in map(checked_name, args)}
        for key, tensor in kwargs.items():
            outputs[key] = checked_name(tensor)
        self.update_outputs(outputs)
class TFCompile(TFLayer):

    def compile(self):
        raise NotImplementedError

    def add_metrics(self, *args, **kwargs):
        """Register evaluation metrics / optimisation ops (e.g. loss, accuracy).

        Positional nodes are keyed by their own graph name with the
        ":<index>" suffix stripped; keyword nodes are keyed by the given
        keyword.

        :param args: tensors or operations to track
        :param kwargs: explicitly named tensors or operations
        :return: None
        """
        def checked_name(node):
            # Accept graph ops as well as tensors; reject everything else.
            assert isinstance(node, (tf.Operation, tf.Tensor)), \
                "function add_metrics parameter's value must be tf.Operation"
            return node.name

        metrics = {name.split(':')[0]: name for name in map(checked_name, args)}
        for key, node in kwargs.items():
            metrics[key] = checked_name(node)
        self.update_metrics(metrics)

    @property
    def fetches(self):
        """Graph nodes fetched when running a training/evaluation step.

        :return: the registered metrics mapping
        """
        return self.metrics
class TFComModel(TFModel, TFCompile):
    """
    Composite TensorFlow model: a single operator performs both the model
    building and the model compilation step.
    """
    def build_model(self):
        # Subclasses must supply the graph-building (and compiling) logic.
        raise NotImplementedError
    def compile(self):
        # Compilation is folded into build_model, so this is a no-op.
        pass
|
normal
|
{
"blob_id": "cdabb4a118cb0ef55c271a446fa190a457ebe142",
"index": 7383,
"step-1": "<mask token>\n\n\nclass TFCompile(TFLayer):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass TFCompile(TFLayer):\n\n def compile(self):\n raise NotImplementedError\n\n def add_metrics(self, *args, **kwargs):\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n metrics = {}\n for value in args:\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n name = value.name\n metrics[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n metrics[key] = value.name\n self.update_metrics(metrics)\n\n @property\n def fetches(self):\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\n\n :return:\n \"\"\"\n return self.metrics\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass TFModel(TFLayer):\n <mask token>\n\n def add_outputs(self, *args, **kwargs):\n \"\"\"模型的输出值\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n outputs = {}\n for value in args:\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n name = value.name\n outputs[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n outputs[key] = value.name\n self.update_outputs(outputs)\n\n\nclass TFCompile(TFLayer):\n\n def compile(self):\n raise NotImplementedError\n\n def add_metrics(self, *args, **kwargs):\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n metrics = {}\n for value in args:\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n name = value.name\n metrics[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n metrics[key] = value.name\n self.update_metrics(metrics)\n\n @property\n def fetches(self):\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\n\n :return:\n \"\"\"\n return self.metrics\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-4": "<mask token>\n\n\nclass TFModel(TFLayer):\n\n def build_model(self):\n raise NotImplementedError\n\n def add_outputs(self, *args, **kwargs):\n \"\"\"模型的输出值\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n outputs = {}\n for value in args:\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n name = value.name\n outputs[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, tf.Tensor\n ), \"function add_outputs parameter's value must be tf.Tensor\"\n outputs[key] = value.name\n self.update_outputs(outputs)\n\n\nclass TFCompile(TFLayer):\n\n def compile(self):\n raise NotImplementedError\n\n def add_metrics(self, *args, **kwargs):\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\n\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n metrics = {}\n for value in args:\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n name = value.name\n metrics[name.split(':')[0]] = name\n for key, value in kwargs.items():\n assert isinstance(value, (tf.Operation, tf.Tensor)\n ), \"function add_metrics parameter's value must be tf.Operation\"\n metrics[key] = value.name\n self.update_metrics(metrics)\n\n @property\n def fetches(self):\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\n\n :return:\n \"\"\"\n return self.metrics\n\n\nclass TFComModel(TFModel, TFCompile):\n \"\"\"\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\n \"\"\"\n\n def build_model(self):\n raise NotImplementedError\n\n def compile(self):\n pass\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding:utf-8 _*-\r\n\r\n\"\"\"\r\n:Author :weijinlong\r\n:Time: :2020/1/10 17:22\r\n:File :graph.py\r\n:content:\r\n \r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\n\r\nfrom .base import TFLayer\r\n\r\n\r\nclass TFModel(TFLayer):\r\n\r\n def build_model(self):\r\n raise NotImplementedError\r\n\r\n def add_outputs(self, *args, **kwargs):\r\n \"\"\"模型的输出值\r\n\r\n :param args:\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n outputs = {}\r\n for value in args:\r\n assert isinstance(value, tf.Tensor), \"function add_outputs parameter's value must be tf.Tensor\"\r\n name = value.name\r\n outputs[name.split(':')[0]] = name\r\n for key, value in kwargs.items():\r\n assert isinstance(value, tf.Tensor), \"function add_outputs parameter's value must be tf.Tensor\"\r\n outputs[key] = value.name\r\n self.update_outputs(outputs)\r\n\r\n\r\nclass TFCompile(TFLayer):\r\n\r\n def compile(self):\r\n raise NotImplementedError\r\n\r\n def add_metrics(self, *args, **kwargs):\r\n \"\"\"加入模型的评估指标、优化操作等,例如损失值,正确率等张量或者操作\r\n\r\n :param args:\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n metrics = {}\r\n for value in args:\r\n assert isinstance(value, (tf.Operation, tf.Tensor)), \\\r\n \"function add_metrics parameter's value must be tf.Operation\"\r\n name = value.name\r\n metrics[name.split(':')[0]] = name\r\n for key, value in kwargs.items():\r\n assert isinstance(value, (tf.Operation, tf.Tensor)), \\\r\n \"function add_metrics parameter's value must be tf.Operation\"\r\n metrics[key] = value.name\r\n self.update_metrics(metrics)\r\n\r\n @property\r\n def fetches(self):\r\n \"\"\" 获取模型输出值或者评估值, 来优化训练模型\r\n\r\n :return:\r\n \"\"\"\r\n return self.metrics\r\n\r\n\r\nclass TFComModel(TFModel, TFCompile):\r\n \"\"\"\r\n 基于TensorFlow的复合模型,即使用一个算子构建模型的和模型的编译\r\n \"\"\"\r\n\r\n def build_model(self):\r\n raise NotImplementedError\r\n\r\n def compile(self):\r\n pass\r\n",
"step-ids": [
5,
8,
10,
11,
13
]
}
|
[
5,
8,
10,
11,
13
] |
import pulumi
import pulumi_aws as aws
# Look up an existing ElastiCache replication group by its id; "example"
# is presumably a placeholder id -- replace with a real group name.
bar = aws.elasticache.get_replication_group(replication_group_id="example")
|
normal
|
{
"blob_id": "4bf140ae01f2eaa0c67f667766c3ec921d552066",
"index": 6073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbar = aws.elasticache.get_replication_group(replication_group_id='example')\n",
"step-3": "import pulumi\nimport pulumi_aws as aws\nbar = aws.elasticache.get_replication_group(replication_group_id='example')\n",
"step-4": "import pulumi\nimport pulumi_aws as aws\n\nbar = aws.elasticache.get_replication_group(replication_group_id=\"example\")\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SlidingDoorIllustration(Scene):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SlidingDoorIllustration(Scene):
def construct(self):
waiting_room = Rectangle(color=BLUE, stroke_width=8)
waiting_room.shift(LEFT + DOWN)
workspace = Rectangle(color=BLUE, stroke_width=8)
workspace.next_to(waiting_room, RIGHT + UP, buff=0)
workspace.shift(LEFT)
t1 = Text('Waiting Room').move_to(waiting_room.get_center()).scale(0.5)
t2 = Text('Workspace').move_to(workspace.get_center()).scale(0.5)
doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.
get_corner(UR), color=RED, stroke_width=8)
door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR),
color=GREEN, stroke_width=8)
self.add(waiting_room, workspace, t1, t2, doors, door)
self.play(door.animate.shift(LEFT))
self.wait()
self.play(door.animate.shift(RIGHT))
self.wait()
<|reserved_special_token_1|>
from manim import *
class SlidingDoorIllustration(Scene):
def construct(self):
waiting_room = Rectangle(color=BLUE, stroke_width=8)
waiting_room.shift(LEFT + DOWN)
workspace = Rectangle(color=BLUE, stroke_width=8)
workspace.next_to(waiting_room, RIGHT + UP, buff=0)
workspace.shift(LEFT)
t1 = Text('Waiting Room').move_to(waiting_room.get_center()).scale(0.5)
t2 = Text('Workspace').move_to(workspace.get_center()).scale(0.5)
doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.
get_corner(UR), color=RED, stroke_width=8)
door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR),
color=GREEN, stroke_width=8)
self.add(waiting_room, workspace, t1, t2, doors, door)
self.play(door.animate.shift(LEFT))
self.wait()
self.play(door.animate.shift(RIGHT))
self.wait()
<|reserved_special_token_1|>
from manim import *
class SlidingDoorIllustration(Scene):
    """Animate a door sliding open and shut between two adjoining rooms."""
    def construct(self):
        # Two equal rectangles: the waiting room shifted down-left, the
        # workspace placed corner-to-corner above-right of it, then nudged
        # left so the rooms overlap along part of an edge.
        waiting_room = Rectangle(color=BLUE, stroke_width=8)
        waiting_room.shift(LEFT + DOWN)
        workspace = Rectangle(color=BLUE, stroke_width=8)
        workspace.next_to(waiting_room, RIGHT + UP, buff=0)
        workspace.shift(LEFT)
        # Room labels, centred in each rectangle and scaled down.
        t1 = Text("Waiting Room").move_to(waiting_room.get_center()).scale(0.5)
        t2 = Text("Workspace").move_to(workspace.get_center()).scale(0.5)
        # Red line appears to be the fixed door track, green the movable
        # door panel spanning the gap between the rooms -- TODO confirm.
        doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.get_corner(UR), color=RED, stroke_width=8)
        door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR), color=GREEN, stroke_width=8)
        self.add(waiting_room, workspace, t1, t2, doors, door)
        # Slide the door open (left), pause, then closed (right).
        self.play(door.animate.shift(LEFT))
        self.wait()
        self.play(door.animate.shift(RIGHT))
        self.wait()
|
flexible
|
{
"blob_id": "e93d5461a2604d3b8015489397c68e16d1cb222e",
"index": 3695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SlidingDoorIllustration(Scene):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SlidingDoorIllustration(Scene):\n\n def construct(self):\n waiting_room = Rectangle(color=BLUE, stroke_width=8)\n waiting_room.shift(LEFT + DOWN)\n workspace = Rectangle(color=BLUE, stroke_width=8)\n workspace.next_to(waiting_room, RIGHT + UP, buff=0)\n workspace.shift(LEFT)\n t1 = Text('Waiting Room').move_to(waiting_room.get_center()).scale(0.5)\n t2 = Text('Workspace').move_to(workspace.get_center()).scale(0.5)\n doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.\n get_corner(UR), color=RED, stroke_width=8)\n door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR),\n color=GREEN, stroke_width=8)\n self.add(waiting_room, workspace, t1, t2, doors, door)\n self.play(door.animate.shift(LEFT))\n self.wait()\n self.play(door.animate.shift(RIGHT))\n self.wait()\n",
"step-4": "from manim import *\n\n\nclass SlidingDoorIllustration(Scene):\n\n def construct(self):\n waiting_room = Rectangle(color=BLUE, stroke_width=8)\n waiting_room.shift(LEFT + DOWN)\n workspace = Rectangle(color=BLUE, stroke_width=8)\n workspace.next_to(waiting_room, RIGHT + UP, buff=0)\n workspace.shift(LEFT)\n t1 = Text('Waiting Room').move_to(waiting_room.get_center()).scale(0.5)\n t2 = Text('Workspace').move_to(workspace.get_center()).scale(0.5)\n doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.\n get_corner(UR), color=RED, stroke_width=8)\n door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR),\n color=GREEN, stroke_width=8)\n self.add(waiting_room, workspace, t1, t2, doors, door)\n self.play(door.animate.shift(LEFT))\n self.wait()\n self.play(door.animate.shift(RIGHT))\n self.wait()\n",
"step-5": "from manim import *\n\n\nclass SlidingDoorIllustration(Scene):\n def construct(self):\n waiting_room = Rectangle(color=BLUE, stroke_width=8)\n waiting_room.shift(LEFT + DOWN)\n workspace = Rectangle(color=BLUE, stroke_width=8)\n workspace.next_to(waiting_room, RIGHT + UP, buff=0)\n workspace.shift(LEFT)\n t1 = Text(\"Waiting Room\").move_to(waiting_room.get_center()).scale(0.5)\n t2 = Text(\"Workspace\").move_to(workspace.get_center()).scale(0.5)\n doors = Line(workspace.get_corner(DL) + LEFT, waiting_room.get_corner(UR), color=RED, stroke_width=8)\n door = Line(workspace.get_corner(DL), waiting_room.get_corner(UR), color=GREEN, stroke_width=8)\n self.add(waiting_room, workspace, t1, t2, doors, door)\n self.play(door.animate.shift(LEFT))\n self.wait()\n self.play(door.animate.shift(RIGHT))\n self.wait()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert ec.scale(4, ec.order) == 0
<|reserved_special_token_0|>
print('Factoring...')
<|reserved_special_token_0|>
for i in range(2, 2 ** 24):
if x % i == 0:
if x % (i * i) != 0:
factors.append(i)
x = pp(x, i)
print('Getting remainders...')
<|reserved_special_token_0|>
for f in factors:
u = 0
while u == 0:
while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):
u = randint(1, ec.prime - 1)
u = ec.scale(u, pp(twist_ord, f))
while ec.scale(u, f) != 0:
u = ec.scale(u, f)
shared = ec.scale(u, aPriv)
for i in range(f):
if ec.scale(u, i) == shared:
print('\tSolved mod %d' % f)
rems.append(i)
break
print('Correcting parities...')
for i in range(len(factors)):
if rems[i] != 0:
break
<|reserved_special_token_0|>
for i in range(len(factors)):
if i == fixed:
continue
u = 0
while u == 0:
while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):
u = randint(1, ec.prime - 1)
u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))
if ec.scale(u, factors[fixed]) == 0:
u = 0
elif ec.scale(u, factors[i]) == 0:
u = 0
shared = ec.scale(u, aPriv)
r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])
if ec.scale(u, r) != shared:
rems[i] = -rems[i] % factors[i]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ec = EC_M(233970423115425145524320034830162017933, 534, 1, 4, order=
233970423115425145498902418297807005944)
assert ec.scale(4, ec.order) == 0
aPriv = randint(1, ec.order - 1)
aPub = ec.scale(4, aPriv)
print('Factoring...')
twist_ord = 2 * ec.prime + 2 - ec.order
factors = []
x = twist_ord
for i in range(2, 2 ** 24):
if x % i == 0:
if x % (i * i) != 0:
factors.append(i)
x = pp(x, i)
print('Getting remainders...')
rems = []
for f in factors:
u = 0
while u == 0:
while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):
u = randint(1, ec.prime - 1)
u = ec.scale(u, pp(twist_ord, f))
while ec.scale(u, f) != 0:
u = ec.scale(u, f)
shared = ec.scale(u, aPriv)
for i in range(f):
if ec.scale(u, i) == shared:
print('\tSolved mod %d' % f)
rems.append(i)
break
print('Correcting parities...')
for i in range(len(factors)):
if rems[i] != 0:
break
fixed = i
for i in range(len(factors)):
if i == fixed:
continue
u = 0
while u == 0:
while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):
u = randint(1, ec.prime - 1)
u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))
if ec.scale(u, factors[fixed]) == 0:
u = 0
elif ec.scale(u, factors[i]) == 0:
u = 0
shared = ec.scale(u, aPriv)
r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])
if ec.scale(u, r) != shared:
rems[i] = -rems[i] % factors[i]
<|reserved_special_token_1|>
from matasano import *
# Small-subgroup attack via the curve's quadratic twist: recover the
# private key aPriv modulo small primes dividing the twist order, then fix
# up the sign ambiguity of each residue (Cryptopals-style challenge).
# Curve parameters for what is presumably a Montgomery curve over F_p,
# with base point u = 4 -- EC_M semantics live in the matasano package.
ec = EC_M(233970423115425145524320034830162017933, 534, 1, 4, order=
    233970423115425145498902418297807005944)
# Sanity check: the claimed group order annihilates the base point.
assert ec.scale(4, ec.order) == 0
# Victim's key pair; scale is presumably scalar multiplication -- TODO confirm.
aPriv = randint(1, ec.order - 1)
aPub = ec.scale(4, aPriv)
print('Factoring...')
# Twist order for a curve over F_p: 2p + 2 - |E|.
twist_ord = 2 * ec.prime + 2 - ec.order
# Collect primes < 2^24 that divide the twist order exactly once.
factors = []
x = twist_ord
for i in range(2, 2 ** 24):
    if x % i == 0:
        if x % (i * i) != 0:
            factors.append(i)
        # pp presumably strips all powers of i from x -- verify in matasano utils.
        x = pp(x, i)
print('Getting remainders...')
# For each small factor f: find a twist point of exact order f, then learn
# aPriv mod f (up to sign) by brute-forcing the shared secret.
rems = []
for f in factors:
    u = 0
    while u == 0:
        # Sample u until u^3 + A*u^2 + u is a non-residue, i.e. u lies on
        # the twist rather than on the curve itself.
        while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):
            u = randint(1, ec.prime - 1)
        # Project into the order-f subgroup of the twist (may give 0).
        u = ec.scale(u, pp(twist_ord, f))
    # Reduce until u has exact order f.
    while ec.scale(u, f) != 0:
        u = ec.scale(u, f)
    shared = ec.scale(u, aPriv)
    # Brute-force the discrete log inside the size-f subgroup.
    for i in range(f):
        if ec.scale(u, i) == shared:
            print('\tSolved mod %d' % f)
            rems.append(i)
            break
print('Correcting parities...')
# Each residue is only known up to sign, so make every residue's sign
# consistent with the first nonzero reference residue.
for i in range(len(factors)):
    if rems[i] != 0:
        break
fixed = i
for i in range(len(factors)):
    if i == fixed:
        continue
    u = 0
    while u == 0:
        while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):
            u = randint(1, ec.prime - 1)
        # Twist point of order factors[fixed] * factors[i].
        u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))
        # Reject samples that collapsed into either prime-order subgroup.
        if ec.scale(u, factors[fixed]) == 0:
            u = 0
        elif ec.scale(u, factors[i]) == 0:
            u = 0
    shared = ec.scale(u, aPriv)
    # If the CRT combination of the two residues fails to reproduce the
    # shared secret, the i-th residue has the opposite sign: negate it.
    r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])
    if ec.scale(u, r) != shared:
        rems[i] = -rems[i] % factors[i]
<|reserved_special_token_1|>
from matasano import *
ec = EC_M(233970423115425145524320034830162017933,534,1,4,order=233970423115425145498902418297807005944)
assert(ec.scale(4,ec.order) == 0)
aPriv = randint(1,ec.order-1)
aPub = ec.scale(4,aPriv)
print("Factoring...")
twist_ord = 2*ec.prime+2 - ec.order
factors = []
x = twist_ord
for i in range(2,2**24):
if x%i == 0:
if x%(i*i) != 0:
factors.append(i)
x = pp(x,i)
print("Getting remainders...")
rems = []
for f in factors:
u = 0
while u == 0:
while isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):
u = randint(1,ec.prime-1)
u = ec.scale(u,pp(twist_ord,f))
while ec.scale(u,f) != 0:
u = ec.scale(u,f)
shared = ec.scale(u,aPriv) #Not generating the MAC this time
for i in range(f):
if ec.scale(u,i) == shared:
print("\tSolved mod %d"%f)
rems.append(i)
break
#Now aPriv is +-rems[i] mod factors[i]
#Do them 2 at a time to get down to 2 values mod Prod factors[i]
print("Correcting parities...")
for i in range(len(factors)):
if rems[i] != 0:
break
fixed = i
for i in range(len(factors)):
if i == fixed:
continue
u = 0
while u == 0:
while isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):
u = randint(1,ec.prime-1)
u = ec.scale(u,pp(pp(twist_ord,factors[fixed]),factors[i]))
if ec.scale(u,factors[fixed]) == 0:
u = 0
elif ec.scale(u,factors[i]) == 0:
u = 0
shared = ec.scale(u,aPriv)
r,_ = crt([rems[fixed],rems[i]],[factors[fixed],factors[i]])
if ec.scale(u,r) != shared:
rems[i] = (-rems[i])%factors[i]
#Now I need to run down the remaining bits
|
flexible
|
{
"blob_id": "b5275fc068526063fd8baf13210052971b05503f",
"index": 585,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nassert ec.scale(4, ec.order) == 0\n<mask token>\nprint('Factoring...')\n<mask token>\nfor i in range(2, 2 ** 24):\n if x % i == 0:\n if x % (i * i) != 0:\n factors.append(i)\n x = pp(x, i)\nprint('Getting remainders...')\n<mask token>\nfor f in factors:\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(twist_ord, f))\n while ec.scale(u, f) != 0:\n u = ec.scale(u, f)\n shared = ec.scale(u, aPriv)\n for i in range(f):\n if ec.scale(u, i) == shared:\n print('\\tSolved mod %d' % f)\n rems.append(i)\n break\nprint('Correcting parities...')\nfor i in range(len(factors)):\n if rems[i] != 0:\n break\n<mask token>\nfor i in range(len(factors)):\n if i == fixed:\n continue\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))\n if ec.scale(u, factors[fixed]) == 0:\n u = 0\n elif ec.scale(u, factors[i]) == 0:\n u = 0\n shared = ec.scale(u, aPriv)\n r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])\n if ec.scale(u, r) != shared:\n rems[i] = -rems[i] % factors[i]\n",
"step-3": "<mask token>\nec = EC_M(233970423115425145524320034830162017933, 534, 1, 4, order=\n 233970423115425145498902418297807005944)\nassert ec.scale(4, ec.order) == 0\naPriv = randint(1, ec.order - 1)\naPub = ec.scale(4, aPriv)\nprint('Factoring...')\ntwist_ord = 2 * ec.prime + 2 - ec.order\nfactors = []\nx = twist_ord\nfor i in range(2, 2 ** 24):\n if x % i == 0:\n if x % (i * i) != 0:\n factors.append(i)\n x = pp(x, i)\nprint('Getting remainders...')\nrems = []\nfor f in factors:\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(twist_ord, f))\n while ec.scale(u, f) != 0:\n u = ec.scale(u, f)\n shared = ec.scale(u, aPriv)\n for i in range(f):\n if ec.scale(u, i) == shared:\n print('\\tSolved mod %d' % f)\n rems.append(i)\n break\nprint('Correcting parities...')\nfor i in range(len(factors)):\n if rems[i] != 0:\n break\nfixed = i\nfor i in range(len(factors)):\n if i == fixed:\n continue\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))\n if ec.scale(u, factors[fixed]) == 0:\n u = 0\n elif ec.scale(u, factors[i]) == 0:\n u = 0\n shared = ec.scale(u, aPriv)\n r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])\n if ec.scale(u, r) != shared:\n rems[i] = -rems[i] % factors[i]\n",
"step-4": "from matasano import *\nec = EC_M(233970423115425145524320034830162017933, 534, 1, 4, order=\n 233970423115425145498902418297807005944)\nassert ec.scale(4, ec.order) == 0\naPriv = randint(1, ec.order - 1)\naPub = ec.scale(4, aPriv)\nprint('Factoring...')\ntwist_ord = 2 * ec.prime + 2 - ec.order\nfactors = []\nx = twist_ord\nfor i in range(2, 2 ** 24):\n if x % i == 0:\n if x % (i * i) != 0:\n factors.append(i)\n x = pp(x, i)\nprint('Getting remainders...')\nrems = []\nfor f in factors:\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(twist_ord, f))\n while ec.scale(u, f) != 0:\n u = ec.scale(u, f)\n shared = ec.scale(u, aPriv)\n for i in range(f):\n if ec.scale(u, i) == shared:\n print('\\tSolved mod %d' % f)\n rems.append(i)\n break\nprint('Correcting parities...')\nfor i in range(len(factors)):\n if rems[i] != 0:\n break\nfixed = i\nfor i in range(len(factors)):\n if i == fixed:\n continue\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))\n if ec.scale(u, factors[fixed]) == 0:\n u = 0\n elif ec.scale(u, factors[i]) == 0:\n u = 0\n shared = ec.scale(u, aPriv)\n r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])\n if ec.scale(u, r) != shared:\n rems[i] = -rems[i] % factors[i]\n",
"step-5": "from matasano import *\r\n\r\nec = EC_M(233970423115425145524320034830162017933,534,1,4,order=233970423115425145498902418297807005944)\r\nassert(ec.scale(4,ec.order) == 0)\r\n\r\naPriv = randint(1,ec.order-1)\r\naPub = ec.scale(4,aPriv)\r\n\r\nprint(\"Factoring...\")\r\ntwist_ord = 2*ec.prime+2 - ec.order\r\nfactors = []\r\nx = twist_ord\r\nfor i in range(2,2**24):\r\n\tif x%i == 0:\r\n\t\tif x%(i*i) != 0:\r\n\t\t\tfactors.append(i)\r\n\t\tx = pp(x,i)\r\n\t\t\r\nprint(\"Getting remainders...\")\r\nrems = []\r\nfor f in factors:\r\n\tu = 0\r\n\twhile u == 0:\r\n\t\twhile isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):\r\n\t\t\tu = randint(1,ec.prime-1)\r\n\t\tu = ec.scale(u,pp(twist_ord,f))\r\n\twhile ec.scale(u,f) != 0:\r\n\t\tu = ec.scale(u,f)\r\n\tshared = ec.scale(u,aPriv)\t#Not generating the MAC this time\r\n\tfor i in range(f):\r\n\t\tif ec.scale(u,i) == shared:\r\n\t\t\tprint(\"\\tSolved mod %d\"%f)\r\n\t\t\trems.append(i)\r\n\t\t\tbreak\r\n\r\n#Now aPriv is +-rems[i] mod factors[i]\r\n#Do them 2 at a time to get down to 2 values mod Prod factors[i]\r\nprint(\"Correcting parities...\")\r\nfor i in range(len(factors)):\r\n\tif rems[i] != 0:\r\n\t\tbreak\r\nfixed = i\r\nfor i in range(len(factors)):\r\n\tif i == fixed:\r\n\t\tcontinue\r\n\tu = 0\r\n\twhile u == 0:\r\n\t\twhile isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):\r\n\t\t\tu = randint(1,ec.prime-1)\r\n\t\tu = ec.scale(u,pp(pp(twist_ord,factors[fixed]),factors[i]))\r\n\t\tif ec.scale(u,factors[fixed]) == 0:\r\n\t\t\tu = 0\r\n\t\telif ec.scale(u,factors[i]) == 0:\r\n\t\t\tu = 0\r\n\tshared = ec.scale(u,aPriv)\r\n\tr,_ = crt([rems[fixed],rems[i]],[factors[fixed],factors[i]])\r\n\tif ec.scale(u,r) != shared:\r\n\t\trems[i] = (-rems[i])%factors[i]\r\n\t\t\r\n#Now I need to run down the remaining bits\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from utils import *
import math
class State:
    """Search state (pose plus path costs) used by ARA* search."""

    def __init__(self, x, y, theta, parent=None, parent_action=None, g=float('inf'), h=float('inf')):
        # Pose: position plus heading, with theta wrapped into [0, 2*pi).
        self.x = x
        self.y = y
        self.theta = theta % (2 * math.pi)
        # Path cost g and heuristic h default to "not yet reached".
        self.g = g
        self.h = h
        # Back-pointers for path reconstruction.
        self.parent = parent
        self.parent_action = parent_action

    def __eq__(self, other):
        if isinstance(other, State):
            same_position = self.x == other.x and self.y == other.y
            return same_position and almostEqual(self.theta, other.theta)
        return False

    def __hash__(self):
        # Hash on whole degrees so nearly-equal headings usually collide.
        # NOTE(review): headings equal via almostEqual can still round to
        # different degrees at a boundary -- behavior inherited as-is.
        return hash((self.x, self.y, round(math.degrees(self.theta))))

    def __lt__(self, other):
        # Priority-queue ordering: cheaper path cost first.
        return self.g < other.g

    def setG(self, g):
        self.g = g

    def setH(self, h):
        self.h = h

    def setParent(self, parent):
        self.parent = parent

    def setParentAction(self, parent_action):
        self.parent_action = parent_action
|
normal
|
{
"blob_id": "c8f899958ce19e7e2bf1307a685e65873695f140",
"index": 9028,
"step-1": "<mask token>\n\n\nclass State:\n <mask token>\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n <mask token>\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-2": "<mask token>\n\n\nclass State:\n <mask token>\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-3": "<mask token>\n\n\nclass State:\n \"\"\"This class represents the search state that will be used for ARA* search\"\"\"\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-4": "from utils import *\nimport math\n\n\nclass State:\n \"\"\"This class represents the search state that will be used for ARA* search\"\"\"\n\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=\n float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2 * math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return self.x == other.x and self.y == other.y and almostEqual(self\n .theta, other.theta)\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n\n def setH(self, h):\n self.h = h\n\n def setParent(self, parent):\n self.parent = parent\n\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-5": "from utils import *\nimport math\n\nclass State:\n \"This class represents the search state that will be used for ARA* search\"\n def __init__(self, x, y, theta, parent=None, parent_action=None, g=float('inf'), h=float('inf')):\n self.x = x\n self.y = y\n self.theta = theta % (2*math.pi)\n self.g = g\n self.h = h\n self.parent = parent\n self.parent_action = parent_action\n\n def __eq__(self, other):\n if not isinstance(other, State):\n return False\n return (self.x == other.x) and (self.y == other.y) and (almostEqual(self.theta, other.theta))\n\n def __hash__(self):\n deg = round(math.degrees(self.theta))\n return hash((self.x, self.y, deg))\n\n def __lt__(self, other):\n return self.g < other.g\n\n def setG(self, g):\n self.g = g\n def setH(self, h):\n self.h = h\n def setParent(self, parent):\n self.parent = parent\n def setParentAction(self, parent_action):\n self.parent_action = parent_action\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
class MyClass:
<|reserved_special_token_0|>
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def say_hello(self):
self.greet = 'Hello'
def say_hi(self):
print('HI~~~~~')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class MyClass:
name = 'alice'
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def say_hello(self):
self.greet = 'Hello'
def say_hi(self):
print('HI~~~~~')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class MyClass:
name = 'alice'
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def say_hello(self):
self.greet = 'Hello'
def say_hi(self):
print('HI~~~~~')
<|reserved_special_token_0|>
print(p1.name)
p1.set_name('bob')
print(p1.name)
print(p2.name)
p1.say_hello()
print(p1.greet)
MyClass.say_hi('gg')
<|reserved_special_token_1|>
class MyClass:
name = 'alice'
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def say_hello(self):
self.greet = 'Hello'
def say_hi(self):
print('HI~~~~~')
p1 = MyClass()
p2 = MyClass()
print(p1.name)
p1.set_name('bob')
print(p1.name)
print(p2.name)
p1.say_hello()
print(p1.greet)
MyClass.say_hi('gg')
<|reserved_special_token_1|>
class MyClass:
    """Demo class contrasting the class attribute `name` with instance attributes."""
    # Class attribute shared by every instance until shadowed on an instance.
    name = "alice"
    def set_name(self, name):
        # Creates/overwrites an *instance* attribute that shadows the class one.
        self.name = name
    def get_name(self):
        return self.name
    def say_hello(self):
        # Adds a new instance attribute `greet` the first time it is called.
        self.greet = "Hello"
    def say_hi(self):
        # Uses no instance state, so any object can stand in for `self`.
        print("HI~~~~~")
p1 = MyClass()
p2 = MyClass()
print(p1.name)  # "alice" — attribute lookup falls back to the class attribute
p1.set_name("bob")
print(p1.name)  # "bob" — the instance attribute now shadows the class attribute
print(p2.name)  # "alice" — p2 is unaffected by p1's instance attribute
# After an instance member has been set, it can be accessed on that instance.
p1.say_hello()
print(p1.greet)
# The method is called through the class itself, so one argument must be
# passed explicitly to fill the `self` parameter.
MyClass.say_hi("gg")
|
flexible
|
{
"blob_id": "babb5ac680c74e19db5c86c2c3323e8285d169ff",
"index": 9939,
"step-1": "class MyClass:\n <mask token>\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\n",
"step-2": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\n",
"step-3": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\nprint(p1.name)\np1.set_name('bob')\nprint(p1.name)\nprint(p2.name)\np1.say_hello()\nprint(p1.greet)\nMyClass.say_hi('gg')\n",
"step-4": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\np1 = MyClass()\np2 = MyClass()\nprint(p1.name)\np1.set_name('bob')\nprint(p1.name)\nprint(p2.name)\np1.say_hello()\nprint(p1.greet)\nMyClass.say_hi('gg')\n",
"step-5": "class MyClass:\n name = \"alice\"\n \n def set_name(self, name):\n self.name = name\n \n def get_name(self):\n return self.name\n \n def say_hello(self):\n self.greet = \"Hello\"\n \n def say_hi(self):\n print(\"HI~~~~~\")\n \n\n\np1 = MyClass()\np2 = MyClass()\n\nprint(p1.name)\np1.set_name(\"bob\")\nprint(p1.name)\n\nprint(p2.name)\n\n# 인스턴스 멤버를 적용한후에 그 인스턴스 멤버에 접근 할 수 있다\np1.say_hello()\nprint(p1.greet)\n\n#클래스 메서드를 클래스. 으로 호출 했기 떄문에 self 파라미터를 하나 넘겨 줘야 한다 \nMyClass.say_hi(\"gg\")\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/env python3
"""Initiates connection to AWSIoT and provides helper functions
deviceshadowhandler.py
by Darren Dunford
"""
import json
import logging
import queue
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
LOGGER = logging.getLogger(__name__)
class DeviceShadowHandler:
    """Wraps an AWS IoT device-shadow connection and provides posting helpers."""

    def status_post(self, status, state=None):
        """Post status message and device state to AWSIoT and LOGGER

        :param status: status string
        :param state: optional dictionary to merge into the shadow reported state
        :return:
        """

        # Build the reported dict first so an optional extra state is merged
        # *alongside* the status.  The previous new_payload.update({"state": ...})
        # replaced the whole top-level "state" key, silently dropping both the
        # status string and the "desired": None clear whenever state was given.
        reported = {"status": str(status)}
        if state:
            reported.update(state)
        new_payload = {"state": {"reported": reported, "desired": None}}

        # update shadow (20 second timeout)
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)

        # log to syslog
        LOGGER.info(status)
        LOGGER.debug(json.dumps(new_payload))

    # constructor
    def __init__(self, thingname: str, host: str, root_ca_path: str, private_key_path: str, certificate_path: str):
        """Initiate AWS IoT connection

        :param thingname: AWSIoT thing name
        :param host: AWSIoT endpoint FQDN
        :param root_ca_path: local file path to Amazon root certificate
        :param private_key_path: local file path to device private key
        :param certificate_path: local file path to device certificate
        """

        # Init Shadow Client MQTT connection
        self.shadow_client = AWSIoTMQTTShadowClient(thingname)
        self.shadow_client.configureEndpoint(host, 8883)
        self.shadow_client.configureCredentials(root_ca_path, private_key_path, certificate_path)

        # AWSIoTMQTTShadowClient configuration
        self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)
        self.shadow_client.configureConnectDisconnectTimeout(20)  # 20 sec
        self.shadow_client.configureMQTTOperationTimeout(20)  # 20 sec

        # force shadow client to use offline publish queueing
        # overriding the default behaviour for shadow clients in the SDK
        mqtt_client = self.shadow_client.getMQTTConnection()
        mqtt_client.configureOfflinePublishQueueing(-1)

        # Connect to AWS IoT with a 300 second keepalive
        self.shadow_client.connect(300)

        # Create a deviceShadow with persistent subscription and register delta handler
        self.shadow_handler = self.shadow_client.createShadowHandlerWithName(thingname, True)
        self.shadow_handler.shadowRegisterDeltaCallback(self.custom_shadow_callback_delta)

        # initial status post
        self.status_post('STARTING')

        # dictionary to hold callback responses, keyed by operation token
        self._callbackresponses = {}

        # callbacks in this class post events on to this queue
        self.event_queue = queue.SimpleQueue()

        self.settings = {}

    # Custom shadow callback for delta -> remote triggering
    def custom_shadow_callback_delta(self, payload: str, response_status, token):
        """Queue commands/settings received via a shadow delta and acknowledge them.

        :param payload: JSON string ready to be parsed using json.loads(...)
        :param response_status: ignored
        :param token: ignored
        """

        # DEBUG dump payload in to syslog
        LOGGER.debug(payload)

        # create JSON dictionary from payload
        payload_dict = json.loads(payload)

        # Accumulate every acknowledgement in one "desired" dict.  The previous
        # new_payload.update({"state": ...}) calls replaced the whole "state"
        # key, so a delta carrying both a command and settings lost the
        # command-clearing acknowledgement.
        desired = {}

        # check for command, if received push event on to queue
        if payload_dict.get('state').get('command'):
            self.event_queue.put_nowait({"command": payload_dict.get('state').get('command')})
            desired["command"] = None  # clear the command so it is not re-delivered

        # check for settings, if received push event on to queue and echo them back
        if payload_dict.get('state').get('settings'):
            self.event_queue.put_nowait({"settings": payload_dict.get('state').get('settings')})
            desired["settings"] = payload_dict.get('state').get('settings')

        # preserve the original behaviour of sending an empty update when the
        # delta carried neither a command nor settings
        new_payload = {"state": {"desired": desired}} if desired else {}

        LOGGER.info("Shadow update: " + json.dumps(new_payload))

        # update shadow instance status
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)

    def custom_shadow_callback_get(self, payload, response_status, token):
        """Callback function records response from get shadow operation

        :param payload: JSON string returned by the shadow get
        :param response_status: status string reported by the SDK
        :param token: operation token used later to retrieve this response
        :return:
        """
        self._callbackresponses.update({token: {"payload": json.loads(payload), "responseStatus": response_status}})

    def get_response(self, token):
        """Return prior get shadow operation response

        note each response is deleted when returned, i.e. can only be returned once

        :param token: operation token passed to the get callback
        :return: dict with "payload" and "responseStatus" keys
        """
        return self._callbackresponses.pop(token)

    # post all parameters as a shadow update
    def post_param(self):
        """Report the current settings dict to the shadow and clear desired state."""
        new_payload = {"state": {"reported": {"settings": self.settings}, "desired": None}}
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)

    # post state update to device shadow and, if enabled, syslog
    def post_state(self, state):
        """Report a status value to the shadow and log it.

        :param state: JSON-serialisable status value
        """

        # create new JSON payload to update device shadow
        new_payload = {"state": {"reported": {"status": state}, "desired": None}}
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)

        # log to syslog
        LOGGER.info("New state" + json.dumps(state))

    def post_temperature(self, temp):
        """Report the device CPU temperature to the shadow.

        :param temp: temperature reading (JSON-serialisable)
        """

        # create new JSON payload to send device temperature to shadow
        new_payload = {"state": {"reported": {"cputemp": temp}}}
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)

        # log to syslog on debug only
        LOGGER.debug("New temp payload " + json.dumps(new_payload))
|
normal
|
{
"blob_id": "a6d409b806dbd1e174cac65a26c5e8106a8b93ea",
"index": 3760,
"step-1": "<mask token>\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n 
:param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n <mask token>\n <mask token>\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n 
:param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n <mask token>\n\n def post_param(self):\n new_payload = {'state': {'reported': {'settings': self.settings},\n 'desired': None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n\n def post_temperature(self, temp):\n new_payload = {'state': {'reported': {'cputemp': temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.debug('New temp payload ' + json.dumps(new_payload))\n",
"step-3": "<mask token>\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n 
:param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n\n def get_response(self, token):\n \"\"\"Return prior get shadow operation response\n\n note each response is deleted when returned, i.e. can only be returned once\n\n :param token:\n :return:\n \"\"\"\n return self._callbackresponses.pop(token)\n\n def post_param(self):\n new_payload = {'state': {'reported': {'settings': self.settings},\n 'desired': None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n\n def post_temperature(self, temp):\n new_payload = {'state': {'reported': {'cputemp': temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.debug('New temp payload ' + json.dumps(new_payload))\n",
"step-4": "<mask token>\nLOGGER = logging.getLogger(__name__)\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready 
to be parsed using json.loads(...)\n :param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n\n def get_response(self, token):\n \"\"\"Return prior get shadow operation response\n\n note each response is deleted when returned, i.e. can only be returned once\n\n :param token:\n :return:\n \"\"\"\n return self._callbackresponses.pop(token)\n\n def post_param(self):\n new_payload = {'state': {'reported': {'settings': self.settings},\n 'desired': None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n\n def post_temperature(self, temp):\n new_payload = {'state': {'reported': {'cputemp': temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.debug('New temp payload ' + json.dumps(new_payload))\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"Initiates connection to AWSIoT and provides helper functions\n\ndeviceshadowhandler.py\n\nby Darren Dunford\n\"\"\"\n\nimport json\nimport logging\nimport queue\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n\n # create new JSON payload to update device shadow\n new_payload = {\"state\": {\"reported\": {\"status\": str(status)}, \"desired\": None}}\n if state:\n new_payload.update({\"state\": {\"reported\": state}})\n\n # update shadow\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n # constructor\n def __init__(self, thingname: str, host: str, root_ca_path: str, private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n\n # Init Shadow Client MQTT connection\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path, private_key_path, certificate_path)\n\n # AWSIoTMQTTShadowClient configuration\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20) # 20 sec\n self.shadow_client.configureMQTTOperationTimeout(20) # 20 sec\n\n # force shadow client to use offline publish queueing\n # overriding the default behaviour for shadow clients 
in the SDK\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n\n # Connect to AWS IoT with a 300 second keepalive\n self.shadow_client.connect(300)\n\n # Create a deviceShadow with persistent subscription and register delta handler\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.custom_shadow_callback_delta)\n\n # initial status post\n self.status_post('STARTING')\n\n # dictionary to hold callback responses\n self._callbackresponses = {}\n\n # callbacks in this class post events on to this queue\n self.event_queue = queue.SimpleQueue()\n\n self.settings = {}\n\n # Custom shadow callback for delta -> remote triggering\n def custom_shadow_callback_delta(self, payload: str, response_status, token):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n :param response_status: ignored\n :param token: ignored\n \"\"\"\n\n # DEBUG dump payload in to syslog\n LOGGER.debug(payload)\n\n # create JSON dictionary from payload\n payload_dict = json.loads(payload)\n new_payload = {}\n\n # check for command, if received push event on to queue\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({\"command\":payload_dict.get('state').get('command')})\n new_payload.update({\"state\": {\"desired\": {\"command\": None}}})\n\n # check for settings, if received push event on to queue\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({\"settings\":payload_dict.get('state').get('settings')})\n new_payload.update({\"state\": {\"desired\": {\"settings\": payload_dict.get('state').get('settings')}}})\n\n LOGGER.info(\"Shadow update: \" + json.dumps(new_payload))\n\n # update shadow instance status\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback 
function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {\"payload\": json.loads(payload), \"responseStatus\": response_status}})\n\n def get_response(self, token):\n \"\"\"Return prior get shadow operation response\n\n note each response is deleted when returned, i.e. can only be returned once\n\n :param token:\n :return:\n \"\"\"\n return self._callbackresponses.pop(token)\n\n # post all parameters as a shadow update\n def post_param(self):\n new_payload = {\"state\": {\"reported\": {\"settings\": self.settings}, \"desired\": None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n # post state update to device shadow and, if enabled, syslog\n def post_state(self, state):\n\n # create new JSON payload to update device shadow\n new_payload = {\"state\": {\"reported\": {\"status\": state}, \"desired\": None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog\n LOGGER.info(\"New state\" + json.dumps(state))\n\n def post_temperature(self, temp):\n\n # create new JSON payload to send device temperature to shadow\n new_payload = {\"state\": {\"reported\": {\"cputemp\": temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog on debug only\n LOGGER.debug(\"New temp payload \" + json.dumps(new_payload))\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
<|reserved_special_token_0|>
def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):
    """
    Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file
    Need a <filename> to work with. Include a path to a dump directory, if desired
    :param pdb_names: list(pdb names)
    :param pdb_sequences: list(pdb sequences)
    :param filename: str(filename)
    :param dump_dir: str(optional directory in which to write the file)
    :return: Bool
    """
    # ensure that the pdb_names and pdb_sequences lists are the same length
    if len(pdb_names) != len(pdb_sequences):
        return False
    # add .txt to the filename, if needed
    if not filename.endswith('.txt'):
        filename += '.txt'
    # honor dump_dir, which was previously accepted but silently ignored
    if dump_dir:
        import os
        filename = os.path.join(dump_dir, filename)
    # write one two-line FASTA record per (name, sequence) pair
    # bug fix: format the record *before* calling write(); the original code
    # applied '%' to write()'s return value (None), raising a TypeError
    with open(filename, 'w') as fh:
        for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):
            fh.write('>%s\n%s\n' % (pdb_name, pdb_seq))
    return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_sequence(pose, res_nums=None):
"""
Return the sequence of the <pose>, or, return the sequence listed in <res_nums>
:param pose: Pose
:param res_nums: list() of Pose residue numbers
:return: str(Pose sequence)
"""
if res_nums is None:
return str(pose.sequence())
else:
return str(''.join([pose.residue(r).name1() for r in res_nums]))
<|reserved_special_token_0|>
def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):
    """
    Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file
    Need a <filename> to work with. Include a path to a dump directory, if desired
    :param pdb_names: list(pdb names)
    :param pdb_sequences: list(pdb sequences)
    :param filename: str(filename)
    :param dump_dir: str(optional directory in which to write the file)
    :return: Bool
    """
    # ensure that the pdb_names and pdb_sequences lists are the same length
    if len(pdb_names) != len(pdb_sequences):
        return False
    # add .txt to the filename, if needed
    if not filename.endswith('.txt'):
        filename += '.txt'
    # honor dump_dir, which was previously accepted but silently ignored
    if dump_dir:
        import os
        filename = os.path.join(dump_dir, filename)
    # write one two-line FASTA record per (name, sequence) pair
    # bug fix: format the record *before* calling write(); the original code
    # applied '%' to write()'s return value (None), raising a TypeError
    with open(filename, 'w') as fh:
        for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):
            fh.write('>%s\n%s\n' % (pdb_name, pdb_seq))
    return True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_sequence(pose, res_nums=None):
"""
Return the sequence of the <pose>, or, return the sequence listed in <res_nums>
:param pose: Pose
:param res_nums: list() of Pose residue numbers
:return: str(Pose sequence)
"""
if res_nums is None:
return str(pose.sequence())
else:
return str(''.join([pose.residue(r).name1() for r in res_nums]))
def get_atom_pair_distance(pose, res1, atom1, res2, atom2):
    """
    Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>
    :param pose: Pose
    :param res1: int(residue number)
    :param atom1: int(atom number)
    :param res2: int(residue number)
    :param atom2: int(atom number)
    :return: float(xyz distance)
    """
    # grab each atom's coordinates straight off the pose
    xyz_one = pose.residue(res1).atom(atom1).xyz()
    xyz_two = pose.residue(res2).atom(atom2).xyz()
    # measure and return the separation between the two coordinate vectors
    return float(xyz_one.distance(xyz_two))
def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):
    """
    Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file
    Need a <filename> to work with. Include a path to a dump directory, if desired
    :param pdb_names: list(pdb names)
    :param pdb_sequences: list(pdb sequences)
    :param filename: str(filename)
    :param dump_dir: str(optional directory in which to write the file)
    :return: Bool
    """
    # ensure that the pdb_names and pdb_sequences lists are the same length
    if len(pdb_names) != len(pdb_sequences):
        return False
    # add .txt to the filename, if needed
    if not filename.endswith('.txt'):
        filename += '.txt'
    # honor dump_dir, which was previously accepted but silently ignored
    if dump_dir:
        import os
        filename = os.path.join(dump_dir, filename)
    # write one two-line FASTA record per (name, sequence) pair
    # bug fix: format the record *before* calling write(); the original code
    # applied '%' to write()'s return value (None), raising a TypeError
    with open(filename, 'w') as fh:
        for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):
            fh.write('>%s\n%s\n' % (pdb_name, pdb_seq))
    return True
<|reserved_special_token_1|>
__author__ = 'morganlnance'
<|reserved_special_token_0|>
def get_sequence(pose, res_nums=None):
"""
Return the sequence of the <pose>, or, return the sequence listed in <res_nums>
:param pose: Pose
:param res_nums: list() of Pose residue numbers
:return: str(Pose sequence)
"""
if res_nums is None:
return str(pose.sequence())
else:
return str(''.join([pose.residue(r).name1() for r in res_nums]))
def get_atom_pair_distance(pose, res1, atom1, res2, atom2):
    """
    Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>
    :param pose: Pose
    :param res1: int(residue number)
    :param atom1: int(atom number)
    :param res2: int(residue number)
    :param atom2: int(atom number)
    :return: float(xyz distance)
    """
    # grab each atom's coordinates straight off the pose
    xyz_one = pose.residue(res1).atom(atom1).xyz()
    xyz_two = pose.residue(res2).atom(atom2).xyz()
    # measure and return the separation between the two coordinate vectors
    return float(xyz_one.distance(xyz_two))
def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):
    """
    Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file
    Need a <filename> to work with. Include a path to a dump directory, if desired
    :param pdb_names: list(pdb names)
    :param pdb_sequences: list(pdb sequences)
    :param filename: str(filename)
    :param dump_dir: str(optional directory in which to write the file)
    :return: Bool
    """
    # ensure that the pdb_names and pdb_sequences lists are the same length
    if len(pdb_names) != len(pdb_sequences):
        return False
    # add .txt to the filename, if needed
    if not filename.endswith('.txt'):
        filename += '.txt'
    # honor dump_dir, which was previously accepted but silently ignored
    if dump_dir:
        import os
        filename = os.path.join(dump_dir, filename)
    # write one two-line FASTA record per (name, sequence) pair
    # bug fix: format the record *before* calling write(); the original code
    # applied '%' to write()'s return value (None), raising a TypeError
    with open(filename, 'w') as fh:
        for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):
            fh.write('>%s\n%s\n' % (pdb_name, pdb_seq))
    return True
<|reserved_special_token_1|>
#!/usr/bin/python
__author__ = "morganlnance"
'''
Analysis functions using PyRosetta4
'''
def get_sequence(pose, res_nums=None):
# type: (Pose, list) -> str
"""
Return the sequence of the <pose>, or, return the sequence listed in <res_nums>
:param pose: Pose
:param res_nums: list() of Pose residue numbers
:return: str(Pose sequence)
"""
# if no res_nums were given, return the pose's sequence
if res_nums is None:
return str(pose.sequence())
# else, return the sequence of the specified res_nums
else:
return str(''.join([pose.residue(r).name1() for r in res_nums]))
def get_atom_pair_distance(pose, res1, atom1, res2, atom2):
    """
    Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>
    :param pose: Pose
    :param res1: int(residue number)
    :param atom1: int(atom number)
    :param res2: int(residue number)
    :param atom2: int(atom number)
    :return: float(xyz distance)
    """
    # grab each atom's coordinates straight off the pose
    xyz_one = pose.residue(res1).atom(atom1).xyz()
    xyz_two = pose.residue(res2).atom(atom2).xyz()
    # measure and return the separation between the two coordinate vectors
    return float(xyz_one.distance(xyz_two))
def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):
    """
    Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file
    Need a <filename> to work with. Include a path to a dump directory, if desired
    :param pdb_names: list(pdb names)
    :param pdb_sequences: list(pdb sequences)
    :param filename: str(filename)
    :param dump_dir: str(optional directory in which to write the file)
    :return: Bool
    """
    # ensure that the pdb_names and pdb_sequences lists are the same length
    if len(pdb_names) != len(pdb_sequences):
        return False
    # add .txt to the filename, if needed
    if not filename.endswith('.txt'):
        filename += '.txt'
    # honor dump_dir, which was previously accepted but silently ignored
    if dump_dir:
        import os
        filename = os.path.join(dump_dir, filename)
    # write one two-line FASTA record per (name, sequence) pair
    # bug fix: format the record *before* calling write(); the original code
    # applied '%' to write()'s return value (None), raising a TypeError
    with open(filename, 'w') as fh:
        for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):
            fh.write('>%s\n%s\n' % (pdb_name, pdb_seq))
    return True
|
flexible
|
{
"blob_id": "876e9f03c908338a247b6bf1f23011e609bbc2a5",
"index": 8739,
"step-1": "<mask token>\n\n\ndef write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):\n \"\"\"\n Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file\n Need a <filename> to work with. Include a path to a dump directory, if desired\n :param pdb_names: list(pdb names)\n :param pdb_sequences: list(pdb sequences)\n :param filename: str(filename)\n :return: Bool\n \"\"\"\n if len(pdb_names) != len(pdb_sequences):\n return False\n if not filename.endswith('.txt'):\n filename += '.txt'\n with open(filename, 'w') as fh:\n for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):\n fh.write('>%s\\n%s\\n') % (pdb_name, pdb_seq)\n",
"step-2": "<mask token>\n\n\ndef get_sequence(pose, res_nums=None):\n \"\"\"\n Return the sequence of the <pose>, or, return the sequence listed in <res_nums>\n :param pose: Pose\n :param res_nums: list() of Pose residue numbers\n :return: str(Pose sequence)\n \"\"\"\n if res_nums is None:\n return str(pose.sequence())\n else:\n return str(''.join([pose.residue(r).name1() for r in res_nums]))\n\n\n<mask token>\n\n\ndef write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):\n \"\"\"\n Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file\n Need a <filename> to work with. Include a path to a dump directory, if desired\n :param pdb_names: list(pdb names)\n :param pdb_sequences: list(pdb sequences)\n :param filename: str(filename)\n :return: Bool\n \"\"\"\n if len(pdb_names) != len(pdb_sequences):\n return False\n if not filename.endswith('.txt'):\n filename += '.txt'\n with open(filename, 'w') as fh:\n for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):\n fh.write('>%s\\n%s\\n') % (pdb_name, pdb_seq)\n",
"step-3": "<mask token>\n\n\ndef get_sequence(pose, res_nums=None):\n \"\"\"\n Return the sequence of the <pose>, or, return the sequence listed in <res_nums>\n :param pose: Pose\n :param res_nums: list() of Pose residue numbers\n :return: str(Pose sequence)\n \"\"\"\n if res_nums is None:\n return str(pose.sequence())\n else:\n return str(''.join([pose.residue(r).name1() for r in res_nums]))\n\n\ndef get_atom_pair_distance(pose, res1, atom1, res2, atom2):\n \"\"\"\n Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>\n :param pose: Pose\n :param res1: int(residue number)\n :param atom1: int(atom number)\n :param res2: int(residue number)\n :param atom2: int(atom number)\n :return: float(xyz distance)\n \"\"\"\n atom1 = pose.residue(res1).atom(atom1)\n atom2 = pose.residue(res2).atom(atom2)\n return float(atom1.xyz().distance(atom2.xyz()))\n\n\ndef write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):\n \"\"\"\n Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file\n Need a <filename> to work with. Include a path to a dump directory, if desired\n :param pdb_names: list(pdb names)\n :param pdb_sequences: list(pdb sequences)\n :param filename: str(filename)\n :return: Bool\n \"\"\"\n if len(pdb_names) != len(pdb_sequences):\n return False\n if not filename.endswith('.txt'):\n filename += '.txt'\n with open(filename, 'w') as fh:\n for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):\n fh.write('>%s\\n%s\\n') % (pdb_name, pdb_seq)\n",
"step-4": "__author__ = 'morganlnance'\n<mask token>\n\n\ndef get_sequence(pose, res_nums=None):\n \"\"\"\n Return the sequence of the <pose>, or, return the sequence listed in <res_nums>\n :param pose: Pose\n :param res_nums: list() of Pose residue numbers\n :return: str(Pose sequence)\n \"\"\"\n if res_nums is None:\n return str(pose.sequence())\n else:\n return str(''.join([pose.residue(r).name1() for r in res_nums]))\n\n\ndef get_atom_pair_distance(pose, res1, atom1, res2, atom2):\n \"\"\"\n Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>\n :param pose: Pose\n :param res1: int(residue number)\n :param atom1: int(atom number)\n :param res2: int(residue number)\n :param atom2: int(atom number)\n :return: float(xyz distance)\n \"\"\"\n atom1 = pose.residue(res1).atom(atom1)\n atom2 = pose.residue(res2).atom(atom2)\n return float(atom1.xyz().distance(atom2.xyz()))\n\n\ndef write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):\n \"\"\"\n Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file\n Need a <filename> to work with. Include a path to a dump directory, if desired\n :param pdb_names: list(pdb names)\n :param pdb_sequences: list(pdb sequences)\n :param filename: str(filename)\n :return: Bool\n \"\"\"\n if len(pdb_names) != len(pdb_sequences):\n return False\n if not filename.endswith('.txt'):\n filename += '.txt'\n with open(filename, 'w') as fh:\n for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):\n fh.write('>%s\\n%s\\n') % (pdb_name, pdb_seq)\n",
"step-5": "#!/usr/bin/python\n__author__ = \"morganlnance\"\n\n'''\nAnalysis functions using PyRosetta4\n'''\n\n\ndef get_sequence(pose, res_nums=None):\n # type: (Pose, list) -> str\n \"\"\"\n Return the sequence of the <pose>, or, return the sequence listed in <res_nums>\n :param pose: Pose\n :param res_nums: list() of Pose residue numbers\n :return: str(Pose sequence)\n \"\"\"\n # if no res_nums were given, return the pose's sequence\n if res_nums is None:\n return str(pose.sequence())\n # else, return the sequence of the specified res_nums\n else:\n return str(''.join([pose.residue(r).name1() for r in res_nums]))\n\n\ndef get_atom_pair_distance(pose, res1, atom1, res2, atom2):\n \"\"\"\n Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>\n :param pose: Pose\n :param res1: int(residue number)\n :param atom1: int(atom number)\n :param res2: int(residue number)\n :param atom2: int(atom number)\n :return: float(xyz distance)\n \"\"\"\n # pull out the atom objects from pose\n atom1 = pose.residue(res1).atom(atom1)\n atom2 = pose.residue(res2).atom(atom2)\n\n # calculate and return the distance between atom1 and atom2\n return float(atom1.xyz().distance(atom2.xyz()))\n\n\ndef write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):\n \"\"\"\n Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file\n Need a <filename> to work with. 
Include a path to a dump directory, if desired\n :param pdb_names: list(pdb names)\n :param pdb_sequences: list(pdb sequences)\n :param filename: str(filename)\n :return: Bool\n \"\"\"\n # ensure that the pdb_names and pdb_sequences lists are the same length\n if len(pdb_names) != len(pdb_sequences):\n return False\n\n # add .txt to the filename, if needed\n if not filename.endswith(\".txt\"):\n filename += \".txt\"\n\n # write out the fasta file\n with open(filename, 'w') as fh:\n for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):\n fh.write(\">%s\\n%s\\n\") %(pdb_name, pdb_seq)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
import z3
digit_search = re.compile('\-?\d+')
def get_sensor_beacon(data_in):
sensors = {}
beacons = set()
for line in data_in:
s_x, s_y, b_x, b_y = list(map(int, digit_search.findall(line)))
sensors[(s_x, s_y)] = abs(s_x - b_x) + abs(s_y - b_y)
beacons.add((b_x, b_y))
return sensors, beacons
def manhat(point_one, point_two):
return abs(point_one[0] - point_two[0]) + abs(point_one[1] - point_two[1])
def find_edge(sensors, pos, dir):
x, row = pos
closer = []
for sensor in sensors.keys():
if manhat(pos, sensor) <= sensors[sensor]:
closer.append(sensor)
if dir > 0:
edgiest = [sensor for sensor in sensors.keys() if sensor[0] == max([x for x, y in closer])][0]
elif dir < 0:
edgiest = [sensor for sensor in sensors.keys() if sensor[0] == min([x for x, y in closer])][0]
if dir > 0:
if pos[0] > edgiest[0] and max([sensors[point] - manhat(pos, point) for point in closer]) == 0:
return x
elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:
new_x = x + max([1, (sensors[edgiest] - manhat(pos, edgiest))]) * dir
return find_edge(sensors, (new_x, row), dir)
elif dir < 0:
if pos[0] < edgiest[0] and max([sensors[point] - manhat(pos, point) for point in closer]) == 0:
return x
elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:
new_x = x + max([1, (sensors[edgiest] - manhat(pos, edgiest))]) * dir
return find_edge(sensors, (new_x, row), dir)
else:
raise Exception("This shouldn't be happening. We've gone too far!")
def no_beacon_row(sensors, beacons, row):
start_x = int(sum([y for x,y in sensors.keys()])/len(sensors.keys()))
beacons_on_row = len([beacon for beacon in beacons if beacon[1] == row])
# print(start_x)
# print(beacons_on_row)
# print(find_edge(sensors, (start_x, row), 1), find_edge(sensors, (start_x, row), -1))
return find_edge(sensors, (start_x, row), 1) - find_edge(sensors, (start_x, row), -1) - beacons_on_row + 1
# airlifted and modified to fit from u/hugh_tc https://www.reddit.com/r/adventofcode/comments/zmcn64/2022_day_15_solutions/j0af5cy/
def part_two(data_in):
s = z3.Solver()
x = z3.Int("x")
y = z3.Int("y")
s.add(0 <= x)
s.add(x <= 4000000)
s.add(0 <= y)
s.add(y <= 4000000)
def z3_abs(x):
return z3.If(x >= 0, x, -x)
for line in data:
sx, sy, bx, by = [int(x) for x in digit_search.findall(line)]
m = abs(sx - bx) + abs(sy - by)
s.add(z3_abs(sx - x) + z3_abs(sy - y) > m)
s.check()
outx, outy = s.model()[x].as_long(), s.model()[y].as_long()
return outx * 4000000 + outy
with open("day15.txt" , "r") as f:
data = f.read().split('\n')
sensor_list, beacon_list = get_sensor_beacon(data)
print("Part One:", no_beacon_row(sensor_list, beacon_list, 2000000))
print("Part Two:", part_two(data))
|
normal
|
{
"blob_id": "c4bd55be86c1f55d89dfcbba2ccde4f3b132edcb",
"index": 9981,
"step-1": "<mask token>\n\n\ndef manhat(point_one, point_two):\n return abs(point_one[0] - point_two[0]) + abs(point_one[1] - point_two[1])\n\n\ndef find_edge(sensors, pos, dir):\n x, row = pos\n closer = []\n for sensor in sensors.keys():\n if manhat(pos, sensor) <= sensors[sensor]:\n closer.append(sensor)\n if dir > 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == max(\n [x for x, y in closer])][0]\n elif dir < 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == min(\n [x for x, y in closer])][0]\n if dir > 0:\n if pos[0] > edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n elif dir < 0:\n if pos[0] < edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n else:\n raise Exception(\"This shouldn't be happening. We've gone too far!\")\n\n\n<mask token>\n\n\ndef part_two(data_in):\n s = z3.Solver()\n x = z3.Int('x')\n y = z3.Int('y')\n s.add(0 <= x)\n s.add(x <= 4000000)\n s.add(0 <= y)\n s.add(y <= 4000000)\n\n def z3_abs(x):\n return z3.If(x >= 0, x, -x)\n for line in data:\n sx, sy, bx, by = [int(x) for x in digit_search.findall(line)]\n m = abs(sx - bx) + abs(sy - by)\n s.add(z3_abs(sx - x) + z3_abs(sy - y) > m)\n s.check()\n outx, outy = s.model()[x].as_long(), s.model()[y].as_long()\n return outx * 4000000 + outy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef manhat(point_one, point_two):\n return abs(point_one[0] - point_two[0]) + abs(point_one[1] - point_two[1])\n\n\ndef find_edge(sensors, pos, dir):\n x, row = pos\n closer = []\n for sensor in sensors.keys():\n if manhat(pos, sensor) <= sensors[sensor]:\n closer.append(sensor)\n if dir > 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == max(\n [x for x, y in closer])][0]\n elif dir < 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == min(\n [x for x, y in closer])][0]\n if dir > 0:\n if pos[0] > edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n elif dir < 0:\n if pos[0] < edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n else:\n raise Exception(\"This shouldn't be happening. 
We've gone too far!\")\n\n\ndef no_beacon_row(sensors, beacons, row):\n start_x = int(sum([y for x, y in sensors.keys()]) / len(sensors.keys()))\n beacons_on_row = len([beacon for beacon in beacons if beacon[1] == row])\n return find_edge(sensors, (start_x, row), 1) - find_edge(sensors, (\n start_x, row), -1) - beacons_on_row + 1\n\n\ndef part_two(data_in):\n s = z3.Solver()\n x = z3.Int('x')\n y = z3.Int('y')\n s.add(0 <= x)\n s.add(x <= 4000000)\n s.add(0 <= y)\n s.add(y <= 4000000)\n\n def z3_abs(x):\n return z3.If(x >= 0, x, -x)\n for line in data:\n sx, sy, bx, by = [int(x) for x in digit_search.findall(line)]\n m = abs(sx - bx) + abs(sy - by)\n s.add(z3_abs(sx - x) + z3_abs(sy - y) > m)\n s.check()\n outx, outy = s.model()[x].as_long(), s.model()[y].as_long()\n return outx * 4000000 + outy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_sensor_beacon(data_in):\n sensors = {}\n beacons = set()\n for line in data_in:\n s_x, s_y, b_x, b_y = list(map(int, digit_search.findall(line)))\n sensors[s_x, s_y] = abs(s_x - b_x) + abs(s_y - b_y)\n beacons.add((b_x, b_y))\n return sensors, beacons\n\n\ndef manhat(point_one, point_two):\n return abs(point_one[0] - point_two[0]) + abs(point_one[1] - point_two[1])\n\n\ndef find_edge(sensors, pos, dir):\n x, row = pos\n closer = []\n for sensor in sensors.keys():\n if manhat(pos, sensor) <= sensors[sensor]:\n closer.append(sensor)\n if dir > 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == max(\n [x for x, y in closer])][0]\n elif dir < 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == min(\n [x for x, y in closer])][0]\n if dir > 0:\n if pos[0] > edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n elif dir < 0:\n if pos[0] < edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n else:\n raise Exception(\"This shouldn't be happening. 
We've gone too far!\")\n\n\ndef no_beacon_row(sensors, beacons, row):\n start_x = int(sum([y for x, y in sensors.keys()]) / len(sensors.keys()))\n beacons_on_row = len([beacon for beacon in beacons if beacon[1] == row])\n return find_edge(sensors, (start_x, row), 1) - find_edge(sensors, (\n start_x, row), -1) - beacons_on_row + 1\n\n\ndef part_two(data_in):\n s = z3.Solver()\n x = z3.Int('x')\n y = z3.Int('y')\n s.add(0 <= x)\n s.add(x <= 4000000)\n s.add(0 <= y)\n s.add(y <= 4000000)\n\n def z3_abs(x):\n return z3.If(x >= 0, x, -x)\n for line in data:\n sx, sy, bx, by = [int(x) for x in digit_search.findall(line)]\n m = abs(sx - bx) + abs(sy - by)\n s.add(z3_abs(sx - x) + z3_abs(sy - y) > m)\n s.check()\n outx, outy = s.model()[x].as_long(), s.model()[y].as_long()\n return outx * 4000000 + outy\n\n\n<mask token>\n",
"step-4": "<mask token>\ndigit_search = re.compile('\\\\-?\\\\d+')\n\n\ndef get_sensor_beacon(data_in):\n sensors = {}\n beacons = set()\n for line in data_in:\n s_x, s_y, b_x, b_y = list(map(int, digit_search.findall(line)))\n sensors[s_x, s_y] = abs(s_x - b_x) + abs(s_y - b_y)\n beacons.add((b_x, b_y))\n return sensors, beacons\n\n\ndef manhat(point_one, point_two):\n return abs(point_one[0] - point_two[0]) + abs(point_one[1] - point_two[1])\n\n\ndef find_edge(sensors, pos, dir):\n x, row = pos\n closer = []\n for sensor in sensors.keys():\n if manhat(pos, sensor) <= sensors[sensor]:\n closer.append(sensor)\n if dir > 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == max(\n [x for x, y in closer])][0]\n elif dir < 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == min(\n [x for x, y in closer])][0]\n if dir > 0:\n if pos[0] > edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n elif dir < 0:\n if pos[0] < edgiest[0] and max([(sensors[point] - manhat(pos, point\n )) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, sensors[edgiest] - manhat(pos, edgiest)]) * dir\n return find_edge(sensors, (new_x, row), dir)\n else:\n raise Exception(\"This shouldn't be happening. 
We've gone too far!\")\n\n\ndef no_beacon_row(sensors, beacons, row):\n start_x = int(sum([y for x, y in sensors.keys()]) / len(sensors.keys()))\n beacons_on_row = len([beacon for beacon in beacons if beacon[1] == row])\n return find_edge(sensors, (start_x, row), 1) - find_edge(sensors, (\n start_x, row), -1) - beacons_on_row + 1\n\n\ndef part_two(data_in):\n s = z3.Solver()\n x = z3.Int('x')\n y = z3.Int('y')\n s.add(0 <= x)\n s.add(x <= 4000000)\n s.add(0 <= y)\n s.add(y <= 4000000)\n\n def z3_abs(x):\n return z3.If(x >= 0, x, -x)\n for line in data:\n sx, sy, bx, by = [int(x) for x in digit_search.findall(line)]\n m = abs(sx - bx) + abs(sy - by)\n s.add(z3_abs(sx - x) + z3_abs(sy - y) > m)\n s.check()\n outx, outy = s.model()[x].as_long(), s.model()[y].as_long()\n return outx * 4000000 + outy\n\n\nwith open('day15.txt', 'r') as f:\n data = f.read().split('\\n')\nsensor_list, beacon_list = get_sensor_beacon(data)\nprint('Part One:', no_beacon_row(sensor_list, beacon_list, 2000000))\nprint('Part Two:', part_two(data))\n",
"step-5": "import re\nimport z3\ndigit_search = re.compile('\\-?\\d+')\n\ndef get_sensor_beacon(data_in):\n sensors = {}\n beacons = set()\n for line in data_in:\n s_x, s_y, b_x, b_y = list(map(int, digit_search.findall(line)))\n sensors[(s_x, s_y)] = abs(s_x - b_x) + abs(s_y - b_y)\n beacons.add((b_x, b_y))\n return sensors, beacons\n\ndef manhat(point_one, point_two):\n return abs(point_one[0] - point_two[0]) + abs(point_one[1] - point_two[1])\n\ndef find_edge(sensors, pos, dir):\n x, row = pos\n closer = []\n for sensor in sensors.keys():\n if manhat(pos, sensor) <= sensors[sensor]:\n closer.append(sensor)\n if dir > 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == max([x for x, y in closer])][0]\n elif dir < 0:\n edgiest = [sensor for sensor in sensors.keys() if sensor[0] == min([x for x, y in closer])][0]\n if dir > 0:\n if pos[0] > edgiest[0] and max([sensors[point] - manhat(pos, point) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, (sensors[edgiest] - manhat(pos, edgiest))]) * dir\n return find_edge(sensors, (new_x, row), dir)\n elif dir < 0:\n if pos[0] < edgiest[0] and max([sensors[point] - manhat(pos, point) for point in closer]) == 0:\n return x\n elif len(closer) > 1 or manhat(pos, edgiest) < sensors[edgiest]:\n new_x = x + max([1, (sensors[edgiest] - manhat(pos, edgiest))]) * dir\n return find_edge(sensors, (new_x, row), dir)\n else:\n raise Exception(\"This shouldn't be happening. 
We've gone too far!\")\n\n\ndef no_beacon_row(sensors, beacons, row):\n start_x = int(sum([y for x,y in sensors.keys()])/len(sensors.keys()))\n beacons_on_row = len([beacon for beacon in beacons if beacon[1] == row])\n # print(start_x)\n # print(beacons_on_row)\n # print(find_edge(sensors, (start_x, row), 1), find_edge(sensors, (start_x, row), -1))\n return find_edge(sensors, (start_x, row), 1) - find_edge(sensors, (start_x, row), -1) - beacons_on_row + 1\n\n# airlifted and modified to fit from u/hugh_tc https://www.reddit.com/r/adventofcode/comments/zmcn64/2022_day_15_solutions/j0af5cy/\ndef part_two(data_in):\n s = z3.Solver()\n x = z3.Int(\"x\")\n y = z3.Int(\"y\")\n s.add(0 <= x)\n s.add(x <= 4000000)\n s.add(0 <= y)\n s.add(y <= 4000000)\n def z3_abs(x):\n return z3.If(x >= 0, x, -x)\n for line in data:\n sx, sy, bx, by = [int(x) for x in digit_search.findall(line)]\n m = abs(sx - bx) + abs(sy - by)\n s.add(z3_abs(sx - x) + z3_abs(sy - y) > m)\n s.check()\n outx, outy = s.model()[x].as_long(), s.model()[y].as_long()\n return outx * 4000000 + outy\n\nwith open(\"day15.txt\" , \"r\") as f:\n data = f.read().split('\\n')\n\nsensor_list, beacon_list = get_sensor_beacon(data)\nprint(\"Part One:\", no_beacon_row(sensor_list, beacon_list, 2000000))\nprint(\"Part Two:\", part_two(data))\n\n",
"step-ids": [
3,
4,
5,
7,
9
]
}
|
[
3,
4,
5,
7,
9
] |
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
from dataset.mnist import load_mnist
from controller import Controller
# データの読み込み
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
# instance
controller = Controller()
# accuracy
trycount = 1000
accuracy_cnt = 0
result = np.zeros((10, 10))
for i in range(len(x_test)):
p = controller.accuracy(x_test[i])
a = np.argmax(t_test[i])
#print("p = " + str(p))
#print("a = " + str(a))
result[p][a] += 1
#print(t_test[i])
if p == a:
accuracy_cnt += 1
if (i == trycount):
break
print("Accuracy:" + str(float(accuracy_cnt) / trycount))
print(result)
|
normal
|
{
"blob_id": "c2d8e34ab0b449a971c920fc86f259f093f16cc5",
"index": 7156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\nfor i in range(len(x_test)):\n p = controller.accuracy(x_test[i])\n a = np.argmax(t_test[i])\n result[p][a] += 1\n if p == a:\n accuracy_cnt += 1\n if i == trycount:\n break\nprint('Accuracy:' + str(float(accuracy_cnt) / trycount))\nprint(result)\n",
"step-3": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,\n one_hot_label=True)\ncontroller = Controller()\ntrycount = 1000\naccuracy_cnt = 0\nresult = np.zeros((10, 10))\nfor i in range(len(x_test)):\n p = controller.accuracy(x_test[i])\n a = np.argmax(t_test[i])\n result[p][a] += 1\n if p == a:\n accuracy_cnt += 1\n if i == trycount:\n break\nprint('Accuracy:' + str(float(accuracy_cnt) / trycount))\nprint(result)\n",
"step-4": "import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom controller import Controller\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,\n one_hot_label=True)\ncontroller = Controller()\ntrycount = 1000\naccuracy_cnt = 0\nresult = np.zeros((10, 10))\nfor i in range(len(x_test)):\n p = controller.accuracy(x_test[i])\n a = np.argmax(t_test[i])\n result[p][a] += 1\n if p == a:\n accuracy_cnt += 1\n if i == trycount:\n break\nprint('Accuracy:' + str(float(accuracy_cnt) / trycount))\nprint(result)\n",
"step-5": "import sys, os\nsys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom controller import Controller\n\n# データの読み込み\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)\n\n# instance\ncontroller = Controller()\n\n# accuracy\ntrycount = 1000\naccuracy_cnt = 0\nresult = np.zeros((10, 10))\n\nfor i in range(len(x_test)):\n p = controller.accuracy(x_test[i])\n a = np.argmax(t_test[i])\n\n #print(\"p = \" + str(p))\n #print(\"a = \" + str(a))\n result[p][a] += 1\n #print(t_test[i])\n if p == a:\n accuracy_cnt += 1\n\n if (i == trycount):\n break\nprint(\"Accuracy:\" + str(float(accuracy_cnt) / trycount))\nprint(result)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# ---------------------------------------------------------------------
# Iskratel.ESCOM.get_version
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetversion import IGetVersion
class Script(BaseScript):
name = "Iskratel.ESCOM.get_version"
cache = True
interface = IGetVersion
rx_ver = re.compile(
r"^\s*SW version\s+(?P<version>\S+).*\n"
r"^\s*Boot version\s+(?P<bootprom>\S+).*\n"
r"^\s*HW version\s+(?P<hardware>\S+).*\n",
re.MULTILINE,
)
rx_ver1 = re.compile(
r"^\s+1\s+(?P<version>\S+)\s+(?P<bootprom>\S+)\s+(?P<hardware>\S+)", re.MULTILINE
)
rx_ver_escom_l = re.compile(
r"SI3000 ESCOM L Series Software,\s*Version\s(?P<version>\S+) Build (?P<version_build>\S+),",
re.MULTILINE,
)
rx_hw_escom_l = re.compile(
r"ROM:\s*System Bootstrap, Version\s*(?P<bootprom>\S+),\s*hardware version:\s*(?P<hardware>\S+)\n"
r"Serial num:(?P<serial>\S+), ID num:(?P<id_number>\S+)\n"
r"System image file is \"(?P<image>\S+)\"",
re.MULTILINE,
)
rx_platform = re.compile(r"^\s*System Description:\s+(?P<platform>.+)\n", re.MULTILINE)
rx_platform1 = re.compile(r"^\s+1\s+(?P<platform>\S+)\s*\n", re.MULTILINE)
rx_serial = re.compile(r"^\s*Serial number : (?P<serial>\S+)")
def execute_cli(self, **kwargs):
v = self.cli("show version", cached=True)
for platform, ver in [
("ESCOM L", self.rx_ver_escom_l),
("ESCOM", self.rx_ver),
("ESCOM", self.rx_ver1),
]:
match = ver.search(v)
if match:
break
else:
raise NotImplementedError
if platform == "ESCOM L":
hw_match = self.rx_hw_escom_l.search(v)
return {
"vendor": "Iskratel",
"version": match.group("version"),
"platform": platform,
"image": hw_match.group("image"),
"attributes": {
"Boot PROM": hw_match.group("bootprom"),
"HW version": hw_match.group("hardware"),
"Serial Number": hw_match.group("serial"),
},
}
r = {
"vendor": "Iskratel",
"version": match.group("version"),
"attributes": {
"Boot PROM": match.group("bootprom"),
"HW version": match.group("hardware"),
},
}
v = self.cli("show system", cached=True)
match = self.rx_platform.search(v)
if not match:
match = self.rx_platform1.search(v)
r["platform"] = match.group("platform")
v = self.cli("show system id", cached=True)
match = self.rx_serial.search(v)
if match:
r["attributes"]["Serial Number"] = match.group("serial")
return r
|
normal
|
{
"blob_id": "40b3c403f99044eb61740d62eda15ddd08b0f739",
"index": 1980,
"step-1": "<mask token>\n\n\nclass Script(BaseScript):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Script(BaseScript):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def execute_cli(self, **kwargs):\n v = self.cli('show version', cached=True)\n for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM',\n self.rx_ver), ('ESCOM', self.rx_ver1)]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == 'ESCOM L':\n hw_match = self.rx_hw_escom_l.search(v)\n return {'vendor': 'Iskratel', 'version': match.group('version'),\n 'platform': platform, 'image': hw_match.group('image'),\n 'attributes': {'Boot PROM': hw_match.group('bootprom'),\n 'HW version': hw_match.group('hardware'), 'Serial Number':\n hw_match.group('serial')}}\n r = {'vendor': 'Iskratel', 'version': match.group('version'),\n 'attributes': {'Boot PROM': match.group('bootprom'),\n 'HW version': match.group('hardware')}}\n v = self.cli('show system', cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r['platform'] = match.group('platform')\n v = self.cli('show system id', cached=True)\n match = self.rx_serial.search(v)\n if match:\n r['attributes']['Serial Number'] = match.group('serial')\n return r\n",
"step-3": "<mask token>\n\n\nclass Script(BaseScript):\n name = 'Iskratel.ESCOM.get_version'\n cache = True\n interface = IGetVersion\n rx_ver = re.compile(\n '^\\\\s*SW version\\\\s+(?P<version>\\\\S+).*\\\\n^\\\\s*Boot version\\\\s+(?P<bootprom>\\\\S+).*\\\\n^\\\\s*HW version\\\\s+(?P<hardware>\\\\S+).*\\\\n'\n , re.MULTILINE)\n rx_ver1 = re.compile(\n '^\\\\s+1\\\\s+(?P<version>\\\\S+)\\\\s+(?P<bootprom>\\\\S+)\\\\s+(?P<hardware>\\\\S+)'\n , re.MULTILINE)\n rx_ver_escom_l = re.compile(\n 'SI3000 ESCOM L Series Software,\\\\s*Version\\\\s(?P<version>\\\\S+) Build (?P<version_build>\\\\S+),'\n , re.MULTILINE)\n rx_hw_escom_l = re.compile(\n 'ROM:\\\\s*System Bootstrap, Version\\\\s*(?P<bootprom>\\\\S+),\\\\s*hardware version:\\\\s*(?P<hardware>\\\\S+)\\\\nSerial num:(?P<serial>\\\\S+), ID num:(?P<id_number>\\\\S+)\\\\nSystem image file is \\\\\"(?P<image>\\\\S+)\\\\\"'\n , re.MULTILINE)\n rx_platform = re.compile('^\\\\s*System Description:\\\\s+(?P<platform>.+)\\\\n',\n re.MULTILINE)\n rx_platform1 = re.compile('^\\\\s+1\\\\s+(?P<platform>\\\\S+)\\\\s*\\\\n', re.\n MULTILINE)\n rx_serial = re.compile('^\\\\s*Serial number : (?P<serial>\\\\S+)')\n\n def execute_cli(self, **kwargs):\n v = self.cli('show version', cached=True)\n for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM',\n self.rx_ver), ('ESCOM', self.rx_ver1)]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == 'ESCOM L':\n hw_match = self.rx_hw_escom_l.search(v)\n return {'vendor': 'Iskratel', 'version': match.group('version'),\n 'platform': platform, 'image': hw_match.group('image'),\n 'attributes': {'Boot PROM': hw_match.group('bootprom'),\n 'HW version': hw_match.group('hardware'), 'Serial Number':\n hw_match.group('serial')}}\n r = {'vendor': 'Iskratel', 'version': match.group('version'),\n 'attributes': {'Boot PROM': match.group('bootprom'),\n 'HW version': match.group('hardware')}}\n v = self.cli('show system', cached=True)\n match = 
self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r['platform'] = match.group('platform')\n v = self.cli('show system id', cached=True)\n match = self.rx_serial.search(v)\n if match:\n r['attributes']['Serial Number'] = match.group('serial')\n return r\n",
"step-4": "import re\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetversion import IGetVersion\n\n\nclass Script(BaseScript):\n name = 'Iskratel.ESCOM.get_version'\n cache = True\n interface = IGetVersion\n rx_ver = re.compile(\n '^\\\\s*SW version\\\\s+(?P<version>\\\\S+).*\\\\n^\\\\s*Boot version\\\\s+(?P<bootprom>\\\\S+).*\\\\n^\\\\s*HW version\\\\s+(?P<hardware>\\\\S+).*\\\\n'\n , re.MULTILINE)\n rx_ver1 = re.compile(\n '^\\\\s+1\\\\s+(?P<version>\\\\S+)\\\\s+(?P<bootprom>\\\\S+)\\\\s+(?P<hardware>\\\\S+)'\n , re.MULTILINE)\n rx_ver_escom_l = re.compile(\n 'SI3000 ESCOM L Series Software,\\\\s*Version\\\\s(?P<version>\\\\S+) Build (?P<version_build>\\\\S+),'\n , re.MULTILINE)\n rx_hw_escom_l = re.compile(\n 'ROM:\\\\s*System Bootstrap, Version\\\\s*(?P<bootprom>\\\\S+),\\\\s*hardware version:\\\\s*(?P<hardware>\\\\S+)\\\\nSerial num:(?P<serial>\\\\S+), ID num:(?P<id_number>\\\\S+)\\\\nSystem image file is \\\\\"(?P<image>\\\\S+)\\\\\"'\n , re.MULTILINE)\n rx_platform = re.compile('^\\\\s*System Description:\\\\s+(?P<platform>.+)\\\\n',\n re.MULTILINE)\n rx_platform1 = re.compile('^\\\\s+1\\\\s+(?P<platform>\\\\S+)\\\\s*\\\\n', re.\n MULTILINE)\n rx_serial = re.compile('^\\\\s*Serial number : (?P<serial>\\\\S+)')\n\n def execute_cli(self, **kwargs):\n v = self.cli('show version', cached=True)\n for platform, ver in [('ESCOM L', self.rx_ver_escom_l), ('ESCOM',\n self.rx_ver), ('ESCOM', self.rx_ver1)]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == 'ESCOM L':\n hw_match = self.rx_hw_escom_l.search(v)\n return {'vendor': 'Iskratel', 'version': match.group('version'),\n 'platform': platform, 'image': hw_match.group('image'),\n 'attributes': {'Boot PROM': hw_match.group('bootprom'),\n 'HW version': hw_match.group('hardware'), 'Serial Number':\n hw_match.group('serial')}}\n r = {'vendor': 'Iskratel', 'version': match.group('version'),\n 'attributes': {'Boot PROM': match.group('bootprom'),\n 
'HW version': match.group('hardware')}}\n v = self.cli('show system', cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r['platform'] = match.group('platform')\n v = self.cli('show system id', cached=True)\n match = self.rx_serial.search(v)\n if match:\n r['attributes']['Serial Number'] = match.group('serial')\n return r\n",
"step-5": "# ---------------------------------------------------------------------\n# Iskratel.ESCOM.get_version\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2018 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetversion import IGetVersion\n\n\nclass Script(BaseScript):\n name = \"Iskratel.ESCOM.get_version\"\n cache = True\n interface = IGetVersion\n\n rx_ver = re.compile(\n r\"^\\s*SW version\\s+(?P<version>\\S+).*\\n\"\n r\"^\\s*Boot version\\s+(?P<bootprom>\\S+).*\\n\"\n r\"^\\s*HW version\\s+(?P<hardware>\\S+).*\\n\",\n re.MULTILINE,\n )\n rx_ver1 = re.compile(\n r\"^\\s+1\\s+(?P<version>\\S+)\\s+(?P<bootprom>\\S+)\\s+(?P<hardware>\\S+)\", re.MULTILINE\n )\n rx_ver_escom_l = re.compile(\n r\"SI3000 ESCOM L Series Software,\\s*Version\\s(?P<version>\\S+) Build (?P<version_build>\\S+),\",\n re.MULTILINE,\n )\n rx_hw_escom_l = re.compile(\n r\"ROM:\\s*System Bootstrap, Version\\s*(?P<bootprom>\\S+),\\s*hardware version:\\s*(?P<hardware>\\S+)\\n\"\n r\"Serial num:(?P<serial>\\S+), ID num:(?P<id_number>\\S+)\\n\"\n r\"System image file is \\\"(?P<image>\\S+)\\\"\",\n re.MULTILINE,\n )\n rx_platform = re.compile(r\"^\\s*System Description:\\s+(?P<platform>.+)\\n\", re.MULTILINE)\n rx_platform1 = re.compile(r\"^\\s+1\\s+(?P<platform>\\S+)\\s*\\n\", re.MULTILINE)\n rx_serial = re.compile(r\"^\\s*Serial number : (?P<serial>\\S+)\")\n\n def execute_cli(self, **kwargs):\n v = self.cli(\"show version\", cached=True)\n for platform, ver in [\n (\"ESCOM L\", self.rx_ver_escom_l),\n (\"ESCOM\", self.rx_ver),\n (\"ESCOM\", self.rx_ver1),\n ]:\n match = ver.search(v)\n if match:\n break\n else:\n raise NotImplementedError\n if platform == \"ESCOM L\":\n hw_match = self.rx_hw_escom_l.search(v)\n return {\n \"vendor\": \"Iskratel\",\n 
\"version\": match.group(\"version\"),\n \"platform\": platform,\n \"image\": hw_match.group(\"image\"),\n \"attributes\": {\n \"Boot PROM\": hw_match.group(\"bootprom\"),\n \"HW version\": hw_match.group(\"hardware\"),\n \"Serial Number\": hw_match.group(\"serial\"),\n },\n }\n r = {\n \"vendor\": \"Iskratel\",\n \"version\": match.group(\"version\"),\n \"attributes\": {\n \"Boot PROM\": match.group(\"bootprom\"),\n \"HW version\": match.group(\"hardware\"),\n },\n }\n v = self.cli(\"show system\", cached=True)\n match = self.rx_platform.search(v)\n if not match:\n match = self.rx_platform1.search(v)\n r[\"platform\"] = match.group(\"platform\")\n v = self.cli(\"show system id\", cached=True)\n match = self.rx_serial.search(v)\n if match:\n r[\"attributes\"][\"Serial Number\"] = match.group(\"serial\")\n return r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
...
...
model = Sequential()
model.add(Conv2D(32, kernel_size=3, input_shape=(256, 256, 3))
...
...
|
normal
|
{
"blob_id": "ad054febac3a04c625653a2f3864506eeb672d9e",
"index": 6273,
"step-1": "...\n...\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=3, input_shape=(256, 256, 3))\n...\n...\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Cellule:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def basculer(self):
"""mutateur qui change l'état actuel de la cellule pour l'état futur"""
self.__actuel = self.__futur
def __str__(self):
"""méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀"""
if self.__actuel == True:
return '🌱'
else:
return '💀'
<|reserved_special_token_0|>
class Grille:
def __init__(self):
"""constructeur qui initialise les variables"""
self.largeur = 20
self.hauteur = 30
self.matrice = []
def clear_matrice(self):
"""mutateur qui remet la matrice à 0"""
self.matrice = []
def set_largeur(self, x):
"""mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre"""
if type(x) == int:
self.largeur = x
def set_hauteur(self, x):
"""mutateur qui change la hauteur de la grille. Prends un nombre entier en paramètre"""
if type(x) == int:
self.hauteur = x
def dansgrille(self, i, j):
"""Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille"""
if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:
return True
else:
return False
def setXY(self, i, j, valeur):
"""mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. Si les coordonées sont dans la liste, ajoute la valeur à cet endroit"""
if self.dansgrille(i, j) == True:
self.matrice[i][j] = valeur
else:
return 'out of range, not added'
def getXY(self, i, j):
"""accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille"""
if self.dansgrille(i, j) == True:
return self.matrice[i][j]
def get_largeur(self):
"""accesseur qui retourne la largeur de la grille"""
return self.largeur
def get_hauteur(self):
"""accesseur qui permet de récupérer la valeur de la hauteur de la grille"""
return self.hauteur
@staticmethod
def est_voisins(i, j, x, y, instance):
"""fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore"""
abx = None
ordo = None
if i == x and j == y:
return False
for b in range(-1, 2):
if b + i < 0:
abx = instance.largeur + b
if b + i > instance.get_largeur() - 1:
abx = 0
else:
abx = b + i
for c in range(-1, 2):
if j + c < 0:
ordo = instance.get_hauteur() + c
if j + c > instance.get_hauteur() - 1:
ordo = 0
else:
ordo = c + j
if x == abx and y == ordo:
return True
return False
def get8voisins(self, i, j):
"""fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille"""
if self.dansgrille(i, j) == True:
L_voisins = []
for b in range(-1, 2):
if b + i < 0:
abx = self.get_hauteur() + b
elif b + i > self.get_hauteur() - 1:
abx = 0
else:
abx = b + i
for c in range(-1, 2):
if j + c < 0:
ordo = self.get_largeur() + c
elif j + c > self.get_largeur() - 1:
ordo = 0
else:
ordo = c + j
if abx != i or ordo != j:
data = self.getXY(abx, ordo)
L_voisins.append(data.est_vivant())
return L_voisins
else:
return None
def __str__(self):
"""fonction qui affiche le jeu de la vie dans le terminal"""
for i in range(len(self.matrice)):
display = []
for j in range(len(self.matrice[i])):
display.append(self.matrice[i][j].__str__())
print(display)
print('\n')
def getallstate(self):
"""fonction qui renvoie tout les etats"""
allin = ''
for i in range(len(self.matrice)):
display = ''
for j in range(len(self.matrice[i])):
display = display + str(self.matrice[i][j].__str__())
allin = allin + display + '\n'
return allin
def remplir_alea(self, pourcent):
"""fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes"""
if 0 < int(pourcent) <= 100:
cases = self.largeur * self.hauteur
nombre = int(cases * (pourcent / 100))
L_vivant = []
while len(L_vivant) != nombre:
y = randint(0, self.largeur - 1)
x = randint(0, self.hauteur - 1)
if (x, y) not in L_vivant:
L_vivant.append((x, y))
for i in range(0, self.hauteur):
self.matrice.append([])
for b in range(0, self.largeur):
self.matrice[i].append(Cellule())
if (i, b) in L_vivant:
cellule = self.getXY(i, b)
cellule.naitre()
cellule.basculer()
self.setXY(i, b, cellule)
else:
return False
def Jeu(self):
"""fonction qui calcule l'état futur de chaque cellule"""
for i in range(0, self.hauteur):
for b in range(0, self.largeur):
cellule = self.getXY(i, b)
cellule.set_voisins(self.get8voisins(i, b))
cellule.calcule_etat_futur()
self.setXY(i, b, cellule)
def actualise(self):
"""fonction qui actualise l'état de toutes le cellules"""
for i in range(0, self.hauteur):
for b in range(0, self.largeur):
cellule = self.getXY(i, b)
cellule.basculer()
self.setXY(i, b, cellule)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cellule:
def __init__(self):
"""constructeur qui initialise les variables"""
self.__actuel = False
self.__futur = False
self.__voisins = None
<|reserved_special_token_0|>
def set_voisins(self, L):
"""mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste"""
if type(L) == list:
self.__voisins = L
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def basculer(self):
"""mutateur qui change l'état actuel de la cellule pour l'état futur"""
self.__actuel = self.__futur
def __str__(self):
"""méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀"""
if self.__actuel == True:
return '🌱'
else:
return '💀'
def calcule_etat_futur(self):
"""fonction qui calcule l'état futur des cellules via les règles du jeu"""
acc = 0
for i in range(len(self.__voisins)):
if self.__voisins[i] == True:
acc = acc + 1
if acc == 3 and self.__actuel == False:
self.naitre()
if acc != 3 and self.__actuel == False:
self.mourir()
elif (acc == 2 or acc == 3) and self.__actuel == True:
self.naitre()
elif (acc != 2 or acc != 3) and self.__actuel == True:
self.mourir()
class Grille:
def __init__(self):
"""constructeur qui initialise les variables"""
self.largeur = 20
self.hauteur = 30
self.matrice = []
def clear_matrice(self):
"""mutateur qui remet la matrice à 0"""
self.matrice = []
def set_largeur(self, x):
"""mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre"""
if type(x) == int:
self.largeur = x
def set_hauteur(self, x):
"""mutateur qui change la hauteur de la grille. Prends un nombre entier en paramètre"""
if type(x) == int:
self.hauteur = x
def dansgrille(self, i, j):
"""Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille"""
if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:
return True
else:
return False
def setXY(self, i, j, valeur):
"""mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. Si les coordonées sont dans la liste, ajoute la valeur à cet endroit"""
if self.dansgrille(i, j) == True:
self.matrice[i][j] = valeur
else:
return 'out of range, not added'
def getXY(self, i, j):
"""accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille"""
if self.dansgrille(i, j) == True:
return self.matrice[i][j]
def get_largeur(self):
"""accesseur qui retourne la largeur de la grille"""
return self.largeur
def get_hauteur(self):
"""accesseur qui permet de récupérer la valeur de la hauteur de la grille"""
return self.hauteur
@staticmethod
def est_voisins(i, j, x, y, instance):
"""fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore"""
abx = None
ordo = None
if i == x and j == y:
return False
for b in range(-1, 2):
if b + i < 0:
abx = instance.largeur + b
if b + i > instance.get_largeur() - 1:
abx = 0
else:
abx = b + i
for c in range(-1, 2):
if j + c < 0:
ordo = instance.get_hauteur() + c
if j + c > instance.get_hauteur() - 1:
ordo = 0
else:
ordo = c + j
if x == abx and y == ordo:
return True
return False
def get8voisins(self, i, j):
"""fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille"""
if self.dansgrille(i, j) == True:
L_voisins = []
for b in range(-1, 2):
if b + i < 0:
abx = self.get_hauteur() + b
elif b + i > self.get_hauteur() - 1:
abx = 0
else:
abx = b + i
for c in range(-1, 2):
if j + c < 0:
ordo = self.get_largeur() + c
elif j + c > self.get_largeur() - 1:
ordo = 0
else:
ordo = c + j
if abx != i or ordo != j:
data = self.getXY(abx, ordo)
L_voisins.append(data.est_vivant())
return L_voisins
else:
return None
def __str__(self):
"""fonction qui affiche le jeu de la vie dans le terminal"""
for i in range(len(self.matrice)):
display = []
for j in range(len(self.matrice[i])):
display.append(self.matrice[i][j].__str__())
print(display)
print('\n')
def getallstate(self):
"""fonction qui renvoie tout les etats"""
allin = ''
for i in range(len(self.matrice)):
display = ''
for j in range(len(self.matrice[i])):
display = display + str(self.matrice[i][j].__str__())
allin = allin + display + '\n'
return allin
def remplir_alea(self, pourcent):
"""fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes"""
if 0 < int(pourcent) <= 100:
cases = self.largeur * self.hauteur
nombre = int(cases * (pourcent / 100))
L_vivant = []
while len(L_vivant) != nombre:
y = randint(0, self.largeur - 1)
x = randint(0, self.hauteur - 1)
if (x, y) not in L_vivant:
L_vivant.append((x, y))
for i in range(0, self.hauteur):
self.matrice.append([])
for b in range(0, self.largeur):
self.matrice[i].append(Cellule())
if (i, b) in L_vivant:
cellule = self.getXY(i, b)
cellule.naitre()
cellule.basculer()
self.setXY(i, b, cellule)
else:
return False
def Jeu(self):
"""fonction qui calcule l'état futur de chaque cellule"""
for i in range(0, self.hauteur):
for b in range(0, self.largeur):
cellule = self.getXY(i, b)
cellule.set_voisins(self.get8voisins(i, b))
cellule.calcule_etat_futur()
self.setXY(i, b, cellule)
def actualise(self):
"""fonction qui actualise l'état de toutes le cellules"""
for i in range(0, self.hauteur):
for b in range(0, self.largeur):
cellule = self.getXY(i, b)
cellule.basculer()
self.setXY(i, b, cellule)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cellule:
def __init__(self):
"""constructeur qui initialise les variables"""
self.__actuel = False
self.__futur = False
self.__voisins = None
def est_vivant(self):
"""accesseur qui retourne l'état actuel de la cellule"""
return self.__actuel
def set_voisins(self, L):
"""mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste"""
if type(L) == list:
self.__voisins = L
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def basculer(self):
"""mutateur qui change l'état actuel de la cellule pour l'état futur"""
self.__actuel = self.__futur
def __str__(self):
"""méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀"""
if self.__actuel == True:
return '🌱'
else:
return '💀'
def calcule_etat_futur(self):
"""fonction qui calcule l'état futur des cellules via les règles du jeu"""
acc = 0
for i in range(len(self.__voisins)):
if self.__voisins[i] == True:
acc = acc + 1
if acc == 3 and self.__actuel == False:
self.naitre()
if acc != 3 and self.__actuel == False:
self.mourir()
elif (acc == 2 or acc == 3) and self.__actuel == True:
self.naitre()
elif (acc != 2 or acc != 3) and self.__actuel == True:
self.mourir()
class Grille:
def __init__(self):
"""constructeur qui initialise les variables"""
self.largeur = 20
self.hauteur = 30
self.matrice = []
def clear_matrice(self):
"""mutateur qui remet la matrice à 0"""
self.matrice = []
def set_largeur(self, x):
"""mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre"""
if type(x) == int:
self.largeur = x
def set_hauteur(self, x):
"""mutateur qui change la hauteur de la grille. Prends un nombre entier en paramètre"""
if type(x) == int:
self.hauteur = x
def dansgrille(self, i, j):
"""Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille"""
if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:
return True
else:
return False
def setXY(self, i, j, valeur):
"""mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. Si les coordonées sont dans la liste, ajoute la valeur à cet endroit"""
if self.dansgrille(i, j) == True:
self.matrice[i][j] = valeur
else:
return 'out of range, not added'
def getXY(self, i, j):
"""accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille"""
if self.dansgrille(i, j) == True:
return self.matrice[i][j]
def get_largeur(self):
"""accesseur qui retourne la largeur de la grille"""
return self.largeur
def get_hauteur(self):
"""accesseur qui permet de récupérer la valeur de la hauteur de la grille"""
return self.hauteur
@staticmethod
def est_voisins(i, j, x, y, instance):
"""fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore"""
abx = None
ordo = None
if i == x and j == y:
return False
for b in range(-1, 2):
if b + i < 0:
abx = instance.largeur + b
if b + i > instance.get_largeur() - 1:
abx = 0
else:
abx = b + i
for c in range(-1, 2):
if j + c < 0:
ordo = instance.get_hauteur() + c
if j + c > instance.get_hauteur() - 1:
ordo = 0
else:
ordo = c + j
if x == abx and y == ordo:
return True
return False
def get8voisins(self, i, j):
"""fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille"""
if self.dansgrille(i, j) == True:
L_voisins = []
for b in range(-1, 2):
if b + i < 0:
abx = self.get_hauteur() + b
elif b + i > self.get_hauteur() - 1:
abx = 0
else:
abx = b + i
for c in range(-1, 2):
if j + c < 0:
ordo = self.get_largeur() + c
elif j + c > self.get_largeur() - 1:
ordo = 0
else:
ordo = c + j
if abx != i or ordo != j:
data = self.getXY(abx, ordo)
L_voisins.append(data.est_vivant())
return L_voisins
else:
return None
def __str__(self):
"""fonction qui affiche le jeu de la vie dans le terminal"""
for i in range(len(self.matrice)):
display = []
for j in range(len(self.matrice[i])):
display.append(self.matrice[i][j].__str__())
print(display)
print('\n')
def getallstate(self):
"""fonction qui renvoie tout les etats"""
allin = ''
for i in range(len(self.matrice)):
display = ''
for j in range(len(self.matrice[i])):
display = display + str(self.matrice[i][j].__str__())
allin = allin + display + '\n'
return allin
def remplir_alea(self, pourcent):
"""fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes"""
if 0 < int(pourcent) <= 100:
cases = self.largeur * self.hauteur
nombre = int(cases * (pourcent / 100))
L_vivant = []
while len(L_vivant) != nombre:
y = randint(0, self.largeur - 1)
x = randint(0, self.hauteur - 1)
if (x, y) not in L_vivant:
L_vivant.append((x, y))
for i in range(0, self.hauteur):
self.matrice.append([])
for b in range(0, self.largeur):
self.matrice[i].append(Cellule())
if (i, b) in L_vivant:
cellule = self.getXY(i, b)
cellule.naitre()
cellule.basculer()
self.setXY(i, b, cellule)
else:
return False
def Jeu(self):
"""fonction qui calcule l'état futur de chaque cellule"""
for i in range(0, self.hauteur):
for b in range(0, self.largeur):
cellule = self.getXY(i, b)
cellule.set_voisins(self.get8voisins(i, b))
cellule.calcule_etat_futur()
self.setXY(i, b, cellule)
def actualise(self):
"""fonction qui actualise l'état de toutes le cellules"""
for i in range(0, self.hauteur):
for b in range(0, self.largeur):
cellule = self.getXY(i, b)
cellule.basculer()
self.setXY(i, b, cellule)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Cellule:
def __init__(self):
"""constructeur qui initialise les variables"""
self.__actuel = False
self.__futur = False
self.__voisins = None
def est_vivant(self):
"""accesseur qui retourne l'état actuel de la cellule"""
return self.__actuel
def set_voisins(self, L):
"""mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste"""
if type(L) == list:
self.__voisins = L
def get_voisins(self):
"""accesseur qui renvoie les voisins de la cellule"""
return self.__voisins
def naitre(self):
"""mutateur qui passe l'état futur de la cellule à True"""
self.__futur = True
def mourir(self):
"""mutateur qui passe l'état futur de la cellule à False"""
self.__futur = False
def basculer(self):
"""mutateur qui change l'état actuel de la cellule pour l'état futur"""
self.__actuel = self.__futur
def __str__(self):
"""méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀"""
if self.__actuel == True:
return '🌱'
else:
return '💀'
def calcule_etat_futur(self):
"""fonction qui calcule l'état futur des cellules via les règles du jeu"""
acc = 0
for i in range(len(self.__voisins)):
if self.__voisins[i] == True:
acc = acc + 1
if acc == 3 and self.__actuel == False:
self.naitre()
if acc != 3 and self.__actuel == False:
self.mourir()
elif (acc == 2 or acc == 3) and self.__actuel == True:
self.naitre()
elif (acc != 2 or acc != 3) and self.__actuel == True:
self.mourir()
class Grille:
    """Toroidal grid of Cellule objects for Conway's Game of Life.

    Rows are indexed by ``i`` in [0, hauteur) and columns by ``j`` in
    [0, largeur); neighbourhood lookups wrap around the edges.
    """

    def __init__(self):
        """Initialise an empty grid of 30 rows by 20 columns."""
        self.largeur = 20   # number of columns
        self.hauteur = 30   # number of rows
        self.matrice = []   # 2D list of Cellule, filled by remplir_alea()

    def clear_matrice(self):
        """Reset the matrix to an empty list."""
        self.matrice = []

    def set_largeur(self, x):
        """Set the grid width; silently ignored unless *x* is an int."""
        if type(x) == int:
            self.largeur = x

    def set_hauteur(self, x):
        """Set the grid height; silently ignored unless *x* is an int."""
        if type(x) == int:
            self.hauteur = x

    def dansgrille(self, i, j):
        """Return True if (i, j) lies inside the grid bounds."""
        return 0 <= i <= self.hauteur - 1 and 0 <= j <= self.largeur - 1

    def setXY(self, i, j, valeur):
        """Store *valeur* at (i, j) if the coordinates are in the grid."""
        if self.dansgrille(i, j) == True:
            self.matrice[i][j] = valeur
        else:
            return 'out of range, not added'

    def getXY(self, i, j):
        """Return the cell at (i, j), or None if out of bounds."""
        if self.dansgrille(i, j) == True:
            return self.matrice[i][j]

    def get_largeur(self):
        """Return the grid width (number of columns)."""
        return self.largeur

    def get_hauteur(self):
        """Return the grid height (number of rows)."""
        return self.hauteur

    @staticmethod
    def est_voisins(i, j, x, y, instance):
        """Return True if (x, y) is one of the 8 Moore neighbours of
        (i, j) on the toroidal grid of *instance* (edges wrap around).

        BUG FIX: the original used ``if`` where ``elif`` was required,
        so the wrapped row index was immediately overwritten by the
        ``else`` branch (yielding negative indices), and it wrapped the
        row by the width and the column by the height — the opposite of
        get8voisins.  Both are fixed here with modulo arithmetic.
        """
        if i == x and j == y:   # a cell is not its own neighbour
            return False
        for b in range(-1, 2):
            abx = (i + b) % instance.get_hauteur()       # wrapped row
            for c in range(-1, 2):
                ordo = (j + c) % instance.get_largeur()  # wrapped column
                if x == abx and y == ordo:
                    return True
        return False

    def get8voisins(self, i, j):
        """Return the list of the 8 neighbours' states (booleans) of the
        cell at (i, j), wrapping around the edges, or None if (i, j) is
        outside the grid."""
        if self.dansgrille(i, j) == True:
            L_voisins = []
            for b in range(-1, 2):
                # (i + b) % hauteur is equivalent to the original
                # elif-chain wrap-around for in-grid i
                abx = (i + b) % self.get_hauteur()
                for c in range(-1, 2):
                    ordo = (j + c) % self.get_largeur()
                    if abx != i or ordo != j:   # skip the cell itself
                        data = self.getXY(abx, ordo)
                        L_voisins.append(data.est_vivant())
            return L_voisins
        else:
            return None

    def __str__(self):
        """Print the board to the terminal, one row per line.

        NOTE(review): this prints and implicitly returns None, so
        ``str(grille)`` would raise TypeError; kept as-is because the
        original behaved the same way.
        """
        for i in range(len(self.matrice)):
            display = []
            for j in range(len(self.matrice[i])):
                display.append(self.matrice[i][j].__str__())
            print(display)
        print('\n')

    def getallstate(self):
        """Return the whole board as a multi-line string of emojis."""
        allin = ''
        for ligne in self.matrice:
            allin = allin + ''.join(str(cellule) for cellule in ligne) + '\n'
        return allin

    def remplir_alea(self, pourcent):
        """Fill the matrix with cells, *pourcent* percent of which are
        alive at random positions; return False if *pourcent* is not in
        (0, 100]."""
        if 0 < int(pourcent) <= 100:
            cases = self.largeur * self.hauteur
            nombre = int(cases * (pourcent / 100))  # cells to make alive
            L_vivant = []
            # draw distinct random coordinates until the quota is met
            while len(L_vivant) != nombre:
                y = randint(0, self.largeur - 1)
                x = randint(0, self.hauteur - 1)
                if (x, y) not in L_vivant:
                    L_vivant.append((x, y))
            for i in range(0, self.hauteur):
                self.matrice.append([])
                for b in range(0, self.largeur):
                    self.matrice[i].append(Cellule())
                    if (i, b) in L_vivant:
                        cellule = self.getXY(i, b)
                        cellule.naitre()    # set future state to alive
                        cellule.basculer()  # commit it immediately
                        self.setXY(i, b, cellule)
        else:
            return False

    def Jeu(self):
        """Compute the next state of every cell."""
        for i in range(0, self.hauteur):
            for b in range(0, self.largeur):
                cellule = self.getXY(i, b)
                cellule.set_voisins(self.get8voisins(i, b))
                cellule.calcule_etat_futur()
                self.setXY(i, b, cellule)

    def actualise(self):
        """Commit the next state of every cell."""
        for i in range(0, self.hauteur):
            for b in range(0, self.largeur):
                cellule = self.getXY(i, b)
                cellule.basculer()
                self.setXY(i, b, cellule)
def bakbak(*args):
    """Tear down the settings screen and run the game in the Tk window.

    Reads the module-level globals (prépartie, plateau, fenetre,
    numberofcol, numberoflign, numberofturn, pourcent) created by the
    GUI setup code.
    """
    prépartie.pack_forget()           # hide the settings screen
    plateau.clear_matrice()           # reset the board
    plateau.set_largeur(int(numberofcol.get()))
    plateau.set_hauteur(int(numberoflign.get()))
    tours = int(numberofturn.get())   # number of generations to play
    partie = Frame(fenetre, bg='#85c17e')
    plateau.remplir_alea(int(pourcent.get()))   # seed the board
    touracc = StringVar()
    printer = Label(partie, textvariable=touracc, bg='#85c17e', font=(
        'Time News Roman', 19), fg='white')   # dynamic turn counter
    printer.pack(padx=10, pady=10)
    actuel = StringVar()
    actuel.set('')
    printer1 = Label(partie, textvariable=actuel, bg='#85c17e', font=(
        'Time News Roman', 20))               # dynamic board display
    printer1.pack()

    def update(n=1, chain=''):
        """Play generation *n* and reschedule itself every 2 s until
        *tours* generations have been shown; *chain* accumulates the
        textual history of the game."""
        if n < tours + 1:
            touracc.set('Tour: ' + str(n))
            plateau.Jeu()
            actuel.set(plateau.getallstate())
            plateau.actualise()
            ch = 'Tour: ' + str(n) + '\n' + plateau.getallstate() + '\n'
            chain = chain + ch
            partie.after(2000, update, n + 1, chain)
        else:
            def save():
                """Save every step of the game to a .txt file."""
                # BUG FIX: each filetypes entry must be a
                # (label, pattern) pair; the original passed a bare
                # string as the first entry, which raises TclError.
                f = filedialog.asksaveasfilename(initialdir='/', title=
                    'Select file', filetypes=(('txt files', '*.txt'), (
                    'all files', '*.*')))
                # BUG FIX: asksaveasfilename returns '' (not None) when
                # the dialog is cancelled; `if f is None` missed it.
                if not f:
                    return
                with open(f, 'w', encoding='utf-8') as result:
                    result.write(chain)
            Button(partie, text='Recommencer', command=restart, bg=
                '#85c17e', font=('Noto Serif', 11)).pack(side=LEFT, padx=5)
            Button(partie, text='Sauvegarder la partie', command=save, bg=
                '#85c17e', font=('Noto Serif', 11)).pack(side=LEFT, padx=5)

    def restart():
        """Destroy the game frame and return to the settings screen."""
        partie.destroy()
        prépartie.pack()
    update()
    Button(partie, text='Quitter le jeu (fermera complètement la fenêtre)',
        command=quit, bg='#85c17e', font=('Noto Serif', 11)).pack(side=LEFT,
        padx=10)
    partie.pack()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
'''
LE JEU DE LA VIE
Mini projet numéro 2 de NSI
Modélisation Objet :
Q1) On peut dégager, au premier abord : une classe cellule (avec un attribut état et un autre voisins) et une classe grille (avec un attribut ordonnée et un autre abscisse). En effet, ce sont les deux éléments du jeu.
Q2) On pourrait donner une méthode pour changer l’état de la cellule, une autre pour obtenir son état. Une autre pour définir les voisins et encore une pour les obtenir. Avec ces méthodes, on pourra modifier l’état des cellules et calculer celui-ci en fonction de ses voisins.
Pour ce qui est de la classe grille, on pourrait coder une méthode pour obtenir les coordonnées, un autre pour les modifier. Ainsi, on pourra placer les cellules dans la grille là où il n’y en a pas forcément.
Q3) Il sera plus simple de représenter la notion de voisinage dans la cellule avec la classe grille. En effet, c’est elle qui contient les coordonnées.
Quand au calcul de celle-ci, elle sera plus simple dans la classe cellule car c’est cette classe qui contient l’état.
Q4) Une cellule qui n’est pas au bord a 8 voisins. En effet, le voisinage de Moore compte les diagonales.
Une cellule sur le côté mais pas dans un coin a 5 voisins.
Dans un coin, elle en a 3.
Q5) Pour la case en haut à droite, on pourrait considérer comme voisin de droite la case tout en haut à gauche. Pour le voisin du haut, on peut considérer la case de même abscisse mais d’ordonnée 0 (celle tout en bas). En fait, on prendrait la case d’abscisse ou d’ordonnée « opposée » comme suivante. Cela afin de ne pas avoir que 3 cases prises en compte lors du calcul de l’état en fonction des voisins.
Q8) Cela peut être utile pour vérifier facilement l’état d’une cellule. Ainsi, on peut l’interpréter dans une autre fonction ou même créer facilement une liste qui serait utilisée comme « historique » de la cellule.
'''
from random import randint
from time import sleep
from tkinter import filedialog
from tkinter import *
class Cellule:
    """A single cell in Conway's Game of Life.

    Holds the current state, the computed next state, and the list of
    neighbour states used to compute the transition.
    """

    def __init__(self):
        """Initialise a dead cell with no neighbours."""
        self.__actuel = False   # current state (True = alive)
        self.__futur = False    # next state, applied by basculer()
        self.__voisins = None   # list of neighbour states (booleans)

    def est_vivant(self):
        """Return the current state of the cell (True if alive)."""
        return self.__actuel

    def set_voisins(self, L):
        """Set the neighbour states; silently ignored unless *L* is a list."""
        if type(L) == list:
            self.__voisins = L

    def get_voisins(self):
        """Return the list of neighbour states."""
        return self.__voisins

    def naitre(self):
        """Mark the cell to be alive in the next generation."""
        self.__futur = True

    def mourir(self):
        """Mark the cell to be dead in the next generation."""
        self.__futur = False

    def basculer(self):
        """Commit the computed next state as the current state."""
        self.__actuel = self.__futur

    def __str__(self):
        """Return 🌱 if the cell is currently alive, 💀 otherwise."""
        if self.__actuel:
            return '🌱'
        else:
            return '💀'

    def calcule_etat_futur(self):
        """Compute the next state from Conway's rules.

        A cell is alive next generation iff it has exactly 3 live
        neighbours, or it is alive now and has exactly 2.  (The original
        code's last condition, ``acc != 2 or acc != 3``, was a tautology;
        the elif chain happened to behave correctly anyway — this rewrite
        states the rules directly.)
        """
        acc = sum(1 for v in self.__voisins if v)  # live-neighbour count
        if acc == 3 or (acc == 2 and self.__actuel):
            self.naitre()
        else:
            self.mourir()
class Grille:
    """Toroidal grid of Cellule objects for Conway's Game of Life.

    Rows are indexed by ``i`` in [0, hauteur) and columns by ``j`` in
    [0, largeur); neighbourhood lookups wrap around the edges.
    """

    def __init__(self):
        """Initialise an empty grid of 30 rows by 20 columns."""
        self.largeur = 20   # number of columns
        self.hauteur = 30   # number of rows
        self.matrice = []   # 2D list of Cellule, filled by remplir_alea()

    def clear_matrice(self):
        """Reset the matrix to an empty list."""
        self.matrice = []

    def set_largeur(self, x):
        """Set the grid width; silently ignored unless *x* is an int."""
        if type(x) == int:
            self.largeur = x

    def set_hauteur(self, x):
        """Set the grid height; silently ignored unless *x* is an int."""
        if type(x) == int:
            self.hauteur = x

    def dansgrille(self, i, j):
        """Return True if (i, j) lies inside the grid bounds."""
        return 0 <= i <= self.hauteur - 1 and 0 <= j <= self.largeur - 1

    def setXY(self, i, j, valeur):
        """Store *valeur* at (i, j) if the coordinates are in the grid."""
        if self.dansgrille(i, j) == True:
            self.matrice[i][j] = valeur
        else:
            return 'out of range, not added'

    def getXY(self, i, j):
        """Return the cell at (i, j), or None if out of bounds."""
        if self.dansgrille(i, j) == True:
            return self.matrice[i][j]

    def get_largeur(self):
        """Return the grid width (number of columns)."""
        return self.largeur

    def get_hauteur(self):
        """Return the grid height (number of rows)."""
        return self.hauteur

    @staticmethod
    def est_voisins(i, j, x, y, instance):
        """Return True if (x, y) is one of the 8 Moore neighbours of
        (i, j) on the toroidal grid of *instance* (edges wrap around).

        BUG FIX: the original used ``if`` where ``elif`` was required,
        so the wrapped row index was immediately overwritten by the
        ``else`` branch (yielding negative indices), and it wrapped the
        row by the width and the column by the height — the opposite of
        get8voisins.  Both are fixed here with modulo arithmetic.
        """
        if i == x and j == y:   # a cell is not its own neighbour
            return False
        for b in range(-1, 2):
            abx = (i + b) % instance.get_hauteur()       # wrapped row
            for c in range(-1, 2):
                ordo = (j + c) % instance.get_largeur()  # wrapped column
                if x == abx and y == ordo:
                    return True
        return False

    def get8voisins(self, i, j):
        """Return the list of the 8 neighbours' states (booleans) of the
        cell at (i, j), wrapping around the edges, or None if (i, j) is
        outside the grid."""
        if self.dansgrille(i, j) == True:
            L_voisins = []
            for b in range(-1, 2):
                # (i + b) % hauteur is equivalent to the original
                # elif-chain wrap-around for in-grid i
                abx = (i + b) % self.get_hauteur()
                for c in range(-1, 2):
                    ordo = (j + c) % self.get_largeur()
                    if abx != i or ordo != j:   # skip the cell itself
                        data = self.getXY(abx, ordo)
                        L_voisins.append(data.est_vivant())
            return L_voisins
        else:
            return None

    def __str__(self):
        """Print the board to the terminal, one row per line.

        NOTE(review): this prints and implicitly returns None, so
        ``str(grille)`` would raise TypeError; kept as-is because the
        original behaved the same way.
        """
        for i in range(len(self.matrice)):
            display = []
            for j in range(len(self.matrice[i])):
                display.append(self.matrice[i][j].__str__())
            print(display)
        print('\n')

    def getallstate(self):
        """Return the whole board as a multi-line string of emojis."""
        allin = ''
        for ligne in self.matrice:
            allin = allin + ''.join(str(cellule) for cellule in ligne) + '\n'
        return allin

    def remplir_alea(self, pourcent):
        """Fill the matrix with cells, *pourcent* percent of which are
        alive at random positions; return False if *pourcent* is not in
        (0, 100]."""
        if 0 < int(pourcent) <= 100:
            cases = self.largeur * self.hauteur
            nombre = int(cases * (pourcent / 100))  # cells to make alive
            L_vivant = []
            # draw distinct random coordinates until the quota is met
            while len(L_vivant) != nombre:
                y = randint(0, self.largeur - 1)
                x = randint(0, self.hauteur - 1)
                if (x, y) not in L_vivant:
                    L_vivant.append((x, y))
            for i in range(0, self.hauteur):
                self.matrice.append([])
                for b in range(0, self.largeur):
                    self.matrice[i].append(Cellule())
                    if (i, b) in L_vivant:
                        cellule = self.getXY(i, b)
                        cellule.naitre()    # set future state to alive
                        cellule.basculer()  # commit it immediately
                        self.setXY(i, b, cellule)
        else:
            return False

    def Jeu(self):
        """Compute the next state of every cell."""
        for i in range(0, self.hauteur):
            for b in range(0, self.largeur):
                cellule = self.getXY(i, b)
                cellule.set_voisins(self.get8voisins(i, b))
                cellule.calcule_etat_futur()
                self.setXY(i, b, cellule)

    def actualise(self):
        """Commit the next state of every cell."""
        for i in range(0, self.hauteur):
            for b in range(0, self.largeur):
                cellule = self.getXY(i, b)
                cellule.basculer()
                self.setXY(i, b, cellule)
def bakbak(*args):
    """Tear down the settings screen and run the game in the Tk window.

    Reads the module-level globals (prépartie, plateau, fenetre,
    numberofcol, numberoflign, numberofturn, pourcent) created by the
    GUI setup code.
    """
    prépartie.pack_forget()           # hide the settings screen
    plateau.clear_matrice()           # reset the board
    plateau.set_largeur(int(numberofcol.get()))
    plateau.set_hauteur(int(numberoflign.get()))
    tours = int(numberofturn.get())   # number of generations to play
    partie = Frame(fenetre, bg='#85c17e')
    plateau.remplir_alea(int(pourcent.get()))   # seed the board
    touracc = StringVar()
    printer = Label(partie, textvariable=touracc, bg='#85c17e', font=(
        'Time News Roman', 19), fg='white')   # dynamic turn counter
    printer.pack(padx=10, pady=10)
    actuel = StringVar()
    actuel.set('')
    printer1 = Label(partie, textvariable=actuel, bg='#85c17e', font=(
        'Time News Roman', 20))               # dynamic board display
    printer1.pack()

    def update(n=1, chain=''):
        """Play generation *n* and reschedule itself every 2 s until
        *tours* generations have been shown; *chain* accumulates the
        textual history of the game."""
        if n < tours + 1:
            touracc.set('Tour: ' + str(n))
            plateau.Jeu()
            actuel.set(plateau.getallstate())
            plateau.actualise()
            ch = 'Tour: ' + str(n) + '\n' + plateau.getallstate() + '\n'
            chain = chain + ch
            partie.after(2000, update, n + 1, chain)
        else:
            def save():
                """Save every step of the game to a .txt file."""
                # BUG FIX: each filetypes entry must be a
                # (label, pattern) pair; the original passed a bare
                # string as the first entry, which raises TclError.
                f = filedialog.asksaveasfilename(initialdir='/', title=
                    'Select file', filetypes=(('txt files', '*.txt'), (
                    'all files', '*.*')))
                # BUG FIX: asksaveasfilename returns '' (not None) when
                # the dialog is cancelled; `if f is None` missed it.
                if not f:
                    return
                with open(f, 'w', encoding='utf-8') as result:
                    result.write(chain)
            Button(partie, text='Recommencer', command=restart, bg=
                '#85c17e', font=('Noto Serif', 11)).pack(side=LEFT, padx=5)
            Button(partie, text='Sauvegarder la partie', command=save, bg=
                '#85c17e', font=('Noto Serif', 11)).pack(side=LEFT, padx=5)

    def restart():
        """Destroy the game frame and return to the settings screen."""
        partie.destroy()
        prépartie.pack()
    update()
    Button(partie, text='Quitter le jeu (fermera complètement la fenêtre)',
        command=quit, bg='#85c17e', font=('Noto Serif', 11)).pack(side=LEFT,
        padx=10)
    partie.pack()
# --- Module-level GUI setup: builds the settings screen and starts Tk ---
fenetre=Tk()
fenetre.title("Le jeu de la vie")
fenetre['bg']='#85c17e' # green background colour
fenetre.geometry("1000x500") # window dimensions
plateau=Grille()
# title banner
title= Label(fenetre, text="Le jeu de la vie", bg="#85c17e", font=('Courier New', 30), fg="white")
title.pack(padx=10)
'''Prépartie'''
prépartie=Frame(fenetre,borderwidth=2,relief='ridge',bg="#85c17e") # frame holding all the pre-game settings frames
choix=Frame(prépartie,bg="#85c17e") # frame holding the starting-percentage choice
titlepourcent=Label(choix, text="Entrez le pourcentage de cellules vivantes souhaitées au démarrage ",bg="#85c17e",font=('Noto Serif', 11))
titlepourcent.pack(side=LEFT,padx=10)
'''pourcent'''
value = DoubleVar()
pourcent=Scale(choix, variable=value,cursor='dot',orient=HORIZONTAL,bg="#85c17e",troughcolor='white') # slider to pick the starting percentage of live cells
pourcent.pack(side=LEFT,padx=5,pady=10)
'''nombre de tours'''
nombretour=Frame(prépartie,bg="#85c17e") # holds the number-of-turns choice
Label(nombretour, text="Entrez le nombre de tour souhaités",bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)
numberofturn = Spinbox(nombretour, from_=1, to=100,bg="#85c17e",buttonbackground='white',cursor='dot') # spinbox to pick the number of turns
numberofturn.pack()
'''hauteur et largeur'''
nombrecolonnes=Frame(prépartie,bg="#85c17e",pady=10) # holds the number-of-columns choice
Label(nombrecolonnes, text="Entrez le nombre de colonnes souhaitées",bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)
numberofcol = Spinbox(nombrecolonnes, from_=1, to=100,bg="#85c17e",buttonbackground='white',cursor='dot') # spinbox to pick the number of columns
numberofcol.pack()
nombrelignes=Frame(prépartie,bg="#85c17e",pady=10) # holds the number-of-rows choice
Label(nombrelignes, text="Entrez le nombre de lignes souhaitées",bg="#85c17e",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)
numberoflign = Spinbox(nombrelignes, from_=1, to=100,bg="#85c17e",buttonbackground='white',cursor='dot') # spinbox to pick the number of rows
numberoflign.pack()
'''validation'''
validate= Frame(prépartie,bg="#B76E79")
Button(validate,text="Valider et lancer le jeu",command=bakbak,bg="#85c17e", activebackground='white',cursor='star',font=('Noto Serif', 11)).pack(side=LEFT,padx=10,pady=15) # button that validates the selected settings and starts the game
prépartie.pack()
choix.pack()
nombretour.pack()
nombrecolonnes.pack()
nombrelignes.pack()
validate.pack()
fenetre.mainloop()
|
flexible
|
{
"blob_id": "cef904b70eb9a997c3c48884ee34665a77e18897",
"index": 8465,
"step-1": "<mask token>\n\n\nclass Cellule:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def basculer(self):\n \"\"\"mutateur qui change l'état actuel de la cellule pour l'état futur\"\"\"\n self.__actuel = self.__futur\n\n def __str__(self):\n \"\"\"méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀\"\"\"\n if self.__actuel == True:\n return '🌱'\n else:\n return '💀'\n <mask token>\n\n\nclass Grille:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.largeur = 20\n self.hauteur = 30\n self.matrice = []\n\n def clear_matrice(self):\n \"\"\"mutateur qui remet la matrice à 0\"\"\"\n self.matrice = []\n\n def set_largeur(self, x):\n \"\"\"mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.largeur = x\n\n def set_hauteur(self, x):\n \"\"\"mutateur qui change la hauteur de la grille. Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.hauteur = x\n\n def dansgrille(self, i, j):\n \"\"\"Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille\"\"\"\n if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:\n return True\n else:\n return False\n\n def setXY(self, i, j, valeur):\n \"\"\"mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. 
Si les coordonées sont dans la liste, ajoute la valeur à cet endroit\"\"\"\n if self.dansgrille(i, j) == True:\n self.matrice[i][j] = valeur\n else:\n return 'out of range, not added'\n\n def getXY(self, i, j):\n \"\"\"accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n return self.matrice[i][j]\n\n def get_largeur(self):\n \"\"\"accesseur qui retourne la largeur de la grille\"\"\"\n return self.largeur\n\n def get_hauteur(self):\n \"\"\"accesseur qui permet de récupérer la valeur de la hauteur de la grille\"\"\"\n return self.hauteur\n\n @staticmethod\n def est_voisins(i, j, x, y, instance):\n \"\"\"fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore\"\"\"\n abx = None\n ordo = None\n if i == x and j == y:\n return False\n for b in range(-1, 2):\n if b + i < 0:\n abx = instance.largeur + b\n if b + i > instance.get_largeur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = instance.get_hauteur() + c\n if j + c > instance.get_hauteur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if x == abx and y == ordo:\n return True\n return False\n\n def get8voisins(self, i, j):\n \"\"\"fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n L_voisins = []\n for b in range(-1, 2):\n if b + i < 0:\n abx = self.get_hauteur() + b\n elif b + i > self.get_hauteur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = self.get_largeur() + c\n elif j + c > self.get_largeur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if abx != i or ordo != j:\n data = self.getXY(abx, ordo)\n L_voisins.append(data.est_vivant())\n return L_voisins\n else:\n return None\n\n def __str__(self):\n \"\"\"fonction qui affiche le jeu de la vie dans le terminal\"\"\"\n for i in 
range(len(self.matrice)):\n display = []\n for j in range(len(self.matrice[i])):\n display.append(self.matrice[i][j].__str__())\n print(display)\n print('\\n')\n\n def getallstate(self):\n \"\"\"fonction qui renvoie tout les etats\"\"\"\n allin = ''\n for i in range(len(self.matrice)):\n display = ''\n for j in range(len(self.matrice[i])):\n display = display + str(self.matrice[i][j].__str__())\n allin = allin + display + '\\n'\n return allin\n\n def remplir_alea(self, pourcent):\n \"\"\"fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes\"\"\"\n if 0 < int(pourcent) <= 100:\n cases = self.largeur * self.hauteur\n nombre = int(cases * (pourcent / 100))\n L_vivant = []\n while len(L_vivant) != nombre:\n y = randint(0, self.largeur - 1)\n x = randint(0, self.hauteur - 1)\n if (x, y) not in L_vivant:\n L_vivant.append((x, y))\n for i in range(0, self.hauteur):\n self.matrice.append([])\n for b in range(0, self.largeur):\n self.matrice[i].append(Cellule())\n if (i, b) in L_vivant:\n cellule = self.getXY(i, b)\n cellule.naitre()\n cellule.basculer()\n self.setXY(i, b, cellule)\n else:\n return False\n\n def Jeu(self):\n \"\"\"fonction qui calcule l'état futur de chaque cellule\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.set_voisins(self.get8voisins(i, b))\n cellule.calcule_etat_futur()\n self.setXY(i, b, cellule)\n\n def actualise(self):\n \"\"\"fonction qui actualise l'état de toutes le cellules\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.basculer()\n self.setXY(i, b, cellule)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Cellule:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.__actuel = False\n self.__futur = False\n self.__voisins = None\n <mask token>\n\n def set_voisins(self, L):\n \"\"\"mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste\"\"\"\n if type(L) == list:\n self.__voisins = L\n <mask token>\n <mask token>\n <mask token>\n\n def basculer(self):\n \"\"\"mutateur qui change l'état actuel de la cellule pour l'état futur\"\"\"\n self.__actuel = self.__futur\n\n def __str__(self):\n \"\"\"méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀\"\"\"\n if self.__actuel == True:\n return '🌱'\n else:\n return '💀'\n\n def calcule_etat_futur(self):\n \"\"\"fonction qui calcule l'état futur des cellules via les règles du jeu\"\"\"\n acc = 0\n for i in range(len(self.__voisins)):\n if self.__voisins[i] == True:\n acc = acc + 1\n if acc == 3 and self.__actuel == False:\n self.naitre()\n if acc != 3 and self.__actuel == False:\n self.mourir()\n elif (acc == 2 or acc == 3) and self.__actuel == True:\n self.naitre()\n elif (acc != 2 or acc != 3) and self.__actuel == True:\n self.mourir()\n\n\nclass Grille:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.largeur = 20\n self.hauteur = 30\n self.matrice = []\n\n def clear_matrice(self):\n \"\"\"mutateur qui remet la matrice à 0\"\"\"\n self.matrice = []\n\n def set_largeur(self, x):\n \"\"\"mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.largeur = x\n\n def set_hauteur(self, x):\n \"\"\"mutateur qui change la hauteur de la grille. 
Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.hauteur = x\n\n def dansgrille(self, i, j):\n \"\"\"Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille\"\"\"\n if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:\n return True\n else:\n return False\n\n def setXY(self, i, j, valeur):\n \"\"\"mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. Si les coordonées sont dans la liste, ajoute la valeur à cet endroit\"\"\"\n if self.dansgrille(i, j) == True:\n self.matrice[i][j] = valeur\n else:\n return 'out of range, not added'\n\n def getXY(self, i, j):\n \"\"\"accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n return self.matrice[i][j]\n\n def get_largeur(self):\n \"\"\"accesseur qui retourne la largeur de la grille\"\"\"\n return self.largeur\n\n def get_hauteur(self):\n \"\"\"accesseur qui permet de récupérer la valeur de la hauteur de la grille\"\"\"\n return self.hauteur\n\n @staticmethod\n def est_voisins(i, j, x, y, instance):\n \"\"\"fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore\"\"\"\n abx = None\n ordo = None\n if i == x and j == y:\n return False\n for b in range(-1, 2):\n if b + i < 0:\n abx = instance.largeur + b\n if b + i > instance.get_largeur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = instance.get_hauteur() + c\n if j + c > instance.get_hauteur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if x == abx and y == ordo:\n return True\n return False\n\n def get8voisins(self, i, j):\n \"\"\"fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n L_voisins = []\n for b in range(-1, 2):\n if b + i < 0:\n abx = self.get_hauteur() + b\n elif b + 
i > self.get_hauteur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = self.get_largeur() + c\n elif j + c > self.get_largeur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if abx != i or ordo != j:\n data = self.getXY(abx, ordo)\n L_voisins.append(data.est_vivant())\n return L_voisins\n else:\n return None\n\n def __str__(self):\n \"\"\"fonction qui affiche le jeu de la vie dans le terminal\"\"\"\n for i in range(len(self.matrice)):\n display = []\n for j in range(len(self.matrice[i])):\n display.append(self.matrice[i][j].__str__())\n print(display)\n print('\\n')\n\n def getallstate(self):\n \"\"\"fonction qui renvoie tout les etats\"\"\"\n allin = ''\n for i in range(len(self.matrice)):\n display = ''\n for j in range(len(self.matrice[i])):\n display = display + str(self.matrice[i][j].__str__())\n allin = allin + display + '\\n'\n return allin\n\n def remplir_alea(self, pourcent):\n \"\"\"fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. 
Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes\"\"\"\n if 0 < int(pourcent) <= 100:\n cases = self.largeur * self.hauteur\n nombre = int(cases * (pourcent / 100))\n L_vivant = []\n while len(L_vivant) != nombre:\n y = randint(0, self.largeur - 1)\n x = randint(0, self.hauteur - 1)\n if (x, y) not in L_vivant:\n L_vivant.append((x, y))\n for i in range(0, self.hauteur):\n self.matrice.append([])\n for b in range(0, self.largeur):\n self.matrice[i].append(Cellule())\n if (i, b) in L_vivant:\n cellule = self.getXY(i, b)\n cellule.naitre()\n cellule.basculer()\n self.setXY(i, b, cellule)\n else:\n return False\n\n def Jeu(self):\n \"\"\"fonction qui calcule l'état futur de chaque cellule\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.set_voisins(self.get8voisins(i, b))\n cellule.calcule_etat_futur()\n self.setXY(i, b, cellule)\n\n def actualise(self):\n \"\"\"fonction qui actualise l'état de toutes le cellules\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.basculer()\n self.setXY(i, b, cellule)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Cellule:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.__actuel = False\n self.__futur = False\n self.__voisins = None\n\n def est_vivant(self):\n \"\"\"accesseur qui retourne l'état actuel de la cellule\"\"\"\n return self.__actuel\n\n def set_voisins(self, L):\n \"\"\"mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste\"\"\"\n if type(L) == list:\n self.__voisins = L\n <mask token>\n <mask token>\n <mask token>\n\n def basculer(self):\n \"\"\"mutateur qui change l'état actuel de la cellule pour l'état futur\"\"\"\n self.__actuel = self.__futur\n\n def __str__(self):\n \"\"\"méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀\"\"\"\n if self.__actuel == True:\n return '🌱'\n else:\n return '💀'\n\n def calcule_etat_futur(self):\n \"\"\"fonction qui calcule l'état futur des cellules via les règles du jeu\"\"\"\n acc = 0\n for i in range(len(self.__voisins)):\n if self.__voisins[i] == True:\n acc = acc + 1\n if acc == 3 and self.__actuel == False:\n self.naitre()\n if acc != 3 and self.__actuel == False:\n self.mourir()\n elif (acc == 2 or acc == 3) and self.__actuel == True:\n self.naitre()\n elif (acc != 2 or acc != 3) and self.__actuel == True:\n self.mourir()\n\n\nclass Grille:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.largeur = 20\n self.hauteur = 30\n self.matrice = []\n\n def clear_matrice(self):\n \"\"\"mutateur qui remet la matrice à 0\"\"\"\n self.matrice = []\n\n def set_largeur(self, x):\n \"\"\"mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.largeur = x\n\n def set_hauteur(self, x):\n \"\"\"mutateur qui change la hauteur de la grille. 
Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.hauteur = x\n\n def dansgrille(self, i, j):\n \"\"\"Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille\"\"\"\n if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:\n return True\n else:\n return False\n\n def setXY(self, i, j, valeur):\n \"\"\"mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. Si les coordonées sont dans la liste, ajoute la valeur à cet endroit\"\"\"\n if self.dansgrille(i, j) == True:\n self.matrice[i][j] = valeur\n else:\n return 'out of range, not added'\n\n def getXY(self, i, j):\n \"\"\"accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n return self.matrice[i][j]\n\n def get_largeur(self):\n \"\"\"accesseur qui retourne la largeur de la grille\"\"\"\n return self.largeur\n\n def get_hauteur(self):\n \"\"\"accesseur qui permet de récupérer la valeur de la hauteur de la grille\"\"\"\n return self.hauteur\n\n @staticmethod\n def est_voisins(i, j, x, y, instance):\n \"\"\"fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore\"\"\"\n abx = None\n ordo = None\n if i == x and j == y:\n return False\n for b in range(-1, 2):\n if b + i < 0:\n abx = instance.largeur + b\n if b + i > instance.get_largeur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = instance.get_hauteur() + c\n if j + c > instance.get_hauteur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if x == abx and y == ordo:\n return True\n return False\n\n def get8voisins(self, i, j):\n \"\"\"fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n L_voisins = []\n for b in range(-1, 2):\n if b + i < 0:\n abx = self.get_hauteur() + b\n elif b + 
i > self.get_hauteur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = self.get_largeur() + c\n elif j + c > self.get_largeur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if abx != i or ordo != j:\n data = self.getXY(abx, ordo)\n L_voisins.append(data.est_vivant())\n return L_voisins\n else:\n return None\n\n def __str__(self):\n \"\"\"fonction qui affiche le jeu de la vie dans le terminal\"\"\"\n for i in range(len(self.matrice)):\n display = []\n for j in range(len(self.matrice[i])):\n display.append(self.matrice[i][j].__str__())\n print(display)\n print('\\n')\n\n def getallstate(self):\n \"\"\"fonction qui renvoie tout les etats\"\"\"\n allin = ''\n for i in range(len(self.matrice)):\n display = ''\n for j in range(len(self.matrice[i])):\n display = display + str(self.matrice[i][j].__str__())\n allin = allin + display + '\\n'\n return allin\n\n def remplir_alea(self, pourcent):\n \"\"\"fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. 
Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes\"\"\"\n if 0 < int(pourcent) <= 100:\n cases = self.largeur * self.hauteur\n nombre = int(cases * (pourcent / 100))\n L_vivant = []\n while len(L_vivant) != nombre:\n y = randint(0, self.largeur - 1)\n x = randint(0, self.hauteur - 1)\n if (x, y) not in L_vivant:\n L_vivant.append((x, y))\n for i in range(0, self.hauteur):\n self.matrice.append([])\n for b in range(0, self.largeur):\n self.matrice[i].append(Cellule())\n if (i, b) in L_vivant:\n cellule = self.getXY(i, b)\n cellule.naitre()\n cellule.basculer()\n self.setXY(i, b, cellule)\n else:\n return False\n\n def Jeu(self):\n \"\"\"fonction qui calcule l'état futur de chaque cellule\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.set_voisins(self.get8voisins(i, b))\n cellule.calcule_etat_futur()\n self.setXY(i, b, cellule)\n\n def actualise(self):\n \"\"\"fonction qui actualise l'état de toutes le cellules\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.basculer()\n self.setXY(i, b, cellule)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Cellule:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.__actuel = False\n self.__futur = False\n self.__voisins = None\n\n def est_vivant(self):\n \"\"\"accesseur qui retourne l'état actuel de la cellule\"\"\"\n return self.__actuel\n\n def set_voisins(self, L):\n \"\"\"mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste\"\"\"\n if type(L) == list:\n self.__voisins = L\n\n def get_voisins(self):\n \"\"\"accesseur qui renvoie les voisins de la cellule\"\"\"\n return self.__voisins\n\n def naitre(self):\n \"\"\"mutateur qui passe l'état futur de la cellule à True\"\"\"\n self.__futur = True\n\n def mourir(self):\n \"\"\"mutateur qui passe l'état futur de la cellule à False\"\"\"\n self.__futur = False\n\n def basculer(self):\n \"\"\"mutateur qui change l'état actuel de la cellule pour l'état futur\"\"\"\n self.__actuel = self.__futur\n\n def __str__(self):\n \"\"\"méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. Sinon, elle renvoie 💀\"\"\"\n if self.__actuel == True:\n return '🌱'\n else:\n return '💀'\n\n def calcule_etat_futur(self):\n \"\"\"fonction qui calcule l'état futur des cellules via les règles du jeu\"\"\"\n acc = 0\n for i in range(len(self.__voisins)):\n if self.__voisins[i] == True:\n acc = acc + 1\n if acc == 3 and self.__actuel == False:\n self.naitre()\n if acc != 3 and self.__actuel == False:\n self.mourir()\n elif (acc == 2 or acc == 3) and self.__actuel == True:\n self.naitre()\n elif (acc != 2 or acc != 3) and self.__actuel == True:\n self.mourir()\n\n\nclass Grille:\n\n def __init__(self):\n \"\"\"constructeur qui initialise les variables\"\"\"\n self.largeur = 20\n self.hauteur = 30\n self.matrice = []\n\n def clear_matrice(self):\n \"\"\"mutateur qui remet la matrice à 0\"\"\"\n self.matrice = []\n\n def set_largeur(self, x):\n \"\"\"mutateur qui change la largeur de la grille. 
Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.largeur = x\n\n def set_hauteur(self, x):\n \"\"\"mutateur qui change la hauteur de la grille. Prends un nombre entier en paramètre\"\"\"\n if type(x) == int:\n self.hauteur = x\n\n def dansgrille(self, i, j):\n \"\"\"Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille\"\"\"\n if self.hauteur - 1 >= i >= 0 and self.largeur - 1 >= j >= 0:\n return True\n else:\n return False\n\n def setXY(self, i, j, valeur):\n \"\"\"mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. Si les coordonées sont dans la liste, ajoute la valeur à cet endroit\"\"\"\n if self.dansgrille(i, j) == True:\n self.matrice[i][j] = valeur\n else:\n return 'out of range, not added'\n\n def getXY(self, i, j):\n \"\"\"accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n return self.matrice[i][j]\n\n def get_largeur(self):\n \"\"\"accesseur qui retourne la largeur de la grille\"\"\"\n return self.largeur\n\n def get_hauteur(self):\n \"\"\"accesseur qui permet de récupérer la valeur de la hauteur de la grille\"\"\"\n return self.hauteur\n\n @staticmethod\n def est_voisins(i, j, x, y, instance):\n \"\"\"fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore\"\"\"\n abx = None\n ordo = None\n if i == x and j == y:\n return False\n for b in range(-1, 2):\n if b + i < 0:\n abx = instance.largeur + b\n if b + i > instance.get_largeur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = instance.get_hauteur() + c\n if j + c > instance.get_hauteur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if x == abx and y == ordo:\n return True\n return False\n\n def get8voisins(self, i, j):\n \"\"\"fonction qui donne la liste des voisins d'une cellule si 
celle ci est dans la grille\"\"\"\n if self.dansgrille(i, j) == True:\n L_voisins = []\n for b in range(-1, 2):\n if b + i < 0:\n abx = self.get_hauteur() + b\n elif b + i > self.get_hauteur() - 1:\n abx = 0\n else:\n abx = b + i\n for c in range(-1, 2):\n if j + c < 0:\n ordo = self.get_largeur() + c\n elif j + c > self.get_largeur() - 1:\n ordo = 0\n else:\n ordo = c + j\n if abx != i or ordo != j:\n data = self.getXY(abx, ordo)\n L_voisins.append(data.est_vivant())\n return L_voisins\n else:\n return None\n\n def __str__(self):\n \"\"\"fonction qui affiche le jeu de la vie dans le terminal\"\"\"\n for i in range(len(self.matrice)):\n display = []\n for j in range(len(self.matrice[i])):\n display.append(self.matrice[i][j].__str__())\n print(display)\n print('\\n')\n\n def getallstate(self):\n \"\"\"fonction qui renvoie tout les etats\"\"\"\n allin = ''\n for i in range(len(self.matrice)):\n display = ''\n for j in range(len(self.matrice[i])):\n display = display + str(self.matrice[i][j].__str__())\n allin = allin + display + '\\n'\n return allin\n\n def remplir_alea(self, pourcent):\n \"\"\"fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. 
Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes\"\"\"\n if 0 < int(pourcent) <= 100:\n cases = self.largeur * self.hauteur\n nombre = int(cases * (pourcent / 100))\n L_vivant = []\n while len(L_vivant) != nombre:\n y = randint(0, self.largeur - 1)\n x = randint(0, self.hauteur - 1)\n if (x, y) not in L_vivant:\n L_vivant.append((x, y))\n for i in range(0, self.hauteur):\n self.matrice.append([])\n for b in range(0, self.largeur):\n self.matrice[i].append(Cellule())\n if (i, b) in L_vivant:\n cellule = self.getXY(i, b)\n cellule.naitre()\n cellule.basculer()\n self.setXY(i, b, cellule)\n else:\n return False\n\n def Jeu(self):\n \"\"\"fonction qui calcule l'état futur de chaque cellule\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.set_voisins(self.get8voisins(i, b))\n cellule.calcule_etat_futur()\n self.setXY(i, b, cellule)\n\n def actualise(self):\n \"\"\"fonction qui actualise l'état de toutes le cellules\"\"\"\n for i in range(0, self.hauteur):\n for b in range(0, self.largeur):\n cellule = self.getXY(i, b)\n cellule.basculer()\n self.setXY(i, b, cellule)\n\n\ndef bakbak(*args):\n \"\"\"fonction qui affiche le jeu en interface graphique\"\"\"\n prépartie.pack_forget()\n plateau.clear_matrice()\n plateau.set_largeur(int(numberofcol.get()))\n plateau.set_hauteur(int(numberoflign.get()))\n tours = int(numberofturn.get())\n partie = Frame(fenetre, bg='#85c17e')\n plateau.remplir_alea(int(pourcent.get()))\n touracc = StringVar()\n printer = Label(partie, textvariable=touracc, bg='#85c17e', font=(\n 'Time News Roman', 19), fg='white')\n printer.pack(padx=10, pady=10)\n actuel = StringVar()\n actuel.set('')\n printer1 = Label(partie, textvariable=actuel, bg='#85c17e', font=(\n 'Time News Roman', 20))\n printer1.pack()\n\n def update(n=1, chain=''):\n \"\"\"fonction récursive qui prends pour paramètres un chaine de caractères et un nombre entier\"\"\"\n ch = ''\n if n < tours + 
1:\n touracc.set('Tour: ' + str(n))\n plateau.Jeu()\n actuel.set(plateau.getallstate())\n plateau.actualise()\n ch = 'Tour: ' + str(n) + '\\n' + plateau.getallstate() + '\\n'\n chain = chain + ch\n partie.after(2000, update, n + 1, chain)\n else:\n\n def save():\n \"\"\"fonction qui sauvegarde chaque étape de la partie au forma .txt\"\"\"\n f = filedialog.asksaveasfilename(initialdir='/', title=\n 'Select file', filetypes=('txt files', ('all files',\n '*.*')))\n if f is None:\n return\n with open(f, 'w', encoding='utf-8') as result:\n result.write(chain)\n result.close()\n Button(partie, text='Recommencer', command=restart, bg=\n '#85c17e', font=('Noto Serif', 11)).pack(side=LEFT, padx=5)\n Button(partie, text='Sauvegarder la partie', command=save, bg=\n '#85c17e', font=('Noto Serif', 11)).pack(side=LEFT, padx=5)\n\n def restart():\n partie.destroy()\n prépartie.pack()\n update()\n Button(partie, text='Quitter le jeu (fermera complètement la fenêtre)',\n command=quit, bg='#85c17e', font=('Noto Serif', 11)).pack(side=LEFT,\n padx=10)\n partie.pack()\n\n\n<mask token>\n",
"step-5": "# -*- coding: utf-8 -*-\r\n'''\r\nLE JEU DE LA VIE\r\nMini projet numéro 2 de NSI\r\n\r\nModélisation Objet :\r\n\r\nQ1) On peut dégager, au premier abord : une classe cellule (avec un attribut état et un autre voisins) et une classe grille (avec un attribut ordonnée et un autre abscisse). En effet, ce sont les deux éléments du jeu.\r\n\r\nQ2) On pourrait donner une méthode pour changer l’état de la cellule, une autre pour obtenir son état. Une autre pour définir les voisins et encore une pour les obtenir. Avec ces méthodes, on pourra modifier l’état des cellules et calculer celui-ci en fonction de ses voisins.\r\nPour ce qui est de la classe grille, on pourrait coder une méthode pour obtenir les coordonnées, un autre pour les modifier. Ainsi, on pourra placer les cellules dans la grille là où il n’y en a pas forcément.\r\n\r\nQ3) Il sera plus simple de représenter la notion de voisinage dans la cellule avec la classe grille. En effet, c’est elle qui contient les coordonnées.\r\nQuand au calcul de celle-ci, elle sera plus simple dans la classe cellule car c’est cette classe qui contient l’état. \r\n\r\nQ4) Une cellule qui n’est pas au bord a 8 voisins. En effet, le voisinage de Moore compte les diagonales.\r\nUne cellule sur le côté mais pas dans un coin a 5 voisins.\r\nDans un coin, elle en a 3.\r\n\r\nQ5) Pour la case en haut à droite, on pourrait considérer comme voisin de droite la case tout en haut à gauche. Pour le voisin du haut, on peut considérer la case de même abscisse mais d’ordonnée 0 (celle tout en bas). En fait, on prendrait la case d’abscisse ou d’ordonnée « opposée » comme suivante. Cela afin de ne pas avoir que 3 cases prises en compte lors du calcul de l’état en fonction des voisins.\r\n\r\nQ8) Cela peut être utile pour vérifier facilement l’état d’une cellule. 
Ainsi, on peut l’interpréter dans une autre fonction ou même créer facilement une liste qui serait utilisée comme « historique » de la cellule.\r\n'''\r\n\r\nfrom random import randint\r\nfrom time import sleep\r\nfrom tkinter import filedialog\r\nfrom tkinter import *\r\n\r\nclass Cellule:\r\n def __init__(self):\r\n '''constructeur qui initialise les variables'''\r\n self.__actuel= False\r\n self.__futur= False\r\n self.__voisins= None\r\n \r\n def est_vivant(self):\r\n '''accesseur qui retourne l'état actuel de la cellule'''\r\n return self.__actuel\r\n \r\n def set_voisins(self,L):\r\n '''mutateur qui permet de modifier les voisins de la cellule. Prends en argument une liste'''\r\n if type(L)== list: #vérifie si l'élément donné est bien une liste\r\n self.__voisins= L\r\n \r\n def get_voisins(self):\r\n '''accesseur qui renvoie les voisins de la cellule'''\r\n return self.__voisins\r\n \r\n def naitre(self):\r\n '''mutateur qui passe l'état futur de la cellule à True'''\r\n self.__futur=True\r\n\r\n def mourir(self):\r\n '''mutateur qui passe l'état futur de la cellule à False'''\r\n self.__futur=False\r\n \r\n def basculer(self):\r\n '''mutateur qui change l'état actuel de la cellule pour l'état futur'''\r\n self.__actuel=self.__futur\r\n \r\n def __str__(self):\r\n '''méthode protégée qui renvoie 🌱 si la cellule est actuellement vivante. 
Sinon, elle renvoie 💀'''\r\n if self.__actuel== True:\r\n return \"🌱\"\r\n else:\r\n return \"💀\"\r\n\r\n def calcule_etat_futur(self):\r\n '''fonction qui calcule l'état futur des cellules via les règles du jeu'''\r\n acc=0 #compteur qui va permettre de savoir le nombre de voisins vivants\r\n for i in range (len(self.__voisins)):#parcours tout les voisins\r\n if self.__voisins[i]==True: #si le voisin est vivant\r\n acc=acc+1\r\n \r\n #vérifie toutes les possibilités offertes par les règles du voisinage de Moore\r\n if acc==3 and self.__actuel==False:\r\n self.naitre()\r\n if acc!=3 and self.__actuel==False:\r\n self.mourir()\r\n\r\n elif (acc==2 or acc==3) and self.__actuel==True:\r\n self.naitre()\r\n elif (acc!=2 or acc!=3) and self.__actuel==True:\r\n self.mourir()\r\n\r\nclass Grille:\r\n def __init__(self):\r\n '''constructeur qui initialise les variables'''\r\n self.largeur=20\r\n self.hauteur=30\r\n self.matrice=[]\r\n \r\n def clear_matrice(self):\r\n '''mutateur qui remet la matrice à 0'''\r\n self.matrice=[]\r\n\r\n def set_largeur(self,x):\r\n '''mutateur qui change la largeur de la grille. Prends un nombre entier en paramètre'''\r\n if type(x)==int:\r\n self.largeur=x\r\n\r\n def set_hauteur(self,x):\r\n '''mutateur qui change la hauteur de la grille. Prends un nombre entier en paramètre'''\r\n if type(x)==int:\r\n self.hauteur=x\r\n\r\n def dansgrille(self,i,j):\r\n '''Fonction qui prends en paramètres 2 points (nombres entiers) et dit si ils se trouvent dans la grille''' \r\n if self.hauteur-1>=i>=0 and self.largeur-1>=j>=0: #si les coordonées de l'utilisateur sont comprises entre 0 et la largeur/hauteur en fonction du point\r\n return True\r\n \r\n else:\r\n return False\r\n\r\n def setXY(self,i,j,valeur):\r\n '''mutateur qui prend en compte des coordonées (nombres entiers) et une valeur. 
Si les coordonées sont dans la liste, ajoute la valeur à cet endroit'''\r\n if self.dansgrille(i,j)==True:\r\n self.matrice[i][j]= valeur\r\n else:\r\n return 'out of range, not added'\r\n \r\n def getXY(self,i,j):\r\n '''accesseur qui renvoie la valeur de la celllule dans la coordonée rentée par l'utilisateur si celle ci est dans la grille'''\r\n if self.dansgrille(i,j)==True:\r\n return self.matrice[i][j]\r\n \r\n def get_largeur(self):\r\n '''accesseur qui retourne la largeur de la grille'''\r\n return self.largeur\r\n \r\n def get_hauteur(self):\r\n '''accesseur qui permet de récupérer la valeur de la hauteur de la grille'''\r\n return self.hauteur\r\n \r\n @staticmethod\r\n def est_voisins(i,j,x,y,instance):\r\n '''fonction qui prend en paramètres les coordonées de deux points et retourne True si ils remplissent la notion de voisinage selon Moore'''\r\n abx=None\r\n ordo=None\r\n if i==x and j==y: #si c'est le même point\r\n return False\r\n\r\n for b in range (-1,2):\r\n #regarde si i est en bordure, adapte le point voisin à chercher en fonction\r\n if b+i<0: \r\n abx= instance.largeur+b\r\n if b+i>instance.get_largeur()-1:\r\n abx=0\r\n else:\r\n abx=b+i\r\n for c in range(-1,2):\r\n #regarde si j est en bordure, adapte le point voisin à chercher en fonction\r\n if j+c<0:\r\n ordo= instance.get_hauteur()+c\r\n if j+c>instance.get_hauteur()-1:\r\n ordo=0\r\n else:\r\n ordo=c+j\r\n #compare le point à chercher avec x et y. Si ils correspondent, ça veut dire que les points sont voisins\r\n if x==abx and y==ordo:\r\n return True\r\n \r\n return False #si aucun des points n'est voisins. 
En effet, sinon la fonction s'arrête avec le \"return True\" à la ligne 162\r\n \r\n def get8voisins(self,i,j):\r\n '''fonction qui donne la liste des voisins d'une cellule si celle ci est dans la grille'''\r\n if self.dansgrille(i,j)==True:\r\n L_voisins=[]\r\n for b in range (-1,2):\r\n #regarde si i est en bordure, adapte le point voisin à chercher en fonction\r\n if b+i<0: \r\n abx= self.get_hauteur()+b\r\n elif b+i>self.get_hauteur()-1:\r\n abx=0\r\n else:\r\n abx=b+i\r\n for c in range(-1,2):\r\n #regarde si j est en bordure, adapte le point voisin à chercher en fonction\r\n if j+c<0:\r\n ordo= self.get_largeur()+c\r\n elif j+c>self.get_largeur()-1:\r\n ordo=0\r\n else:\r\n ordo=c+j\r\n \r\n #ajoute le point voisin\r\n if abx!=i or ordo!=j:\r\n data=self.getXY(abx,ordo)\r\n L_voisins.append(data.est_vivant())\r\n return L_voisins\r\n \r\n else:\r\n return None\r\n\r\n def __str__(self):\r\n '''fonction qui affiche le jeu de la vie dans le terminal'''\r\n #parcours tout les éléments du jeu\r\n for i in range(len(self.matrice)):\r\n display=[]\r\n for j in range(len(self.matrice[i])):\r\n display.append(self.matrice[i][j].__str__())#ajoute le résultat\r\n print(display)\r\n \r\n print('\\n')\r\n\r\n def getallstate(self):\r\n '''fonction qui renvoie tout les etats'''\r\n #parcours tout les éléments du jeu\r\n allin=\"\"\r\n for i in range(len(self.matrice)):\r\n display=\"\"\r\n for j in range(len(self.matrice[i])):\r\n display=display+str(self.matrice[i][j].__str__())#ajoute le résultat\r\n allin=allin+display+'\\n'\r\n return allin\r\n \r\n\r\n def remplir_alea(self,pourcent):\r\n '''fonction qui prends en paramètres un nombre entier faisant office de pourcentage et qui rempli la matrice de cellule. 
Un poucentage (celui rentré) de ces cellules sont aléatoirement vivantes'''\r\n if 0<int(pourcent)<=100:\r\n cases=self.largeur*self.hauteur\r\n nombre=int(cases*(pourcent/100))# calcule le nombre de cellules devant être vivantes\r\n L_vivant=[]\r\n \r\n while len(L_vivant)!=nombre: #boucle qui crée des ordonées ou abscisses aléatoire jusqu'a ce que le nombre demandé soit atteint\r\n y= randint(0,self.largeur-1)\r\n x= randint(0, self.hauteur-1)\r\n if (x,y) not in L_vivant: #ajoute l'abscisse si elle n'est pas deja dans la liste\r\n L_vivant.append((x,y))\r\n \r\n for i in range(0,self.hauteur):\r\n self.matrice.append([])#crée une nouvelle liste pour stocker une ligne supplémentaire\r\n for b in range(0,self.largeur):\r\n self.matrice[i].append(Cellule())#ajoute une cellule dans la liste\r\n if (i,b) in L_vivant: #si la cellule est dans la liste de celles devant être vivantes\r\n cellule=self.getXY(i,b) #obtient la cellule\r\n cellule.naitre()#passer l'état futur de la celllule à vivant\r\n cellule.basculer()#fait naître la cellule\r\n self.setXY(i,b,cellule) #remplace la cellule \"morte\"\r\n else:\r\n return False\r\n\r\n def Jeu(self):\r\n '''fonction qui calcule l'état futur de chaque cellule'''\r\n for i in range(0,self.hauteur): #parcours chaque point en hauteur\r\n for b in range(0,self.largeur):#parcours chaque point en largeur\r\n cellule=self.getXY(i,b) #obtiens les informations\r\n cellule.set_voisins(self.get8voisins(i,b))#change la valeur des voisins du point\r\n cellule.calcule_etat_futur()\r\n self.setXY(i,b,cellule)#change les données de la cellule dans la matrice\r\n \r\n def actualise(self):\r\n '''fonction qui actualise l'état de toutes le cellules'''\r\n for i in range(0,self.hauteur):#parcours chaque point en hauteur\r\n for b in range(0,self.largeur):#parcours chaque point en largeur\r\n cellule=self.getXY(i,b) #obtiens la cellule\r\n cellule.basculer() #bascule l'état\r\n self.setXY(i,b,cellule) #change l'état de la cellule dans la 
matrice\r\n\r\ndef bakbak(*args):\r\n '''fonction qui affiche le jeu en interface graphique'''\r\n prépartie.pack_forget() #cache l'écran de paramètrage\r\n plateau.clear_matrice() #nettoye le plateau\r\n plateau.set_largeur(int(numberofcol.get()))#change la largeur du plateau selon celle renseignée\r\n plateau.set_hauteur(int(numberoflign.get()))#change la hauteur du plateau selon celle renseignée\r\n tours=int(numberofturn.get()) #change le nombre de tours\r\n partie=Frame(fenetre,bg='#85c17e')\r\n plateau.remplir_alea(int(pourcent.get()))#crée le plateau de base\r\n touracc=StringVar()\r\n printer=Label(partie, textvariable=touracc,bg=\"#85c17e\",font=('Time News Roman', 19), fg=\"white\")#crée une zone de texte dynamique pour le nombre de tour\r\n printer.pack(padx=10,pady=10)\r\n actuel=StringVar()\r\n actuel.set('')\r\n printer1=Label(partie, textvariable=actuel,bg=\"#85c17e\",font=('Time News Roman', 20))#crée une zone de texte dynamique pour le plateau\r\n printer1.pack()\r\n \r\n\r\n def update(n=1,chain=''):\r\n '''fonction récursive qui prends pour paramètres un chaine de caractères et un nombre entier'''\r\n ch=''\r\n if n<tours+1:#si le nombre de tours demandés n'a pas été effectué\r\n touracc.set(\"Tour: \"+str(n)) #affiche le numéro du tour\r\n plateau.Jeu()#actualise le jeu\r\n actuel.set(plateau.getallstate())#affiche le plateau pour ce tour\r\n plateau.actualise()#actualise le tour\r\n ch=\"Tour: \"+str(n)+\"\\n\"+plateau.getallstate()+'\\n' #stocke les infos du tour\r\n chain=chain+ch#les ajoute à l'historique des tours\r\n partie.after(2000, update, n+1,chain)#attends 2 secondes puis recommence\r\n else:#si le nombre de tours demandés a été effectué\r\n def save():\r\n '''fonction qui sauvegarde chaque étape de la partie au forma .txt'''\r\n f = filedialog.asksaveasfilename(initialdir = \"/\",title = \"Select file\",filetypes = ((\"txt files\"),(\"all files\",\"*.*\")))\r\n if f is None: # asksaveasfile return `None` if dialog closed with 
\"cancel\".\r\n return\r\n with open(f,'w',encoding='utf-8') as result: #enregsitre le fichier sous le format UTF-8\r\n result.write(chain)\r\n result.close()\r\n Button(partie,text=\"Recommencer\",command=restart,bg=\"#85c17e\",font=('Noto Serif', 11)).pack(side=LEFT,padx=5) #affiche un bouton pour resélectionner des paramètres\r\n Button(partie,text='Sauvegarder la partie',command=save,bg=\"#85c17e\",font=('Noto Serif', 11)).pack(side=LEFT,padx=5) #affiche un bouton pour sauvegarder l'historique de la partie\r\n \r\n def restart():\r\n partie.destroy()\r\n prépartie.pack()\r\n\r\n update()\r\n Button(partie,text=\"Quitter le jeu (fermera complètement la fenêtre)\",command=quit,bg=\"#85c17e\",font=('Noto Serif', 11)).pack(side=LEFT,padx=10) #affiche un bouton pour quitter la fenêtre\r\n partie.pack()\r\n \r\nfenetre=Tk()\r\nfenetre.title(\"Le jeu de la vie\")\r\nfenetre['bg']='#85c17e' #crée une couleur de fond verte\r\nfenetre.geometry(\"1000x500\") #choisis les dimensions de la fenêtre\r\nplateau=Grille()\r\n\r\n#titre de présentation\r\ntitle= Label(fenetre, text=\"Le jeu de la vie\", bg=\"#85c17e\", font=('Courier New', 30), fg=\"white\")\r\ntitle.pack(padx=10)\r\n\r\n\r\n'''Prépartie'''\r\n\r\nprépartie=Frame(fenetre,borderwidth=2,relief='ridge',bg=\"#85c17e\") #crée un cadre pour contenir tout les cadres de preparties\r\nchoix=Frame(prépartie,bg=\"#85c17e\")#frame pour contenir le choix du pourcent\r\ntitlepourcent=Label(choix, text=\"Entrez le pourcentage de cellules vivantes souhaitées au démarrage \",bg=\"#85c17e\",font=('Noto Serif', 11))\r\ntitlepourcent.pack(side=LEFT,padx=10)\r\n\r\n'''pourcent'''\r\nvalue = DoubleVar()\r\npourcent=Scale(choix, variable=value,cursor='dot',orient=HORIZONTAL,bg=\"#85c17e\",troughcolor='white')#reglètte pour choisir le pourcentage\r\npourcent.pack(side=LEFT,padx=5,pady=10)\r\n\r\n\r\n'''nombre de tours'''\r\nnombretour=Frame(prépartie,bg=\"#85c17e\")#contient le choix du nombre de tour\r\nLabel(nombretour, text=\"Entrez 
le nombre de tour souhaités\",bg=\"#85c17e\",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)\r\nnumberofturn = Spinbox(nombretour, from_=1, to=100,bg=\"#85c17e\",buttonbackground='white',cursor='dot') #spinbox pour choisir le nombre de tours\r\nnumberofturn.pack()\r\n\r\n'''hauteur et largeur'''\r\nnombrecolonnes=Frame(prépartie,bg=\"#85c17e\",pady=10)#contient le choix du nombre de colonnes\r\nLabel(nombrecolonnes, text=\"Entrez le nombre de colonnes souhaitées\",bg=\"#85c17e\",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)\r\nnumberofcol = Spinbox(nombrecolonnes, from_=1, to=100,bg=\"#85c17e\",buttonbackground='white',cursor='dot')#spinbox pour choisir le nombre de colonnes\r\nnumberofcol.pack()\r\n\r\nnombrelignes=Frame(prépartie,bg=\"#85c17e\",pady=10)#contient le choix du nombre de colonnes\r\nLabel(nombrelignes, text=\"Entrez le nombre de lignes souhaitées\",bg=\"#85c17e\",font=('Noto Serif', 11)).pack(side=LEFT,padx=10)\r\nnumberoflign = Spinbox(nombrelignes, from_=1, to=100,bg=\"#85c17e\",buttonbackground='white',cursor='dot')#spinbox pour choisir le nombre de lignes\r\nnumberoflign.pack()\r\n\r\n'''validation'''\r\nvalidate= Frame(prépartie,bg=\"#B76E79\")\r\nButton(validate,text=\"Valider et lancer le jeu\",command=bakbak,bg=\"#85c17e\", activebackground='white',cursor='star',font=('Noto Serif', 11)).pack(side=LEFT,padx=10,pady=15) #bouton qui permet de valider les paramètres séléctionnés\r\n\r\n\r\nprépartie.pack()\r\nchoix.pack()\r\nnombretour.pack()\r\nnombrecolonnes.pack()\r\nnombrelignes.pack()\r\nvalidate.pack()\r\n\r\nfenetre.mainloop()\r\n\r\n\r\n\r\n \r\n",
"step-ids": [
20,
23,
24,
28,
32
]
}
|
[
20,
23,
24,
28,
32
] |
#coding=utf-8
from selenium import webdriver
from selenium.webdriver import ActionChains
# 常用鼠标操作
driver = webdriver.Chrome()
driver.get('https://www.baidu.com')
driver.maximize_window()
element = driver.find_element_by_link_text(u"新闻")
#˫ 双击 ‘新闻’ 这个超链接
ActionChains(driver).double_click(element).perform()
import time
time.sleep(2)
driver.quit()
# 右键 单击 ‘新闻’
element = driver.find_element_by_link_text('地图')
ActionChains(driver).context_click(element).perform()
time.sleep(2)
# driver.quit()
|
normal
|
{
"blob_id": "e3f180d4309ade39ac42a895f7f73469fd20724f",
"index": 4538,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\n<mask token>\nActionChains(driver).double_click(element).perform()\n<mask token>\ntime.sleep(2)\ndriver.quit()\n<mask token>\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\nelement = driver.find_element_by_link_text(u'新闻')\nActionChains(driver).double_click(element).perform()\n<mask token>\ntime.sleep(2)\ndriver.quit()\nelement = driver.find_element_by_link_text('地图')\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n",
"step-4": "from selenium import webdriver\nfrom selenium.webdriver import ActionChains\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\nelement = driver.find_element_by_link_text(u'新闻')\nActionChains(driver).double_click(element).perform()\nimport time\ntime.sleep(2)\ndriver.quit()\nelement = driver.find_element_by_link_text('地图')\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n",
"step-5": "#coding=utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\n\n# 常用鼠标操作\ndriver = webdriver.Chrome()\ndriver.get('https://www.baidu.com')\ndriver.maximize_window()\nelement = driver.find_element_by_link_text(u\"新闻\")\n#˫ 双击 ‘新闻’ 这个超链接\nActionChains(driver).double_click(element).perform()\nimport time\ntime.sleep(2)\ndriver.quit()\n# 右键 单击 ‘新闻’\nelement = driver.find_element_by_link_text('地图')\nActionChains(driver).context_click(element).perform()\ntime.sleep(2)\n# driver.quit()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import vk_loader.vk_api as vk
from config import config
import uuid
import requests
from models import session, Meme
import os
PHOTO_URL_FIELDS = [
'photo_75',
'photo_130',
'photo_604',
'photo_807',
'photo_1280',
'photo_2560'
]
conf = config('loader', default={
'access_token': 'Enter VK access token here.',
'sources': [],
'load_limit_per_source': 20,
'remember_loaded_ids': 50,
'images_dir': 'img/'
})
def get_random_id():
return uuid.uuid4().hex
def is_post_meme(post):
if 'id' not in post:
return False
if 'attachments' not in post:
return False
if 'is_pinned' in post and post['is_pinned'] == 1:
return False
if 'marked_as_ads' in post and post['marked_as_ads'] == 1:
return False
attachments = post['attachments']
if type(attachments) != list:
return False
if len(attachments) != 1:
return False
photo = attachments[0]
if 'type' not in photo:
return False
if photo['type'] != 'photo' or 'photo' not in photo:
return False
return True
def get_last_loaded_ids(source_id):
try:
with open('vk_loader/loaded_ids/' + str(source_id), 'r') as file:
return list(map(lambda s: int(s.replace('\n', '')), file.readlines()))
except IOError:
return []
def save_loaded_ids(source_id, ids):
    """Persist the newest downloaded post ids for *source_id*.

    Fresh ids are prepended to the previously stored ones and the
    combined history is capped at conf['remember_loaded_ids'] entries,
    so the file only remembers the most recent posts.
    """
    combined = ids + get_last_loaded_ids(source_id)
    limit = conf['remember_loaded_ids']
    if len(combined) > limit:
        combined = combined[:limit]
    try:
        with open('vk_loader/loaded_ids/' + str(source_id), 'w') as stored:
            stored.write('\n'.join(str(post_id) for post_id in combined))
    except IOError:
        print('Can\'t save ids!')
def get_unique_post_id(source_id, post_id):
    """Compose a globally unique post key of the form "<source>_<post>"."""
    return '{}_{}'.format(source_id, post_id)
def get_new_posts():
    """Fetch not-yet-seen wall posts from every configured source.

    Pages through each source's wall (using `considered` as the offset)
    until one of three things happens: the per-source limit is reached,
    a previously loaded post id is encountered, or the wall runs out of
    posts.  Newly seen ids are persisted via save_loaded_ids so the next
    run stops early.  Returns the list of raw post dicts.
    """
    result = []
    for source_id in conf['sources']:
        # Ids downloaded on previous runs; hitting one of them means
        # everything older has already been processed.
        loaded_ids = set(get_last_loaded_ids(source_id))
        to_save = []
        finished = False
        considered = 0  # doubles as the paging offset and the limit counter
        while not finished:
            posts = vk.get_posts(source_id, offset=considered)['items']
            count = len(posts)
            if count == 0:
                finished = True
                continue
            for item in posts:
                if considered == conf['load_limit_per_source']:
                    finished = True
                    break
                considered += 1
                if 'id' not in item:
                    continue
                post_id = item['id']
                if post_id in loaded_ids:
                    # Reached already-seen territory: stop paging after
                    # this batch, but keep scanning the rest of it
                    # (continue, not break).
                    finished = True
                    continue
                to_save.append(post_id)
                result.append(item)
        if len(to_save) > 0:
            save_loaded_ids(source_id, to_save)
    return result
def download(url, filename):
    """Download *url* and write the response body to *filename*.

    The HTTP request is performed before the file is opened, so a
    failed request no longer leaves an empty/truncated file behind
    (the original opened and truncated the file first).

    Network errors raised by requests and filesystem IOError propagate
    to the caller.
    """
    response = requests.get(url)
    with open(filename, "wb") as file:
        file.write(response.content)
def __main__():
    """Entry point: download new meme images and register them in the DB.

    Ensures the image and id-cache directories exist, pulls fresh
    posts, filters them down to single-photo memes, downloads the
    largest available photo rendition for each, and records one Meme
    row per successfully saved image.  The session is committed once
    at the end.
    """
    os.makedirs(conf['images_dir'], exist_ok=True)
    os.makedirs('vk_loader/loaded_ids', exist_ok=True)
    posts = get_new_posts()
    posts = filter(is_post_meme, posts)
    for post in posts:
        photo = post['attachments'][0]['photo']
        # Walk PHOTO_URL_FIELDS from largest to smallest and take the
        # first size variant this photo actually provides.
        ptr = len(PHOTO_URL_FIELDS) - 1
        while ptr >= 0 and PHOTO_URL_FIELDS[ptr] not in photo:
            ptr -= 1
        if ptr < 0:
            continue
        photo_url = photo[PHOTO_URL_FIELDS[ptr]]
        # NOTE(review): assumes VK always serves .jpg URLs; assert is
        # stripped under `python -O` — confirm before relying on it.
        assert(photo_url.endswith('.jpg'))
        photo_id = get_random_id()
        try:
            print('loading', photo_id, photo_url)
            download(photo_url, conf['images_dir'] + photo_id + '.jpg')
        except IOError:
            print('Downloading/saving an image failed!')
            continue
        session.add(Meme(img=photo_id))
    session.commit()
__main__()
|
normal
|
{
"blob_id": "cb742701094a8060e524ba22a0af2f969bdbf3d9",
"index": 2365,
"step-1": "<mask token>\n\n\ndef get_random_id():\n return uuid.uuid4().hex\n\n\n<mask token>\n\n\ndef get_last_loaded_ids(source_id):\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'r') as file:\n return list(map(lambda s: int(s.replace('\\n', '')), file.\n readlines()))\n except IOError:\n return []\n\n\ndef save_loaded_ids(source_id, ids):\n actual = ids + get_last_loaded_ids(source_id)\n remember = conf['remember_loaded_ids']\n if len(actual) > remember:\n actual = actual[:remember]\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'w') as file:\n file.write('\\n'.join(map(str, actual)))\n except IOError:\n print(\"Can't save ids!\")\n\n\n<mask token>\n\n\ndef get_new_posts():\n result = []\n for source_id in conf['sources']:\n loaded_ids = set(get_last_loaded_ids(source_id))\n to_save = []\n finished = False\n considered = 0\n while not finished:\n posts = vk.get_posts(source_id, offset=considered)['items']\n count = len(posts)\n if count == 0:\n finished = True\n continue\n for item in posts:\n if considered == conf['load_limit_per_source']:\n finished = True\n break\n considered += 1\n if 'id' not in item:\n continue\n post_id = item['id']\n if post_id in loaded_ids:\n finished = True\n continue\n to_save.append(post_id)\n result.append(item)\n if len(to_save) > 0:\n save_loaded_ids(source_id, to_save)\n return result\n\n\ndef download(url, filename):\n with open(filename, 'wb') as file:\n response = requests.get(url)\n file.write(response.content)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_random_id():\n return uuid.uuid4().hex\n\n\ndef is_post_meme(post):\n if 'id' not in post:\n return False\n if 'attachments' not in post:\n return False\n if 'is_pinned' in post and post['is_pinned'] == 1:\n return False\n if 'marked_as_ads' in post and post['marked_as_ads'] == 1:\n return False\n attachments = post['attachments']\n if type(attachments) != list:\n return False\n if len(attachments) != 1:\n return False\n photo = attachments[0]\n if 'type' not in photo:\n return False\n if photo['type'] != 'photo' or 'photo' not in photo:\n return False\n return True\n\n\ndef get_last_loaded_ids(source_id):\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'r') as file:\n return list(map(lambda s: int(s.replace('\\n', '')), file.\n readlines()))\n except IOError:\n return []\n\n\ndef save_loaded_ids(source_id, ids):\n actual = ids + get_last_loaded_ids(source_id)\n remember = conf['remember_loaded_ids']\n if len(actual) > remember:\n actual = actual[:remember]\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'w') as file:\n file.write('\\n'.join(map(str, actual)))\n except IOError:\n print(\"Can't save ids!\")\n\n\n<mask token>\n\n\ndef get_new_posts():\n result = []\n for source_id in conf['sources']:\n loaded_ids = set(get_last_loaded_ids(source_id))\n to_save = []\n finished = False\n considered = 0\n while not finished:\n posts = vk.get_posts(source_id, offset=considered)['items']\n count = len(posts)\n if count == 0:\n finished = True\n continue\n for item in posts:\n if considered == conf['load_limit_per_source']:\n finished = True\n break\n considered += 1\n if 'id' not in item:\n continue\n post_id = item['id']\n if post_id in loaded_ids:\n finished = True\n continue\n to_save.append(post_id)\n result.append(item)\n if len(to_save) > 0:\n save_loaded_ids(source_id, to_save)\n return result\n\n\ndef download(url, filename):\n with open(filename, 'wb') as file:\n response = requests.get(url)\n 
file.write(response.content)\n\n\ndef __main__():\n os.makedirs(conf['images_dir'], exist_ok=True)\n os.makedirs('vk_loader/loaded_ids', exist_ok=True)\n posts = get_new_posts()\n posts = filter(is_post_meme, posts)\n for post in posts:\n photo = post['attachments'][0]['photo']\n ptr = len(PHOTO_URL_FIELDS) - 1\n while ptr >= 0 and PHOTO_URL_FIELDS[ptr] not in photo:\n ptr -= 1\n if ptr < 0:\n continue\n photo_url = photo[PHOTO_URL_FIELDS[ptr]]\n assert photo_url.endswith('.jpg')\n photo_id = get_random_id()\n try:\n print('loading', photo_id, photo_url)\n download(photo_url, conf['images_dir'] + photo_id + '.jpg')\n except IOError:\n print('Downloading/saving an image failed!')\n continue\n session.add(Meme(img=photo_id))\n session.commit()\n\n\n<mask token>\n",
"step-3": "<mask token>\nPHOTO_URL_FIELDS = ['photo_75', 'photo_130', 'photo_604', 'photo_807',\n 'photo_1280', 'photo_2560']\nconf = config('loader', default={'access_token':\n 'Enter VK access token here.', 'sources': [], 'load_limit_per_source': \n 20, 'remember_loaded_ids': 50, 'images_dir': 'img/'})\n\n\ndef get_random_id():\n return uuid.uuid4().hex\n\n\ndef is_post_meme(post):\n if 'id' not in post:\n return False\n if 'attachments' not in post:\n return False\n if 'is_pinned' in post and post['is_pinned'] == 1:\n return False\n if 'marked_as_ads' in post and post['marked_as_ads'] == 1:\n return False\n attachments = post['attachments']\n if type(attachments) != list:\n return False\n if len(attachments) != 1:\n return False\n photo = attachments[0]\n if 'type' not in photo:\n return False\n if photo['type'] != 'photo' or 'photo' not in photo:\n return False\n return True\n\n\ndef get_last_loaded_ids(source_id):\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'r') as file:\n return list(map(lambda s: int(s.replace('\\n', '')), file.\n readlines()))\n except IOError:\n return []\n\n\ndef save_loaded_ids(source_id, ids):\n actual = ids + get_last_loaded_ids(source_id)\n remember = conf['remember_loaded_ids']\n if len(actual) > remember:\n actual = actual[:remember]\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'w') as file:\n file.write('\\n'.join(map(str, actual)))\n except IOError:\n print(\"Can't save ids!\")\n\n\ndef get_unique_post_id(source_id, post_id):\n return str(source_id) + '_' + str(post_id)\n\n\ndef get_new_posts():\n result = []\n for source_id in conf['sources']:\n loaded_ids = set(get_last_loaded_ids(source_id))\n to_save = []\n finished = False\n considered = 0\n while not finished:\n posts = vk.get_posts(source_id, offset=considered)['items']\n count = len(posts)\n if count == 0:\n finished = True\n continue\n for item in posts:\n if considered == conf['load_limit_per_source']:\n finished = True\n break\n 
considered += 1\n if 'id' not in item:\n continue\n post_id = item['id']\n if post_id in loaded_ids:\n finished = True\n continue\n to_save.append(post_id)\n result.append(item)\n if len(to_save) > 0:\n save_loaded_ids(source_id, to_save)\n return result\n\n\ndef download(url, filename):\n with open(filename, 'wb') as file:\n response = requests.get(url)\n file.write(response.content)\n\n\ndef __main__():\n os.makedirs(conf['images_dir'], exist_ok=True)\n os.makedirs('vk_loader/loaded_ids', exist_ok=True)\n posts = get_new_posts()\n posts = filter(is_post_meme, posts)\n for post in posts:\n photo = post['attachments'][0]['photo']\n ptr = len(PHOTO_URL_FIELDS) - 1\n while ptr >= 0 and PHOTO_URL_FIELDS[ptr] not in photo:\n ptr -= 1\n if ptr < 0:\n continue\n photo_url = photo[PHOTO_URL_FIELDS[ptr]]\n assert photo_url.endswith('.jpg')\n photo_id = get_random_id()\n try:\n print('loading', photo_id, photo_url)\n download(photo_url, conf['images_dir'] + photo_id + '.jpg')\n except IOError:\n print('Downloading/saving an image failed!')\n continue\n session.add(Meme(img=photo_id))\n session.commit()\n\n\n__main__()\n",
"step-4": "import vk_loader.vk_api as vk\nfrom config import config\nimport uuid\nimport requests\nfrom models import session, Meme\nimport os\nPHOTO_URL_FIELDS = ['photo_75', 'photo_130', 'photo_604', 'photo_807',\n 'photo_1280', 'photo_2560']\nconf = config('loader', default={'access_token':\n 'Enter VK access token here.', 'sources': [], 'load_limit_per_source': \n 20, 'remember_loaded_ids': 50, 'images_dir': 'img/'})\n\n\ndef get_random_id():\n return uuid.uuid4().hex\n\n\ndef is_post_meme(post):\n if 'id' not in post:\n return False\n if 'attachments' not in post:\n return False\n if 'is_pinned' in post and post['is_pinned'] == 1:\n return False\n if 'marked_as_ads' in post and post['marked_as_ads'] == 1:\n return False\n attachments = post['attachments']\n if type(attachments) != list:\n return False\n if len(attachments) != 1:\n return False\n photo = attachments[0]\n if 'type' not in photo:\n return False\n if photo['type'] != 'photo' or 'photo' not in photo:\n return False\n return True\n\n\ndef get_last_loaded_ids(source_id):\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'r') as file:\n return list(map(lambda s: int(s.replace('\\n', '')), file.\n readlines()))\n except IOError:\n return []\n\n\ndef save_loaded_ids(source_id, ids):\n actual = ids + get_last_loaded_ids(source_id)\n remember = conf['remember_loaded_ids']\n if len(actual) > remember:\n actual = actual[:remember]\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'w') as file:\n file.write('\\n'.join(map(str, actual)))\n except IOError:\n print(\"Can't save ids!\")\n\n\ndef get_unique_post_id(source_id, post_id):\n return str(source_id) + '_' + str(post_id)\n\n\ndef get_new_posts():\n result = []\n for source_id in conf['sources']:\n loaded_ids = set(get_last_loaded_ids(source_id))\n to_save = []\n finished = False\n considered = 0\n while not finished:\n posts = vk.get_posts(source_id, offset=considered)['items']\n count = len(posts)\n if count == 0:\n finished = 
True\n continue\n for item in posts:\n if considered == conf['load_limit_per_source']:\n finished = True\n break\n considered += 1\n if 'id' not in item:\n continue\n post_id = item['id']\n if post_id in loaded_ids:\n finished = True\n continue\n to_save.append(post_id)\n result.append(item)\n if len(to_save) > 0:\n save_loaded_ids(source_id, to_save)\n return result\n\n\ndef download(url, filename):\n with open(filename, 'wb') as file:\n response = requests.get(url)\n file.write(response.content)\n\n\ndef __main__():\n os.makedirs(conf['images_dir'], exist_ok=True)\n os.makedirs('vk_loader/loaded_ids', exist_ok=True)\n posts = get_new_posts()\n posts = filter(is_post_meme, posts)\n for post in posts:\n photo = post['attachments'][0]['photo']\n ptr = len(PHOTO_URL_FIELDS) - 1\n while ptr >= 0 and PHOTO_URL_FIELDS[ptr] not in photo:\n ptr -= 1\n if ptr < 0:\n continue\n photo_url = photo[PHOTO_URL_FIELDS[ptr]]\n assert photo_url.endswith('.jpg')\n photo_id = get_random_id()\n try:\n print('loading', photo_id, photo_url)\n download(photo_url, conf['images_dir'] + photo_id + '.jpg')\n except IOError:\n print('Downloading/saving an image failed!')\n continue\n session.add(Meme(img=photo_id))\n session.commit()\n\n\n__main__()\n",
"step-5": "import vk_loader.vk_api as vk\nfrom config import config\nimport uuid\nimport requests\nfrom models import session, Meme\nimport os\n\nPHOTO_URL_FIELDS = [\n 'photo_75',\n 'photo_130',\n 'photo_604',\n 'photo_807',\n 'photo_1280',\n 'photo_2560'\n]\n\n\nconf = config('loader', default={\n 'access_token': 'Enter VK access token here.',\n 'sources': [],\n 'load_limit_per_source': 20,\n 'remember_loaded_ids': 50,\n 'images_dir': 'img/'\n})\n\n\ndef get_random_id():\n return uuid.uuid4().hex\n\n\ndef is_post_meme(post):\n if 'id' not in post:\n return False\n\n if 'attachments' not in post:\n return False\n\n if 'is_pinned' in post and post['is_pinned'] == 1:\n return False\n\n if 'marked_as_ads' in post and post['marked_as_ads'] == 1:\n return False\n\n attachments = post['attachments']\n\n if type(attachments) != list:\n return False\n\n if len(attachments) != 1:\n return False\n\n photo = attachments[0]\n\n if 'type' not in photo:\n return False\n\n if photo['type'] != 'photo' or 'photo' not in photo:\n return False\n\n return True\n\n\ndef get_last_loaded_ids(source_id):\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'r') as file:\n return list(map(lambda s: int(s.replace('\\n', '')), file.readlines()))\n except IOError:\n return []\n\n\ndef save_loaded_ids(source_id, ids):\n actual = ids + get_last_loaded_ids(source_id)\n remember = conf['remember_loaded_ids']\n if len(actual) > remember:\n actual = actual[:remember]\n try:\n with open('vk_loader/loaded_ids/' + str(source_id), 'w') as file:\n file.write('\\n'.join(map(str, actual)))\n except IOError:\n print('Can\\'t save ids!')\n\n\ndef get_unique_post_id(source_id, post_id):\n return str(source_id) + '_' + str(post_id)\n\n\ndef get_new_posts():\n result = []\n\n for source_id in conf['sources']:\n loaded_ids = set(get_last_loaded_ids(source_id))\n to_save = []\n\n finished = False\n considered = 0\n\n while not finished:\n posts = vk.get_posts(source_id, offset=considered)['items']\n 
count = len(posts)\n\n if count == 0:\n finished = True\n continue\n\n for item in posts:\n if considered == conf['load_limit_per_source']:\n finished = True\n break\n\n considered += 1\n\n if 'id' not in item:\n continue\n\n post_id = item['id']\n if post_id in loaded_ids:\n finished = True\n continue\n\n to_save.append(post_id)\n result.append(item)\n\n if len(to_save) > 0:\n save_loaded_ids(source_id, to_save)\n\n return result\n\n\ndef download(url, filename):\n with open(filename, \"wb\") as file:\n response = requests.get(url)\n file.write(response.content)\n\n\ndef __main__():\n os.makedirs(conf['images_dir'], exist_ok=True)\n os.makedirs('vk_loader/loaded_ids', exist_ok=True)\n posts = get_new_posts()\n posts = filter(is_post_meme, posts)\n\n for post in posts:\n photo = post['attachments'][0]['photo']\n ptr = len(PHOTO_URL_FIELDS) - 1\n\n while ptr >= 0 and PHOTO_URL_FIELDS[ptr] not in photo:\n ptr -= 1\n\n if ptr < 0:\n continue\n\n photo_url = photo[PHOTO_URL_FIELDS[ptr]]\n assert(photo_url.endswith('.jpg'))\n\n photo_id = get_random_id()\n\n try:\n print('loading', photo_id, photo_url)\n download(photo_url, conf['images_dir'] + photo_id + '.jpg')\n except IOError:\n print('Downloading/saving an image failed!')\n continue\n\n session.add(Meme(img=photo_id))\n\n session.commit()\n\n\n__main__()\n",
"step-ids": [
5,
7,
10,
11,
12
]
}
|
[
5,
7,
10,
11,
12
] |
from flask import Flask, request, g
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from flask import jsonify
import json
import eth_account
import algosdk
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import load_only
from datetime import datetime
import sys
from models import Base, Order, Log
# SQLite database holding the Order and Log tables (schema in models.py).
engine = create_engine('sqlite:///orders.db')
Base.metadata.bind = engine
# Session factory bound to the engine; each request wraps it in a scoped session.
DBSession = sessionmaker(bind=engine)
app = Flask(__name__)
# These decorators allow you to use g.session to access the database inside the request code
# g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals
@app.before_request
def create_session():
    """Attach a fresh scoped DB session to flask.g before each request."""
    g.session = scoped_session(DBSession)
@app.teardown_appcontext
# def shutdown_session(response_or_exc):
def shutdown_session(exception=None):
    """Flush stdout, commit pending work and release the request's session.

    NOTE(review): this commits even when the request raised an
    exception (teardown runs unconditionally) — confirm that is intended.
    """
    sys.stdout.flush()
    g.session.commit()
    g.session.remove()
""" Suggested helper methods """
# check whether “sig” is a valid signature of json.dumps(payload),
# using the signature algorithm specified by the platform field.
# Be sure to verify the payload using the sender_pk.
def check_sig(payload, sig):
    """Verify *sig* over json.dumps(payload) using the payload's platform.

    The sender's public key comes from payload['sender_pk']; the
    signature scheme is chosen by payload['platform'] ("Algorand" or
    "Ethereum").  Returns a (verified, payload_json) tuple so the
    caller can reuse the exact serialized message.
    """
    sender_pk = payload['sender_pk']
    platform = payload['platform']
    message = json.dumps(payload)
    verified = False
    if platform == "Algorand":
        print("Algorand")
        if algosdk.util.verify_bytes(message.encode('utf-8'), sig, sender_pk):
            print("Algo sig verifies!")
            verified = True
    elif platform == "Ethereum":
        print("Ethereum")
        encoded_msg = eth_account.messages.encode_defunct(text=message)
        recovered = eth_account.Account.recover_message(encoded_msg, signature=sig)
        if recovered == sender_pk:
            print("Eth sig verifies!")
            verified = True
    return verified, message
# def fill_order(order,txes=[]):
# pass
# the inner recursive function
def fill_order():
    """Recursively match the most recently inserted order against the book.

    Loads the newest Order row, scans unfilled orders for a compatible
    counter-order (opposite currency pair, exchange rate at least as
    good, no counterparty yet), marks both sides filled with the same
    timestamp and cross-links their counterparty_id.  When one side is
    only partially consumed, a derived child order is inserted for the
    remainder (creator_id points at the parent) and fill_order recurses
    to try to fill the child as well.
    """
    # get the order you just inserted from the DB
    current_order = g.session.query(Order).order_by(Order.id.desc()).first()
    # print("_order_id")
    # print(current_order.id)
    # Check if there are any existing orders that match and add them into a list
    order_list = []
    orders = g.session.query(Order).filter(Order.filled == None).all()
    for existing_order in orders:
        # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):
        if ((existing_order.buy_currency == current_order.sell_currency)
            and (existing_order.sell_currency == current_order.buy_currency)
            and (existing_order.sell_amount / existing_order.buy_amount
            >= current_order.buy_amount / current_order.sell_amount)
            and (existing_order.counterparty_id == None)):
            order_list.append(existing_order)
    # If a match is found between order and existing_order
    if (len(order_list) > 0):
        # print(" order_list_length")
        # print(len(order_list))
        # pick the first one in the list
        match_order = order_list[0]
        # Set the filled field to be the current timestamp on both orders
        # Set counterparty_id to be the id of the other order
        match_order.filled = datetime.now()
        current_order.filled = datetime.now()
        match_order.counterparty_id = current_order.id
        current_order.counterparty_id = match_order.id
        g.session.commit()
        # if both orders can completely fill each other
        # no child order needs to be generated
        # If match_order is not completely filled
        if (current_order.sell_amount < match_order.buy_amount):
            # print("_match_order is not completely filled")
            diff = match_order.buy_amount - current_order.sell_amount
            exchange_rate_match = match_order.sell_amount / match_order.buy_amount
            sell_amount_new_match = diff * exchange_rate_match
            # print(match_order.id)
            # print(diff)
            # print(sell_amount_new_match)
            new_order = Order(sender_pk=match_order.sender_pk,
                              receiver_pk=match_order.receiver_pk,
                              buy_currency=match_order.buy_currency,
                              sell_currency=match_order.sell_currency,
                              buy_amount=diff,
                              sell_amount=sell_amount_new_match,
                              creator_id=match_order.id)
            g.session.add(new_order)
            g.session.commit()
            print("M")
            fill_order()
        # If current_order is not completely filled
        if (current_order.buy_amount > match_order.sell_amount):
            # print("_current_order is not completely filled")
            diff = current_order.buy_amount - match_order.sell_amount
            exchange_rate_current = current_order.buy_amount / current_order.sell_amount
            sell_amount_new_current = diff / exchange_rate_current
            # print(current_order.id)
            # print(diff)
            # print(sell_amount_new_current)
            new_order = Order(sender_pk=current_order.sender_pk,
                              receiver_pk=current_order.receiver_pk,
                              buy_currency=current_order.buy_currency,
                              sell_currency=current_order.sell_currency,
                              buy_amount=diff,
                              sell_amount=sell_amount_new_current,
                              creator_id=current_order.id)
            g.session.add(new_order)
            g.session.commit()
            print("C")
            fill_order()
# Takes input dictionary d and writes it to the Log table
# Hint: use json.dumps or str() to get it in a nice string form
def log_message(d):
    """Insert *d* into the Log table as a message string.

    *d* may be an already-serialized JSON string (e.g. payload_json) or
    a raw dict (e.g. the request content); dicts are serialized with
    json.dumps so the message column always receives text, as the
    helper's contract requires.
    """
    create_session()
    message = d if isinstance(d, str) else json.dumps(d)
    order_obj = Log(message=message)
    g.session.add(order_obj)
    shutdown_session()
# convert a row in DB into a dict
def row2dict(row):
    """Convert one SQLAlchemy result row into a plain dict keyed by column name."""
    as_dict = {}
    for column in row.__table__.columns:
        as_dict[column.name] = getattr(row, column.name)
    return as_dict
# print a dictionary nicely
def print_dict(d):
    """Pretty-print a dict to stdout, one "key  :  value" line per entry."""
    for key in d:
        print(key, ' : ', d[key])
""" End of helper methods """
@app.route('/trade', methods=['POST'])
def trade():
    """Validate a signed order submission and insert it into the book.

    Expects JSON of the form {"sig": ..., "payload": {...}} where the
    payload carries the seven order fields.  Missing fields or a bad
    signature are recorded in the Log table and answered with JSON
    false; a verified order is stored (all payload fields except
    'platform', plus the signature), matched via fill_order(), and
    answered with JSON true.
    """
    print("In trade endpoint")
    if request.method == "POST":
        print("--------- trade ---------")
        content = request.get_json(silent=True)
        print( f"content = {json.dumps(content)}" )
        columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ]
        fields = [ "sig", "payload" ]
        # check whether the input contains both "sig" and "payload"
        for field in fields:
            if not field in content.keys():
                print( f"{field} not received by Trade" )
                print( json.dumps(content) )
                log_message(content)
                return jsonify( False )
        # check whether the input contains all 7 fields of payload
        for column in columns:
            if not column in content['payload'].keys():
                print( f"{column} not received by Trade" )
                print( json.dumps(content) )
                log_message(content)
                return jsonify( False )
        # Note that you can access the database session using g.session
        # Step 1: Check the signature
        # extract contents from the request json
        sig = content['sig']
        payload = content['payload']
        platform = payload['platform']
        # The platform must be either "Algorand" or "Ethereum".
        platforms = ["Algorand", "Ethereum"]
        if not platform in platforms:
            print("input platform is not Algorand or Ethereum")
            return jsonify(False)
        # check signature against the serialized payload
        check_result = check_sig(payload,sig)
        result = check_result[0]
        payload_json = check_result[1]
        # Step 2: Add the order to the database (or log the failure)
        # If the signature does not verify, do not insert the order into the Order table.
        # Instead, insert a record into the Log table with the serialized payload.
        if result is False:
            print("signature does NOT verify")
            log_message(payload_json)
            return jsonify(result)
        # If the signature verifies, store the signature as well as all
        # payload fields in the Order table EXCEPT for 'platform'.
        if result is True:
            print("signature verifies")
            create_session()
            order_obj = Order(sender_pk=payload['sender_pk'],
                              receiver_pk=payload['receiver_pk'],
                              buy_currency=payload['buy_currency'],
                              sell_currency=payload['sell_currency'],
                              buy_amount=payload['buy_amount'],
                              sell_amount=payload['sell_amount'],
                              signature=sig)
            g.session.add(order_obj)
            # Step 3: Try to match/fill the newly inserted order
            fill_order()
            shutdown_session()
            return jsonify(result)
@app.route('/order_book')
def order_book():
    """Return every order in the database as JSON.

    The response is a single object with one key, "data", whose value
    is the list of all orders, each serialized to a dict via row2dict
    (includes sender_pk, receiver_pk, buy/sell currency and amount,
    signature, among other columns).
    """
    print("--------- order_book ---------")
    create_session()
    all_orders = g.session.query(Order).all()
    payload = {
        'data': [row2dict(single_order) for single_order in all_orders]
    }
    print("order book length: ")
    print(len(payload['data']))
    shutdown_session()
    return jsonify(payload)
if __name__ == '__main__':
    # Development-server entry point.
    app.run(port='5002')
|
normal
|
{
"blob_id": "d9bdf466abecb50c399556b99b41896eead0cb4b",
"index": 2959,
"step-1": "<mask token>\n\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession)\n\n\n<mask token>\n\n\ndef check_sig(payload, sig):\n pk = payload['sender_pk']\n platform = payload['platform']\n payload_json = json.dumps(payload)\n result = False\n if platform == 'Algorand':\n print('Algorand')\n if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):\n print('Algo sig verifies!')\n result = True\n elif platform == 'Ethereum':\n print('Ethereum')\n eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json\n )\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig\n ) == pk:\n print('Eth sig verifies!')\n result = True\n return result, payload_json\n\n\ndef fill_order():\n current_order = g.session.query(Order).order_by(Order.id.desc()).first()\n order_list = []\n orders = g.session.query(Order).filter(Order.filled == None).all()\n for existing_order in orders:\n if (existing_order.buy_currency == current_order.sell_currency and \n existing_order.sell_currency == current_order.buy_currency and \n existing_order.sell_amount / existing_order.buy_amount >= \n current_order.buy_amount / current_order.sell_amount and \n existing_order.counterparty_id == None):\n order_list.append(existing_order)\n if len(order_list) > 0:\n match_order = order_list[0]\n match_order.filled = datetime.now()\n current_order.filled = datetime.now()\n match_order.counterparty_id = current_order.id\n current_order.counterparty_id = match_order.id\n g.session.commit()\n if current_order.sell_amount < match_order.buy_amount:\n diff = match_order.buy_amount - current_order.sell_amount\n exchange_rate_match = (match_order.sell_amount / match_order.\n buy_amount)\n sell_amount_new_match = diff * exchange_rate_match\n new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=\n match_order.receiver_pk, buy_currency=match_order.\n buy_currency, sell_currency=match_order.sell_currency,\n buy_amount=diff, 
sell_amount=sell_amount_new_match,\n creator_id=match_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('M')\n fill_order()\n if current_order.buy_amount > match_order.sell_amount:\n diff = current_order.buy_amount - match_order.sell_amount\n exchange_rate_current = (current_order.buy_amount /\n current_order.sell_amount)\n sell_amount_new_current = diff / exchange_rate_current\n new_order = Order(sender_pk=current_order.sender_pk,\n receiver_pk=current_order.receiver_pk, buy_currency=\n current_order.buy_currency, sell_currency=current_order.\n sell_currency, buy_amount=diff, sell_amount=\n sell_amount_new_current, creator_id=current_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('C')\n fill_order()\n\n\n<mask token>\n\n\ndef row2dict(row):\n return {c.name: getattr(row, c.name) for c in row.__table__.columns}\n\n\ndef print_dict(d):\n for key, value in d.items():\n print(key, ' : ', value)\n\n\n<mask token>\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print('In trade endpoint')\n if request.method == 'POST':\n print('--------- trade ---------')\n content = request.get_json(silent=True)\n print(f'content = {json.dumps(content)}')\n columns = ['sender_pk', 'receiver_pk', 'buy_currency',\n 'sell_currency', 'buy_amount', 'sell_amount', 'platform']\n fields = ['sig', 'payload']\n for field in fields:\n if not field in content.keys():\n print(f'{field} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n for column in columns:\n if not column in content['payload'].keys():\n print(f'{column} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n sig = content['sig']\n payload = content['payload']\n platform = payload['platform']\n platforms = ['Algorand', 'Ethereum']\n if not platform in platforms:\n print('input platform is not Algorand or Ethereum')\n return jsonify(False)\n check_result = check_sig(payload, sig)\n result = 
check_result[0]\n payload_json = check_result[1]\n if result is False:\n print('signature does NOT verify')\n log_message(payload_json)\n return jsonify(result)\n if result is True:\n print('signature verifies')\n create_session()\n order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=\n payload['receiver_pk'], buy_currency=payload['buy_currency'\n ], sell_currency=payload['sell_currency'], buy_amount=\n payload['buy_amount'], sell_amount=payload['sell_amount'],\n signature=sig)\n g.session.add(order_obj)\n fill_order()\n shutdown_session()\n return jsonify(result)\n\n\n@app.route('/order_book')\ndef order_book():\n print('--------- order_book ---------')\n create_session()\n order_dict_list = [row2dict(order) for order in g.session.query(Order).\n all()]\n result = {'data': order_dict_list}\n print('order book length: ')\n print(len(order_dict_list))\n shutdown_session()\n return jsonify(result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession)\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n sys.stdout.flush()\n g.session.commit()\n g.session.remove()\n\n\n<mask token>\n\n\ndef check_sig(payload, sig):\n pk = payload['sender_pk']\n platform = payload['platform']\n payload_json = json.dumps(payload)\n result = False\n if platform == 'Algorand':\n print('Algorand')\n if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):\n print('Algo sig verifies!')\n result = True\n elif platform == 'Ethereum':\n print('Ethereum')\n eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json\n )\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig\n ) == pk:\n print('Eth sig verifies!')\n result = True\n return result, payload_json\n\n\ndef fill_order():\n current_order = g.session.query(Order).order_by(Order.id.desc()).first()\n order_list = []\n orders = g.session.query(Order).filter(Order.filled == None).all()\n for existing_order in orders:\n if (existing_order.buy_currency == current_order.sell_currency and \n existing_order.sell_currency == current_order.buy_currency and \n existing_order.sell_amount / existing_order.buy_amount >= \n current_order.buy_amount / current_order.sell_amount and \n existing_order.counterparty_id == None):\n order_list.append(existing_order)\n if len(order_list) > 0:\n match_order = order_list[0]\n match_order.filled = datetime.now()\n current_order.filled = datetime.now()\n match_order.counterparty_id = current_order.id\n current_order.counterparty_id = match_order.id\n g.session.commit()\n if current_order.sell_amount < match_order.buy_amount:\n diff = match_order.buy_amount - current_order.sell_amount\n exchange_rate_match = (match_order.sell_amount / match_order.\n buy_amount)\n sell_amount_new_match = diff * exchange_rate_match\n new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=\n 
match_order.receiver_pk, buy_currency=match_order.\n buy_currency, sell_currency=match_order.sell_currency,\n buy_amount=diff, sell_amount=sell_amount_new_match,\n creator_id=match_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('M')\n fill_order()\n if current_order.buy_amount > match_order.sell_amount:\n diff = current_order.buy_amount - match_order.sell_amount\n exchange_rate_current = (current_order.buy_amount /\n current_order.sell_amount)\n sell_amount_new_current = diff / exchange_rate_current\n new_order = Order(sender_pk=current_order.sender_pk,\n receiver_pk=current_order.receiver_pk, buy_currency=\n current_order.buy_currency, sell_currency=current_order.\n sell_currency, buy_amount=diff, sell_amount=\n sell_amount_new_current, creator_id=current_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('C')\n fill_order()\n\n\n<mask token>\n\n\ndef row2dict(row):\n return {c.name: getattr(row, c.name) for c in row.__table__.columns}\n\n\ndef print_dict(d):\n for key, value in d.items():\n print(key, ' : ', value)\n\n\n<mask token>\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print('In trade endpoint')\n if request.method == 'POST':\n print('--------- trade ---------')\n content = request.get_json(silent=True)\n print(f'content = {json.dumps(content)}')\n columns = ['sender_pk', 'receiver_pk', 'buy_currency',\n 'sell_currency', 'buy_amount', 'sell_amount', 'platform']\n fields = ['sig', 'payload']\n for field in fields:\n if not field in content.keys():\n print(f'{field} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n for column in columns:\n if not column in content['payload'].keys():\n print(f'{column} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n sig = content['sig']\n payload = content['payload']\n platform = payload['platform']\n platforms = ['Algorand', 'Ethereum']\n if not platform in platforms:\n 
print('input platform is not Algorand or Ethereum')\n return jsonify(False)\n check_result = check_sig(payload, sig)\n result = check_result[0]\n payload_json = check_result[1]\n if result is False:\n print('signature does NOT verify')\n log_message(payload_json)\n return jsonify(result)\n if result is True:\n print('signature verifies')\n create_session()\n order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=\n payload['receiver_pk'], buy_currency=payload['buy_currency'\n ], sell_currency=payload['sell_currency'], buy_amount=\n payload['buy_amount'], sell_amount=payload['sell_amount'],\n signature=sig)\n g.session.add(order_obj)\n fill_order()\n shutdown_session()\n return jsonify(result)\n\n\n@app.route('/order_book')\ndef order_book():\n print('--------- order_book ---------')\n create_session()\n order_dict_list = [row2dict(order) for order in g.session.query(Order).\n all()]\n result = {'data': order_dict_list}\n print('order book length: ')\n print(len(order_dict_list))\n shutdown_session()\n return jsonify(result)\n\n\n<mask token>\n",
"step-3": "<mask token>\nengine = create_engine('sqlite:///orders.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\napp = Flask(__name__)\n\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession)\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n sys.stdout.flush()\n g.session.commit()\n g.session.remove()\n\n\n<mask token>\n\n\ndef check_sig(payload, sig):\n pk = payload['sender_pk']\n platform = payload['platform']\n payload_json = json.dumps(payload)\n result = False\n if platform == 'Algorand':\n print('Algorand')\n if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):\n print('Algo sig verifies!')\n result = True\n elif platform == 'Ethereum':\n print('Ethereum')\n eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json\n )\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig\n ) == pk:\n print('Eth sig verifies!')\n result = True\n return result, payload_json\n\n\ndef fill_order():\n current_order = g.session.query(Order).order_by(Order.id.desc()).first()\n order_list = []\n orders = g.session.query(Order).filter(Order.filled == None).all()\n for existing_order in orders:\n if (existing_order.buy_currency == current_order.sell_currency and \n existing_order.sell_currency == current_order.buy_currency and \n existing_order.sell_amount / existing_order.buy_amount >= \n current_order.buy_amount / current_order.sell_amount and \n existing_order.counterparty_id == None):\n order_list.append(existing_order)\n if len(order_list) > 0:\n match_order = order_list[0]\n match_order.filled = datetime.now()\n current_order.filled = datetime.now()\n match_order.counterparty_id = current_order.id\n current_order.counterparty_id = match_order.id\n g.session.commit()\n if current_order.sell_amount < match_order.buy_amount:\n diff = match_order.buy_amount - current_order.sell_amount\n exchange_rate_match = (match_order.sell_amount / match_order.\n 
buy_amount)\n sell_amount_new_match = diff * exchange_rate_match\n new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=\n match_order.receiver_pk, buy_currency=match_order.\n buy_currency, sell_currency=match_order.sell_currency,\n buy_amount=diff, sell_amount=sell_amount_new_match,\n creator_id=match_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('M')\n fill_order()\n if current_order.buy_amount > match_order.sell_amount:\n diff = current_order.buy_amount - match_order.sell_amount\n exchange_rate_current = (current_order.buy_amount /\n current_order.sell_amount)\n sell_amount_new_current = diff / exchange_rate_current\n new_order = Order(sender_pk=current_order.sender_pk,\n receiver_pk=current_order.receiver_pk, buy_currency=\n current_order.buy_currency, sell_currency=current_order.\n sell_currency, buy_amount=diff, sell_amount=\n sell_amount_new_current, creator_id=current_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('C')\n fill_order()\n\n\ndef log_message(d):\n create_session()\n order_obj = Log(message=d)\n g.session.add(order_obj)\n shutdown_session()\n\n\ndef row2dict(row):\n return {c.name: getattr(row, c.name) for c in row.__table__.columns}\n\n\ndef print_dict(d):\n for key, value in d.items():\n print(key, ' : ', value)\n\n\n<mask token>\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print('In trade endpoint')\n if request.method == 'POST':\n print('--------- trade ---------')\n content = request.get_json(silent=True)\n print(f'content = {json.dumps(content)}')\n columns = ['sender_pk', 'receiver_pk', 'buy_currency',\n 'sell_currency', 'buy_amount', 'sell_amount', 'platform']\n fields = ['sig', 'payload']\n for field in fields:\n if not field in content.keys():\n print(f'{field} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n for column in columns:\n if not column in content['payload'].keys():\n print(f'{column} not received by Trade')\n 
print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n sig = content['sig']\n payload = content['payload']\n platform = payload['platform']\n platforms = ['Algorand', 'Ethereum']\n if not platform in platforms:\n print('input platform is not Algorand or Ethereum')\n return jsonify(False)\n check_result = check_sig(payload, sig)\n result = check_result[0]\n payload_json = check_result[1]\n if result is False:\n print('signature does NOT verify')\n log_message(payload_json)\n return jsonify(result)\n if result is True:\n print('signature verifies')\n create_session()\n order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=\n payload['receiver_pk'], buy_currency=payload['buy_currency'\n ], sell_currency=payload['sell_currency'], buy_amount=\n payload['buy_amount'], sell_amount=payload['sell_amount'],\n signature=sig)\n g.session.add(order_obj)\n fill_order()\n shutdown_session()\n return jsonify(result)\n\n\n@app.route('/order_book')\ndef order_book():\n print('--------- order_book ---------')\n create_session()\n order_dict_list = [row2dict(order) for order in g.session.query(Order).\n all()]\n result = {'data': order_dict_list}\n print('order book length: ')\n print(len(order_dict_list))\n shutdown_session()\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-4": "from flask import Flask, request, g\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask import jsonify\nimport json\nimport eth_account\nimport algosdk\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import load_only\nfrom datetime import datetime\nimport sys\nfrom models import Base, Order, Log\nengine = create_engine('sqlite:///orders.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\napp = Flask(__name__)\n\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession)\n\n\n@app.teardown_appcontext\ndef shutdown_session(exception=None):\n sys.stdout.flush()\n g.session.commit()\n g.session.remove()\n\n\n<mask token>\n\n\ndef check_sig(payload, sig):\n pk = payload['sender_pk']\n platform = payload['platform']\n payload_json = json.dumps(payload)\n result = False\n if platform == 'Algorand':\n print('Algorand')\n if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):\n print('Algo sig verifies!')\n result = True\n elif platform == 'Ethereum':\n print('Ethereum')\n eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json\n )\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig\n ) == pk:\n print('Eth sig verifies!')\n result = True\n return result, payload_json\n\n\ndef fill_order():\n current_order = g.session.query(Order).order_by(Order.id.desc()).first()\n order_list = []\n orders = g.session.query(Order).filter(Order.filled == None).all()\n for existing_order in orders:\n if (existing_order.buy_currency == current_order.sell_currency and \n existing_order.sell_currency == current_order.buy_currency and \n existing_order.sell_amount / existing_order.buy_amount >= \n current_order.buy_amount / current_order.sell_amount and \n existing_order.counterparty_id == None):\n order_list.append(existing_order)\n if len(order_list) > 0:\n match_order = order_list[0]\n 
match_order.filled = datetime.now()\n current_order.filled = datetime.now()\n match_order.counterparty_id = current_order.id\n current_order.counterparty_id = match_order.id\n g.session.commit()\n if current_order.sell_amount < match_order.buy_amount:\n diff = match_order.buy_amount - current_order.sell_amount\n exchange_rate_match = (match_order.sell_amount / match_order.\n buy_amount)\n sell_amount_new_match = diff * exchange_rate_match\n new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=\n match_order.receiver_pk, buy_currency=match_order.\n buy_currency, sell_currency=match_order.sell_currency,\n buy_amount=diff, sell_amount=sell_amount_new_match,\n creator_id=match_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('M')\n fill_order()\n if current_order.buy_amount > match_order.sell_amount:\n diff = current_order.buy_amount - match_order.sell_amount\n exchange_rate_current = (current_order.buy_amount /\n current_order.sell_amount)\n sell_amount_new_current = diff / exchange_rate_current\n new_order = Order(sender_pk=current_order.sender_pk,\n receiver_pk=current_order.receiver_pk, buy_currency=\n current_order.buy_currency, sell_currency=current_order.\n sell_currency, buy_amount=diff, sell_amount=\n sell_amount_new_current, creator_id=current_order.id)\n g.session.add(new_order)\n g.session.commit()\n print('C')\n fill_order()\n\n\ndef log_message(d):\n create_session()\n order_obj = Log(message=d)\n g.session.add(order_obj)\n shutdown_session()\n\n\ndef row2dict(row):\n return {c.name: getattr(row, c.name) for c in row.__table__.columns}\n\n\ndef print_dict(d):\n for key, value in d.items():\n print(key, ' : ', value)\n\n\n<mask token>\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print('In trade endpoint')\n if request.method == 'POST':\n print('--------- trade ---------')\n content = request.get_json(silent=True)\n print(f'content = {json.dumps(content)}')\n columns = ['sender_pk', 'receiver_pk', 'buy_currency',\n 
'sell_currency', 'buy_amount', 'sell_amount', 'platform']\n fields = ['sig', 'payload']\n for field in fields:\n if not field in content.keys():\n print(f'{field} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n for column in columns:\n if not column in content['payload'].keys():\n print(f'{column} not received by Trade')\n print(json.dumps(content))\n log_message(content)\n return jsonify(False)\n sig = content['sig']\n payload = content['payload']\n platform = payload['platform']\n platforms = ['Algorand', 'Ethereum']\n if not platform in platforms:\n print('input platform is not Algorand or Ethereum')\n return jsonify(False)\n check_result = check_sig(payload, sig)\n result = check_result[0]\n payload_json = check_result[1]\n if result is False:\n print('signature does NOT verify')\n log_message(payload_json)\n return jsonify(result)\n if result is True:\n print('signature verifies')\n create_session()\n order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=\n payload['receiver_pk'], buy_currency=payload['buy_currency'\n ], sell_currency=payload['sell_currency'], buy_amount=\n payload['buy_amount'], sell_amount=payload['sell_amount'],\n signature=sig)\n g.session.add(order_obj)\n fill_order()\n shutdown_session()\n return jsonify(result)\n\n\n@app.route('/order_book')\ndef order_book():\n print('--------- order_book ---------')\n create_session()\n order_dict_list = [row2dict(order) for order in g.session.query(Order).\n all()]\n result = {'data': order_dict_list}\n print('order book length: ')\n print(len(order_dict_list))\n shutdown_session()\n return jsonify(result)\n\n\nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-5": "from flask import Flask, request, g\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nfrom flask import jsonify\nimport json\nimport eth_account\nimport algosdk\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\nfrom sqlalchemy.orm import load_only\nfrom datetime import datetime\nimport sys\n\nfrom models import Base, Order, Log\nengine = create_engine('sqlite:///orders.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\napp = Flask(__name__)\n\n# These decorators allow you to use g.session to access the database inside the request code\n# g is an \"application global\" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals\n\n@app.before_request\ndef create_session():\n g.session = scoped_session(DBSession) \n\n@app.teardown_appcontext\n# def shutdown_session(response_or_exc):\ndef shutdown_session(exception=None):\n sys.stdout.flush()\n g.session.commit()\n g.session.remove()\n\n\n\"\"\" Suggested helper methods \"\"\"\n\n\n# check whether “sig” is a valid signature of json.dumps(payload),\n# using the signature algorithm specified by the platform field.\n# Be sure to verify the payload using the sender_pk.\ndef check_sig(payload,sig):\n \n pk = payload['sender_pk']\n platform = payload['platform']\n payload_json = json.dumps(payload)\n result = False\n \n if platform == \"Algorand\":\n print(\"Algorand\")\n if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk):\n print(\"Algo sig verifies!\")\n result = True\n\n elif platform == \"Ethereum\":\n print(\"Ethereum\")\n eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json)\n if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk:\n print(\"Eth sig verifies!\")\n result = True\n \n return result, payload_json\n\n\n\n\n\n\n\n\n\n# def fill_order(order,txes=[]):\n# pass\n\n\n# the inner recursive function\ndef fill_order():\n # get the order you just 
inserted from the DB\n current_order = g.session.query(Order).order_by(Order.id.desc()).first()\n # print(\"_order_id\")\n # print(current_order.id)\n\n # Check if there are any existing orders that match and add them into a list\n order_list = []\n orders = g.session.query(Order).filter(Order.filled == None).all()\n for existing_order in orders:\n # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)):\n if ((existing_order.buy_currency == current_order.sell_currency)\n and (existing_order.sell_currency == current_order.buy_currency)\n and (existing_order.sell_amount / existing_order.buy_amount\n >= current_order.buy_amount / current_order.sell_amount)\n and (existing_order.counterparty_id == None)):\n order_list.append(existing_order)\n\n # If a match is found between order and existing_order\n if (len(order_list) > 0):\n # print(\" order_list_length\")\n # print(len(order_list))\n # pick the first one in the list\n match_order = order_list[0]\n\n # Set the filled field to be the current timestamp on both orders\n # Set counterparty_id to be the id of the other order\n match_order.filled = datetime.now()\n current_order.filled = datetime.now()\n match_order.counterparty_id = current_order.id\n current_order.counterparty_id = match_order.id\n g.session.commit()\n\n # if both orders can completely fill each other\n # no child order needs to be generated\n\n # If match_order is not completely filled\n if (current_order.sell_amount < match_order.buy_amount):\n # print(\"_match_order is not completely filled\")\n diff = match_order.buy_amount - current_order.sell_amount\n exchange_rate_match = match_order.sell_amount / match_order.buy_amount\n sell_amount_new_match = diff * exchange_rate_match\n # print(match_order.id)\n # print(diff)\n # print(sell_amount_new_match)\n new_order = Order(sender_pk=match_order.sender_pk,\n receiver_pk=match_order.receiver_pk,\n buy_currency=match_order.buy_currency,\n sell_currency=match_order.sell_currency,\n 
buy_amount=diff,\n sell_amount=sell_amount_new_match,\n creator_id=match_order.id)\n g.session.add(new_order)\n g.session.commit()\n print(\"M\")\n fill_order()\n\n # If current_order is not completely filled\n if (current_order.buy_amount > match_order.sell_amount):\n # print(\"_current_order is not completely filled\")\n diff = current_order.buy_amount - match_order.sell_amount\n exchange_rate_current = current_order.buy_amount / current_order.sell_amount\n sell_amount_new_current = diff / exchange_rate_current\n # print(current_order.id)\n # print(diff)\n # print(sell_amount_new_current)\n new_order = Order(sender_pk=current_order.sender_pk,\n receiver_pk=current_order.receiver_pk,\n buy_currency=current_order.buy_currency,\n sell_currency=current_order.sell_currency,\n buy_amount=diff,\n sell_amount=sell_amount_new_current,\n creator_id=current_order.id)\n g.session.add(new_order)\n g.session.commit()\n print(\"C\")\n fill_order()\n\n\n\n\n\n\n\n\n# Takes input dictionary d and writes it to the Log table\n# Hint: use json.dumps or str() to get it in a nice string form\ndef log_message(d):\n create_session()\n order_obj = Log(message=d)\n g.session.add(order_obj)\n shutdown_session()\n\n\n# convert a row in DB into a dict\ndef row2dict(row):\n return {\n c.name: getattr(row, c.name)\n for c in row.__table__.columns\n }\n\n# print a dictionary nicely\ndef print_dict(d):\n for key, value in d.items():\n print(key, ' : ', value)\n\n \n \n \n\n \n\"\"\" End of helper methods \"\"\"\n\n\n@app.route('/trade', methods=['POST'])\ndef trade():\n print(\"In trade endpoint\")\n if request.method == \"POST\":\n print(\"--------- trade ---------\")\n content = request.get_json(silent=True)\n print( f\"content = {json.dumps(content)}\" )\n columns = [ \"sender_pk\", \"receiver_pk\", \"buy_currency\", \"sell_currency\", \"buy_amount\", \"sell_amount\", \"platform\" ]\n fields = [ \"sig\", \"payload\" ]\n\n # check whether the input contains both \"sig\" and \"payload\"\n for 
field in fields:\n if not field in content.keys():\n print( f\"{field} not received by Trade\" )\n print( json.dumps(content) )\n log_message(content)\n return jsonify( False )\n \n # check whether the input contains all 7 fields of payload\n for column in columns:\n if not column in content['payload'].keys():\n print( f\"{column} not received by Trade\" )\n print( json.dumps(content) )\n log_message(content)\n return jsonify( False )\n \n #Your code here\n #Note that you can access the database session using g.session\n\n # TODO 1: Check the signature\n \n # extract contents from json\n sig = content['sig']\n payload = content['payload']\n platform = payload['platform']\n\n # The platform must be either “Algorand” or \"Ethereum\".\n platforms = [\"Algorand\", \"Ethereum\"]\n if not platform in platforms:\n print(\"input platform is not Algorand or Ethereum\")\n return jsonify(False)\n \n # check signature\n check_result = check_sig(payload,sig)\n result = check_result[0]\n payload_json = check_result[1]\n \n # TODO 2: Add the order to the database\n # TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful\n \n # If the signature does not verify, do not insert the order into the “Order” table.\n # Instead, insert a record into the “Log” table, with the message field set to be json.dumps(payload).\n if result is False:\n print(\"signature does NOT verify\")\n log_message(payload_json) \n return jsonify(result)\n \n # If the signature verifies, store the signature,\n # as well as all of the fields under the ‘payload’ in the “Order” table EXCEPT for 'platform’.\n if result is True:\n print(\"signature verifies\")\n create_session()\n order_obj = Order(sender_pk=payload['sender_pk'],\n receiver_pk=payload['receiver_pk'],\n buy_currency=payload['buy_currency'],\n sell_currency=payload['sell_currency'],\n buy_amount=payload['buy_amount'],\n sell_amount=payload['sell_amount'],\n signature=sig) \n g.session.add(order_obj)\n \n # 
TODO 3: Fill the order\n fill_order()\n shutdown_session()\n return jsonify(result)\n \n \n \n \n\n@app.route('/order_book')\ndef order_book():\n #Your code here\n #Note that you can access the database session using g.session\n \n # The “/order_book” endpoint should return a list of all orders in the database.\n # The response should contain a single key “data” that refers to a list of orders formatted as JSON.\n # Each order should be a dict with (at least) the following fields\n # (\"sender_pk\", \"receiver_pk\", \"buy_currency\", \"sell_currency\", \"buy_amount\", \"sell_amount\", “signature”).\n print(\"--------- order_book ---------\")\n create_session()\n \n # get orders from DB into a list\n order_dict_list = [\n row2dict(order)\n for order in g.session.query(Order).all()\n ]\n \n # add the list into a dict\n result = {\n 'data': order_dict_list\n } \n \n print(\"order book length: \")\n print(len(order_dict_list))\n # print_dict(order_dict_list[-2])\n # print_dict(order_dict_list[-1])\n\n shutdown_session()\n return jsonify(result)\n \n\n \nif __name__ == '__main__':\n app.run(port='5002')\n",
"step-ids": [
7,
8,
11,
12,
13
]
}
|
[
7,
8,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from . import *
from module import *
from transfer import *
from dataset import *
|
flexible
|
{
"blob_id": "94d992ef4b9015aa8f42071bb1409703d509c313",
"index": 9810,
"step-1": "<mask token>\n",
"step-2": "from . import *\nfrom module import *\nfrom transfer import *\nfrom dataset import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
"""Restaurant"""
moeny = int(input())
service = moeny * 0.1
vat = moeny * 0.07
print('Service Charge : %.2f Baht' % service)
print('VAT : %.2f Baht' % vat)
print('Total : %.2f Baht' % (moeny + vat + service))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
"""Restaurant"""
moeny = int(input())
service = moeny * 0.1
vat = moeny * 0.07
print('Service Charge : %.2f Baht' % service)
print('VAT : %.2f Baht' % vat)
print('Total : %.2f Baht' % (moeny + vat + service))
main()
<|reserved_special_token_1|>
"""Restaurant"""
def main():
    """Read a bill amount from stdin and print the 10% service charge,
    7% VAT, and grand total, each formatted to two decimals."""
    amount = int(input())
    service_charge = amount * 0.1
    vat = amount * 0.07
    print(f"Service Charge : {service_charge:.2f} Baht")
    print(f"VAT : {vat:.2f} Baht")
    print(f"Total : {amount + vat + service_charge:.2f} Baht")


main()
|
flexible
|
{
"blob_id": "ae6cbb181e024b8c0b222d14120b910919f8cc81",
"index": 3811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"Restaurant\"\"\"\n moeny = int(input())\n service = moeny * 0.1\n vat = moeny * 0.07\n print('Service Charge : %.2f Baht' % service)\n print('VAT : %.2f Baht' % vat)\n print('Total : %.2f Baht' % (moeny + vat + service))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\"Restaurant\"\"\"\n moeny = int(input())\n service = moeny * 0.1\n vat = moeny * 0.07\n print('Service Charge : %.2f Baht' % service)\n print('VAT : %.2f Baht' % vat)\n print('Total : %.2f Baht' % (moeny + vat + service))\n\n\nmain()\n",
"step-4": "\"\"\"Restaurant\"\"\"\ndef main():\n \"\"\"Restaurant\"\"\"\n moeny = int(input())\n service = moeny*0.1\n vat = moeny*0.07\n print(\"Service Charge : %.2f Baht\" %service)\n print(\"VAT : %.2f Baht\" %vat)\n print(\"Total : %.2f Baht\" %(moeny+vat+service))\nmain()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# https://daphne-dev.github.io/2020/09/24/algo-022/
def solution(n):
    """Fill an n-row triangle in a "snail" spiral and return it flattened.

    The walk repeats three phases — down the left edge, right along the
    bottom, then diagonally up-left — with each phase one step shorter
    than the previous, until every cell is numbered.

    Fix over the original: the old loop decremented ``size`` past zero
    for ``n == 0`` and its ``size == 0`` break never fired, so it hung.
    This version terminates and returns ``[]`` for ``n == 0``.
    """
    triangle = [[0] * (row + 1) for row in range(n)]
    # (dy, dx) steps for the three phases: down, right, up-left diagonal.
    deltas = [(1, 0), (0, 1), (-1, -1)]
    y, x = -1, 0
    value = 0
    # Phase lengths are n, n-1, ..., 1; the direction cycles every phase.
    for phase, length in enumerate(range(n, 0, -1)):
        dy, dx = deltas[phase % 3]
        for _ in range(length):
            value += 1
            y += dy
            x += dx
            triangle[y][x] = value
    # Flatten row by row, matching the original extend() loop.
    return [cell for row in triangle for cell in row]
# print(solution(4))
|
normal
|
{
"blob_id": "3c029adb59cd6db1e3d4a22e6561f5e2ae827d60",
"index": 2465,
"step-1": "<mask token>\n",
"step-2": "def solution(n):\n arr = [[(0) for _ in range(i + 1)] for i in range(n)]\n size = n\n num = 0\n x = 0\n y = -1\n while True:\n for _ in range(size):\n num += 1\n y += 1\n arr[y][x] = num\n size -= 1\n if size == 0:\n break\n for _ in range(size):\n num += 1\n x += 1\n arr[y][x] = num\n size -= 1\n if size == 0:\n break\n for _ in range(size):\n num += 1\n x -= 1\n y -= 1\n arr[y][x] = num\n size -= 1\n if size == 0:\n break\n answer = []\n for i in arr:\n answer.extend(i)\n return answer\n",
"step-3": "# https://daphne-dev.github.io/2020/09/24/algo-022/\ndef solution(n):\n arr = [[0 for _ in range(i+1)] for i in range(n)]\n # 경우의수 는 3가지\n # 1. y축이 증가하면서 수가 증가\n # 2. x축이 증가하면서 수가 증가\n # 3. y,x축이 감소하면서 수가 증가\n size = n\n num = 0\n x = 0\n y = -1\n while True:\n # 1번\n for _ in range(size):\n num += 1\n y += 1\n arr[y][x] = num\n size-=1\n if size == 0:\n break\n # 2번\n for _ in range(size):\n num += 1\n x += 1\n arr[y][x] = num\n size-=1\n if size == 0:\n break\n # 3번\n for _ in range(size):\n num += 1\n x -= 1\n y -= 1\n arr[y][x] = num\n size-=1\n if size == 0:\n break\n answer = []\n for i in arr:\n answer.extend(i)\n return answer\n# print(solution(4))",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(effort)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
size, k = map(int, input().split())
parcel = list(map(int, input().split()))
effort = 2 * parcel[k - 1] * min(parcel) + max(parcel) * min(parcel)
print(effort)
<|reserved_special_token_1|>
#Question:
"""
The parcel section of the Head Post Office is in a mess. The parcels that need to be loaded to the vans have been lined up in a row in an arbitrary order of weights. The Head Post Master wants them to be sorted in the increasing order of the weights of the parcels, with one exception. He wants the heaviest (and presumably the most valuable) parcel kept nearest his office.
You and your friend try to sort these boxes and you decide to sort them by interchanging two boxes at a time. Such an interchange needs effort equal to the product of the weights of the two boxes.
The objective is to reposition the boxes as required with minimum effort.
Input Format:
The first line consists of two space-separated positive integers giving the number of boxes (N) and the position of the Head Post Masters office (k) where the heaviest box must be.
The second line consists of N space-separated positive integers giving the weights of the boxes. You may assume that no two weights are equal
Output Format:
The output is one line giving the total effort taken to get the boxes in sorted order, and the heaviest in position k.
Constraints:
N<=50 and Weights <= 1000
Sample Input 1:
5 2
20 50 30 80 70
Sample Output 1:
3600
"""
#Solution:
size, k = map(int, input().split())
weights = list(map(int, input().split()))
lightest = min(weights)
# Effort formula: two swaps pairing the box at position k (1-based) with the
# lightest box, plus one swap pairing the heaviest box with the lightest.
effort = 2 * weights[k - 1] * lightest + max(weights) * lightest
print(effort)
|
flexible
|
{
"blob_id": "92dea316889192824c353002670cdcf03dfbcd4c",
"index": 1457,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(effort)\n",
"step-3": "<mask token>\nsize, k = map(int, input().split())\nparcel = list(map(int, input().split()))\neffort = 2 * parcel[k - 1] * min(parcel) + max(parcel) * min(parcel)\nprint(effort)\n",
"step-4": "#Question:\n\n\"\"\"\nThe parcel section of the Head Post Office is in a mess. The parcels that need to be loaded to the vans have been lined up in a row in an arbitrary order of weights. The Head Post Master wants them to be sorted in the increasing order of the weights of the parcels, with one exception. He wants the heaviest (and presumably the most valuable) parcel kept nearest his office.\n\nYou and your friend try to sort these boxes and you decide to sort them by interchanging two boxes at a time. Such an interchange needs effort equal to the product of the weights of the two boxes.\n\nThe objective is to reposition the boxes as required with minimum effort.\n\nInput Format:\n\nThe first line consists of two space-separated positive integers giving the number of boxes (N) and the position of the Head Post Masters office (k) where the heaviest box must be.\nThe second line consists of N space-separated positive integers giving the weights of the boxes. You may assume that no two weights are equal\nOutput Format:\n\nThe output is one line giving the total effort taken to get the boxes in sorted order, and the heaviest in position k.\nConstraints:\n\n N<=50 and Weights <= 1000\n\nSample Input 1:\n5 2\n20 50 30 80 70\nSample Output 1:\n3600\n\"\"\"\n\n#Solution:\n\nsize,k = map(int,input().split())\nparcel = list(map(int,input().split()))\neffort = 2*parcel[k-1]*min(parcel) + max(parcel)*min(parcel)\nprint(effort)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def max_product(n):
    """Return every integer in [0, n] whose digit product is maximal.

    The digit product of a number is the product of its decimal digits
    (e.g. 26 -> 12). All numbers tying for the maximum are returned,
    in increasing order.
    """

    def digit_product(number):
        # Multiply the decimal digits of `number` together.
        product = 1
        for digit in str(number):
            product *= int(digit)
        return product

    products = [digit_product(i) for i in range(n + 1)]
    best = max(products)
    return [i for i, p in enumerate(products) if p == best]
|
normal
|
{
"blob_id": "c804391cc199a242d1b54ece8487ef74065a40ad",
"index": 840,
"step-1": "\ndef max_product(n):\n lst, lstnums, res, num = [], [], [], 1\n for i in range(0, n+1):\n lstnums.append(i)\n for j in str(i):\n num *= int(j)\n lst.append(num)\n num = 1\n\n maxlst = max(lst)\n for i in range(len(lst)):\n if lst[i] == maxlst:\n res.append(lstnums[i])\n\n return res\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
file_handle.write(contents)
file_handle.close()
print(contents)
<|reserved_special_token_1|>
fileName = str(input(
'Please write the name of the file you would like to open: '))
file_handle = open(fileName, 'w')
contents = str(input('Please write the content you would like to save.'))
file_handle.write(contents)
file_handle.close()
print(contents)
<|reserved_special_token_1|>
# Prompt for a file name, overwrite it with user-supplied text, then echo it.
fileName = input("Please write the name of the file you would like to open: ")
with open(fileName, "w") as file_handle:
    contents = input("Please write the content you would like to save.")
    file_handle.write(contents)
print(contents)
|
flexible
|
{
"blob_id": "aed09a3c04f284fa0b8844a47c5bc9d1621a9b5f",
"index": 2034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)\n",
"step-3": "fileName = str(input(\n 'Please write the name of the file you would like to open: '))\nfile_handle = open(fileName, 'w')\ncontents = str(input('Please write the content you would like to save.'))\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)\n",
"step-4": "fileName = str(input(\"Please write the name of the file you would like to open: \"))\n\nfile_handle = open(fileName, \"w\")\ncontents = str(input(\"Please write the content you would like to save.\"))\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""This module will serve the api request."""
import json
from bson.json_util import dumps
from flask import abort, request, Response, jsonify
from api import app, collection
@app.route("/api/v1/users", methods=['POST'])
def create_user():
    """
    Create a new user from the JSON request body.

    Returns 201 with a success message, 400 when the body is missing or
    not valid JSON, and 500 on any other failure.
    """
    try:
        # Create new user
        try:
            body = request.get_json()
        except Exception:
            # Bad request as request body is not available
            return abort(400)
        if body is None:
            # Empty request body — nothing to insert.
            return abort(400)
        # NOTE(review): Collection.insert is the legacy pymongo API;
        # insert_one is its modern replacement — confirm pymongo version.
        collection.insert(body)
        return jsonify({"message": "Successfully Created the resource."}), 201
    except Exception:
        # Error while trying to create the resource; jsonify for a
        # consistent JSON error shape with the other endpoints.
        return jsonify({"message": "Error while trying to create the resource"}), 500
@app.route("/api/v1/users", methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
# Fetch all the record(s)
records_fetched = collection.find()
# Check if the records are found
if records_fetched.count() > 0:
# Prepare the response
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
# No records are found
return jsonify({"message":"No records are found"}), 404
except Exception as e:
print(str(e))
# Error while trying to fetch the resource
return jsonify({"message":"Error while trying to fetch the resource"}), 500
@app.route("/api/v1/users/<user_id>", methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
# Get the value which needs to be updated
try:
body = ast.literal_eval(json.dumps(request.get_json()))
except:
# Bad request as the request body is not available
# Add message for debugging purpose
return "", 400
# Updating the user
records_updated = collection.update_one({"id": int(user_id)}, body)
# Check if resource is updated
if records_updated.modified_count > 0:
# Prepare the response as resource is updated successfully
return "", 200
else:
# Bad request as the resource is not available to update
# Add message for debugging purpose
return "", 404
except:
# Error while trying to update the resource
# Add message for debugging purpose
return "", 500
@app.route("/api/v1/users/<user_id>", methods=['DELETE'])
def remove_user(user_id):
"""
Function to remove the user.
"""
try:
# Delete the user
delete_user = collection.delete_one({"id": int(user_id)})
if delete_user.deleted_count > 0 :
# Prepare the response
return "", 204
else:
# Resource Not found
return "", 404
except:
# Error while trying to delete the resource
# Add message for debugging purpose
return "", 500
@app.errorhandler(404)
def page_not_found(e):
"""Send message to the user with notFound 404 status."""
# Message to the user
message = {
"err":
{
"msg": "This route is currently not supported. Please refer API documentation."
}
}
# Making the message looks good
resp = jsonify(message)
# Sending OK response
resp.status_code = 404
# Returning the object
return resp
|
normal
|
{
"blob_id": "0f4bb65b93df997ca1a9b7945ebcec53a2f43822",
"index": 3636,
"step-1": "<mask token>\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\n@app.route('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\n@app.route('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\n@app.route('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n message = {'err': {'msg':\n 'This route is currently not supported. Please refer API documentation.'\n }}\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n",
"step-4": "<mask token>\nimport json\nfrom bson.json_util import dumps\nfrom flask import abort, request, Response, jsonify\nfrom api import app, collection\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\n@app.route('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\n@app.route('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n message = {'err': {'msg':\n 'This route is currently not supported. 
Please refer API documentation.'\n }}\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n",
"step-5": "\"\"\"This module will serve the api request.\"\"\"\n\nimport json\nfrom bson.json_util import dumps\nfrom flask import abort, request, Response, jsonify\nfrom api import app, collection\n\n\n@app.route(\"/api/v1/users\", methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n # Create new user\n try:\n body = request.get_json()\n except:\n # Bad request as request body is not available\n return abort(400)\n\n record_id = collection.insert(body)\n return jsonify({\"message\":\"Successfully Created the resource.\"}), 201\n\n except:\n # Error while trying to create the resource\n return \"Error while trying to create the resource\", 500\n\n\n@app.route(\"/api/v1/users\", methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n # Fetch all the record(s)\n records_fetched = collection.find()\n\n # Check if the records are found\n if records_fetched.count() > 0:\n # Prepare the response\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n # No records are found\n return jsonify({\"message\":\"No records are found\"}), 404\n except Exception as e:\n print(str(e))\n # Error while trying to fetch the resource\n return jsonify({\"message\":\"Error while trying to fetch the resource\"}), 500\n\n\n@app.route(\"/api/v1/users/<user_id>\", methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n # Get the value which needs to be updated\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n # Bad request as the request body is not available\n # Add message for debugging purpose\n return \"\", 400\n\n # Updating the user\n records_updated = collection.update_one({\"id\": int(user_id)}, body)\n\n # Check if resource is updated\n if records_updated.modified_count > 0:\n # Prepare the response as resource is updated successfully\n return \"\", 200\n 
else:\n # Bad request as the resource is not available to update\n # Add message for debugging purpose\n return \"\", 404\n except:\n # Error while trying to update the resource\n # Add message for debugging purpose\n return \"\", 500\n\n\n@app.route(\"/api/v1/users/<user_id>\", methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n # Delete the user\n delete_user = collection.delete_one({\"id\": int(user_id)})\n\n if delete_user.deleted_count > 0 :\n # Prepare the response\n return \"\", 204\n else:\n # Resource Not found\n return \"\", 404\n except:\n # Error while trying to delete the resource\n # Add message for debugging purpose\n return \"\", 500\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n # Message to the user\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. Please refer API documentation.\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the object\n return resp\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import giraffe.configuration.common_testing_artifactrs as commons
from giraffe.business_logic.ingestion_manger import IngestionManager
from redis import Redis
def test_parse_redis_key(config_helper, ingestion_manager):
im = ingestion_manager
job_name = config_helper.nodes_ingestion_operation
operation = config_helper.nodes_ingestion_operation
labels = config_helper.test_labels
parsed: IngestionManager.key_elements_type = im.parse_redis_key(
key=f'{job_name}{config_helper.key_separator}{operation}{config_helper.key_separator}{",".join(labels)}')
assert parsed.job_name == job_name
assert parsed.operation == operation
assert set(parsed.arguments) == set(labels)
def test_publish_job(config_helper, redis_driver, ingestion_manager, nodes, edges, logger, redis_db):
r: Redis = redis_driver
im: IngestionManager = ingestion_manager
commons.purge_redis_database(redis_db=redis_db, log=logger)
# Populate nodes
im.publish_job(job_name=config_helper.test_job_name,
operation=config_helper.nodes_ingestion_operation,
operation_arguments=','.join(config_helper.test_labels),
items=[str(value) for value in nodes])
# Populate edges
im.publish_job(job_name=config_helper.test_job_name,
operation=config_helper.edges_ingestion_operation,
operation_arguments=f'{config_helper.test_edge_type},{config_helper.test_labels[0]}',
items=[str(value) for value in edges])
keys = r.keys(pattern=f'{config_helper.test_job_name}*')
assert len(keys) == 2
node_keys = r.keys(pattern=f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.nodes_ingestion_operation}{config_helper.key_separator}*')
assert len(node_keys) == 1
edges_keys = r.keys(pattern=f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.edges_ingestion_operation}{config_helper.key_separator}*')
assert len(edges_keys) == 1
nodes_key = node_keys[0]
edges_key = edges_keys[0]
num_stored_nodes = r.scard(name=nodes_key)
assert num_stored_nodes == len(nodes)
num_stored_edges = r.scard(name=edges_key)
assert num_stored_edges == len(edges)
def test_process_job(config_helper, ingestion_manager, redis_db, logger, neo):
commons.purge_redis_database(redis_db=redis_db, log=logger)
commons.purge_neo4j_database(log=logger, neo=neo)
commons.init_redis_test_data(im=ingestion_manager)
im = ingestion_manager
im.process_redis_content(translation_id=config_helper.test_job_name, request_id='unit-testing')
query = f'MATCH (:{config_helper.test_labels[0]}) RETURN COUNT(*) AS count'
count = neo.pull_query(query=query).value()[0]
assert count == config_helper.number_of_test_nodes
query = f'MATCH ()-[:{config_helper.test_edge_type}]->() RETURN COUNT(*) AS count'
count = neo.pull_query(query=query).value()[0]
assert count == config_helper.number_of_test_edges
|
normal
|
{
"blob_id": "13451352e8dcdfe64771f9fc188b13a31b8109f5",
"index": 4555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_parse_redis_key(config_helper, ingestion_manager):\n im = ingestion_manager\n job_name = config_helper.nodes_ingestion_operation\n operation = config_helper.nodes_ingestion_operation\n labels = config_helper.test_labels\n parsed: IngestionManager.key_elements_type = im.parse_redis_key(key=\n f\"{job_name}{config_helper.key_separator}{operation}{config_helper.key_separator}{','.join(labels)}\"\n )\n assert parsed.job_name == job_name\n assert parsed.operation == operation\n assert set(parsed.arguments) == set(labels)\n\n\n<mask token>\n\n\ndef test_process_job(config_helper, ingestion_manager, redis_db, logger, neo):\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n commons.purge_neo4j_database(log=logger, neo=neo)\n commons.init_redis_test_data(im=ingestion_manager)\n im = ingestion_manager\n im.process_redis_content(translation_id=config_helper.test_job_name,\n request_id='unit-testing')\n query = f'MATCH (:{config_helper.test_labels[0]}) RETURN COUNT(*) AS count'\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_nodes\n query = (\n f'MATCH ()-[:{config_helper.test_edge_type}]->() RETURN COUNT(*) AS count'\n )\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_edges\n",
"step-3": "<mask token>\n\n\ndef test_parse_redis_key(config_helper, ingestion_manager):\n im = ingestion_manager\n job_name = config_helper.nodes_ingestion_operation\n operation = config_helper.nodes_ingestion_operation\n labels = config_helper.test_labels\n parsed: IngestionManager.key_elements_type = im.parse_redis_key(key=\n f\"{job_name}{config_helper.key_separator}{operation}{config_helper.key_separator}{','.join(labels)}\"\n )\n assert parsed.job_name == job_name\n assert parsed.operation == operation\n assert set(parsed.arguments) == set(labels)\n\n\ndef test_publish_job(config_helper, redis_driver, ingestion_manager, nodes,\n edges, logger, redis_db):\n r: Redis = redis_driver\n im: IngestionManager = ingestion_manager\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n im.publish_job(job_name=config_helper.test_job_name, operation=\n config_helper.nodes_ingestion_operation, operation_arguments=','.\n join(config_helper.test_labels), items=[str(value) for value in nodes])\n im.publish_job(job_name=config_helper.test_job_name, operation=\n config_helper.edges_ingestion_operation, operation_arguments=\n f'{config_helper.test_edge_type},{config_helper.test_labels[0]}',\n items=[str(value) for value in edges])\n keys = r.keys(pattern=f'{config_helper.test_job_name}*')\n assert len(keys) == 2\n node_keys = r.keys(pattern=\n f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.nodes_ingestion_operation}{config_helper.key_separator}*'\n )\n assert len(node_keys) == 1\n edges_keys = r.keys(pattern=\n f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.edges_ingestion_operation}{config_helper.key_separator}*'\n )\n assert len(edges_keys) == 1\n nodes_key = node_keys[0]\n edges_key = edges_keys[0]\n num_stored_nodes = r.scard(name=nodes_key)\n assert num_stored_nodes == len(nodes)\n num_stored_edges = r.scard(name=edges_key)\n assert num_stored_edges == len(edges)\n\n\ndef test_process_job(config_helper, 
ingestion_manager, redis_db, logger, neo):\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n commons.purge_neo4j_database(log=logger, neo=neo)\n commons.init_redis_test_data(im=ingestion_manager)\n im = ingestion_manager\n im.process_redis_content(translation_id=config_helper.test_job_name,\n request_id='unit-testing')\n query = f'MATCH (:{config_helper.test_labels[0]}) RETURN COUNT(*) AS count'\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_nodes\n query = (\n f'MATCH ()-[:{config_helper.test_edge_type}]->() RETURN COUNT(*) AS count'\n )\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_edges\n",
"step-4": "import giraffe.configuration.common_testing_artifactrs as commons\nfrom giraffe.business_logic.ingestion_manger import IngestionManager\nfrom redis import Redis\n\n\ndef test_parse_redis_key(config_helper, ingestion_manager):\n im = ingestion_manager\n job_name = config_helper.nodes_ingestion_operation\n operation = config_helper.nodes_ingestion_operation\n labels = config_helper.test_labels\n parsed: IngestionManager.key_elements_type = im.parse_redis_key(key=\n f\"{job_name}{config_helper.key_separator}{operation}{config_helper.key_separator}{','.join(labels)}\"\n )\n assert parsed.job_name == job_name\n assert parsed.operation == operation\n assert set(parsed.arguments) == set(labels)\n\n\ndef test_publish_job(config_helper, redis_driver, ingestion_manager, nodes,\n edges, logger, redis_db):\n r: Redis = redis_driver\n im: IngestionManager = ingestion_manager\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n im.publish_job(job_name=config_helper.test_job_name, operation=\n config_helper.nodes_ingestion_operation, operation_arguments=','.\n join(config_helper.test_labels), items=[str(value) for value in nodes])\n im.publish_job(job_name=config_helper.test_job_name, operation=\n config_helper.edges_ingestion_operation, operation_arguments=\n f'{config_helper.test_edge_type},{config_helper.test_labels[0]}',\n items=[str(value) for value in edges])\n keys = r.keys(pattern=f'{config_helper.test_job_name}*')\n assert len(keys) == 2\n node_keys = r.keys(pattern=\n f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.nodes_ingestion_operation}{config_helper.key_separator}*'\n )\n assert len(node_keys) == 1\n edges_keys = r.keys(pattern=\n f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.edges_ingestion_operation}{config_helper.key_separator}*'\n )\n assert len(edges_keys) == 1\n nodes_key = node_keys[0]\n edges_key = edges_keys[0]\n num_stored_nodes = r.scard(name=nodes_key)\n assert 
num_stored_nodes == len(nodes)\n num_stored_edges = r.scard(name=edges_key)\n assert num_stored_edges == len(edges)\n\n\ndef test_process_job(config_helper, ingestion_manager, redis_db, logger, neo):\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n commons.purge_neo4j_database(log=logger, neo=neo)\n commons.init_redis_test_data(im=ingestion_manager)\n im = ingestion_manager\n im.process_redis_content(translation_id=config_helper.test_job_name,\n request_id='unit-testing')\n query = f'MATCH (:{config_helper.test_labels[0]}) RETURN COUNT(*) AS count'\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_nodes\n query = (\n f'MATCH ()-[:{config_helper.test_edge_type}]->() RETURN COUNT(*) AS count'\n )\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_edges\n",
"step-5": "import giraffe.configuration.common_testing_artifactrs as commons\nfrom giraffe.business_logic.ingestion_manger import IngestionManager\nfrom redis import Redis\n\n\ndef test_parse_redis_key(config_helper, ingestion_manager):\n im = ingestion_manager\n job_name = config_helper.nodes_ingestion_operation\n operation = config_helper.nodes_ingestion_operation\n labels = config_helper.test_labels\n parsed: IngestionManager.key_elements_type = im.parse_redis_key(\n key=f'{job_name}{config_helper.key_separator}{operation}{config_helper.key_separator}{\",\".join(labels)}')\n assert parsed.job_name == job_name\n assert parsed.operation == operation\n assert set(parsed.arguments) == set(labels)\n\n\ndef test_publish_job(config_helper, redis_driver, ingestion_manager, nodes, edges, logger, redis_db):\n r: Redis = redis_driver\n im: IngestionManager = ingestion_manager\n\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n\n # Populate nodes\n im.publish_job(job_name=config_helper.test_job_name,\n operation=config_helper.nodes_ingestion_operation,\n operation_arguments=','.join(config_helper.test_labels),\n items=[str(value) for value in nodes])\n\n # Populate edges\n im.publish_job(job_name=config_helper.test_job_name,\n operation=config_helper.edges_ingestion_operation,\n operation_arguments=f'{config_helper.test_edge_type},{config_helper.test_labels[0]}',\n items=[str(value) for value in edges])\n\n keys = r.keys(pattern=f'{config_helper.test_job_name}*')\n assert len(keys) == 2\n node_keys = r.keys(pattern=f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.nodes_ingestion_operation}{config_helper.key_separator}*')\n assert len(node_keys) == 1\n edges_keys = r.keys(pattern=f'{config_helper.test_job_name}{config_helper.key_separator}{config_helper.edges_ingestion_operation}{config_helper.key_separator}*')\n assert len(edges_keys) == 1\n\n nodes_key = node_keys[0]\n edges_key = edges_keys[0]\n\n num_stored_nodes = 
r.scard(name=nodes_key)\n assert num_stored_nodes == len(nodes)\n num_stored_edges = r.scard(name=edges_key)\n assert num_stored_edges == len(edges)\n\n\ndef test_process_job(config_helper, ingestion_manager, redis_db, logger, neo):\n commons.purge_redis_database(redis_db=redis_db, log=logger)\n commons.purge_neo4j_database(log=logger, neo=neo)\n commons.init_redis_test_data(im=ingestion_manager)\n im = ingestion_manager\n im.process_redis_content(translation_id=config_helper.test_job_name, request_id='unit-testing')\n query = f'MATCH (:{config_helper.test_labels[0]}) RETURN COUNT(*) AS count'\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_nodes\n query = f'MATCH ()-[:{config_helper.test_edge_type}]->() RETURN COUNT(*) AS count'\n count = neo.pull_query(query=query).value()[0]\n assert count == config_helper.number_of_test_edges\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class LoggingMiddleware(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ScriptNameEdit(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
url = environ['SCRIPT_NAME']
environ['wsgi.url_scheme'] = 'https'
environ['SCRIPT_NAME'] = URL_PREFIX + url
return self.app(environ, start_response)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
<|reserved_special_token_0|>
class ScriptNameEdit(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
url = environ['SCRIPT_NAME']
environ['wsgi.url_scheme'] = 'https'
environ['SCRIPT_NAME'] = URL_PREFIX + url
return self.app(environ, start_response)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, resp):
errorlog = environ['wsgi.errors']
pprint.pprint(('REQUEST', environ), stream=errorlog)
def log_response(status, headers, *args):
pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
return resp(status, headers, *args)
return self._app(environ, log_response)
class ScriptNameEdit(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
url = environ['SCRIPT_NAME']
environ['wsgi.url_scheme'] = 'https'
environ['SCRIPT_NAME'] = URL_PREFIX + url
return self.app(environ, start_response)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from application import app
import pprint
import sys
URL_PREFIX = '/pub/livemap'
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, resp):
errorlog = environ['wsgi.errors']
pprint.pprint(('REQUEST', environ), stream=errorlog)
def log_response(status, headers, *args):
pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
return resp(status, headers, *args)
return self._app(environ, log_response)
class ScriptNameEdit(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
url = environ['SCRIPT_NAME']
environ['wsgi.url_scheme'] = 'https'
environ['SCRIPT_NAME'] = URL_PREFIX + url
return self.app(environ, start_response)
if '-l' not in sys.argv:
app.wsgi_app = ScriptNameEdit(app.wsgi_app)
application = app
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
<|reserved_special_token_1|>
#!/usr/bin/env python
from application import app
import pprint
import sys
URL_PREFIX = '/pub/livemap'
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, resp):
errorlog = environ['wsgi.errors']
pprint.pprint(('REQUEST', environ), stream=errorlog)
def log_response(status, headers, *args):
pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
return resp(status, headers, *args)
return self._app(environ, log_response)
class ScriptNameEdit(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
url = environ['SCRIPT_NAME']
environ['wsgi.url_scheme'] = 'https'
environ['SCRIPT_NAME'] = URL_PREFIX + url
return self.app(environ, start_response)
if '-l' not in sys.argv:
# app.wsgi_app = LoggingMiddleware(app.wsgi_app)
app.wsgi_app = ScriptNameEdit(app.wsgi_app)
application = app
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=True)
|
flexible
|
{
"blob_id": "a2aa615ac660f13727a97cdd2feaca8f6e457da4",
"index": 4830,
"step-1": "<mask token>\n\n\nclass LoggingMiddleware(object):\n <mask token>\n <mask token>\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LoggingMiddleware(object):\n\n def __init__(self, app):\n self._app = app\n <mask token>\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LoggingMiddleware(object):\n\n def __init__(self, app):\n self._app = app\n\n def __call__(self, environ, resp):\n errorlog = environ['wsgi.errors']\n pprint.pprint(('REQUEST', environ), stream=errorlog)\n\n def log_response(status, headers, *args):\n pprint.pprint(('RESPONSE', status, headers), stream=errorlog)\n return resp(status, headers, *args)\n return self._app(environ, log_response)\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\n<mask token>\n",
"step-4": "from application import app\nimport pprint\nimport sys\nURL_PREFIX = '/pub/livemap'\n\n\nclass LoggingMiddleware(object):\n\n def __init__(self, app):\n self._app = app\n\n def __call__(self, environ, resp):\n errorlog = environ['wsgi.errors']\n pprint.pprint(('REQUEST', environ), stream=errorlog)\n\n def log_response(status, headers, *args):\n pprint.pprint(('RESPONSE', status, headers), stream=errorlog)\n return resp(status, headers, *args)\n return self._app(environ, log_response)\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\nif '-l' not in sys.argv:\n app.wsgi_app = ScriptNameEdit(app.wsgi_app)\napplication = app\nif __name__ == '__main__':\n app.run(host='0.0.0.0', threaded=True)\n",
"step-5": "#!/usr/bin/env python\nfrom application import app\nimport pprint\nimport sys\n\nURL_PREFIX = '/pub/livemap'\n\n\nclass LoggingMiddleware(object):\n def __init__(self, app):\n self._app = app\n\n def __call__(self, environ, resp):\n errorlog = environ['wsgi.errors']\n pprint.pprint(('REQUEST', environ), stream=errorlog)\n\n def log_response(status, headers, *args):\n pprint.pprint(('RESPONSE', status, headers), stream=errorlog)\n return resp(status, headers, *args)\n\n return self._app(environ, log_response)\n\n\nclass ScriptNameEdit(object):\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n url = environ['SCRIPT_NAME']\n environ['wsgi.url_scheme'] = 'https'\n environ['SCRIPT_NAME'] = URL_PREFIX + url\n return self.app(environ, start_response)\n\n\nif '-l' not in sys.argv:\n # app.wsgi_app = LoggingMiddleware(app.wsgi_app)\n app.wsgi_app = ScriptNameEdit(app.wsgi_app)\n\napplication = app\n\nif __name__ == \"__main__\":\n\n app.run(host='0.0.0.0', threaded=True)\n",
"step-ids": [
4,
5,
6,
9,
10
]
}
|
[
4,
5,
6,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .interface import AudioInterface
from .config import AudioConfig
from .buffer import CustomBuffer
|
flexible
|
{
"blob_id": "cc33d0cf1b922a6b48fb83be07acb35a62372f2e",
"index": 8260,
"step-1": "<mask token>\n",
"step-2": "from .interface import AudioInterface\nfrom .config import AudioConfig\nfrom .buffer import CustomBuffer\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(hfilename, 'r') as file:
for line in file:
tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)
WordList = tweetSplitter.tokenize(line)
regex1 = re.compile('^#.+')
regex2 = re.compile('[^\\W\\d]')
regex3 = re.compile('^http*')
regex4 = re.compile('.+\\..+')
for item in WordList:
if len(item) > 2:
if re.match(regex1, item):
newitem = item[1:]
BagOfHashes.append(newitem)
hashcount = hashcount + 1
elif re.match(regex2, item):
if re.match(regex3, item) or re.match(regex4, item):
BagOfLinks.append(item)
else:
BagOfWords.append(item)
wordcount = wordcount + 1
else:
pass
else:
pass
<|reserved_special_token_0|>
for word in BigBag:
if word.lower() not in stopwords.words() and word.lower(
) not in IgnoreThese:
filtered_words.append(word.lower())
<|reserved_special_token_0|>
with open('wordcloud.txt', 'w') as f:
f.write(word_string)
with open('tableau.txt', 'w') as f:
for s in filtered_words:
f.write('%s\n' % s)
<|reserved_special_token_0|>
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(TwitterWordCloud)
plt.axis('off')
plt.tight_layout(pad=0)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hfilename = 'file.txt'
linecount = 0
hashcount = 0
wordcount = 0
BagOfWords = []
BagOfHashes = []
BagOfLinks = []
with open(hfilename, 'r') as file:
for line in file:
tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)
WordList = tweetSplitter.tokenize(line)
regex1 = re.compile('^#.+')
regex2 = re.compile('[^\\W\\d]')
regex3 = re.compile('^http*')
regex4 = re.compile('.+\\..+')
for item in WordList:
if len(item) > 2:
if re.match(regex1, item):
newitem = item[1:]
BagOfHashes.append(newitem)
hashcount = hashcount + 1
elif re.match(regex2, item):
if re.match(regex3, item) or re.match(regex4, item):
BagOfLinks.append(item)
else:
BagOfWords.append(item)
wordcount = wordcount + 1
else:
pass
else:
pass
BigBag = BagOfWords + BagOfHashes
IgnoreThese = []
filtered_words = []
for word in BigBag:
if word.lower() not in stopwords.words() and word.lower(
) not in IgnoreThese:
filtered_words.append(word.lower())
word_string = ' '.join(filtered_words)
with open('wordcloud.txt', 'w') as f:
f.write(word_string)
with open('tableau.txt', 'w') as f:
for s in filtered_words:
f.write('%s\n' % s)
TwitterWordCloud = WordCloud(width=800, height=800, background_color=
'white', stopwords=None, min_font_size=10).generate(word_string)
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(TwitterWordCloud)
plt.axis('off')
plt.tight_layout(pad=0)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import re
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
hfilename = 'file.txt'
linecount = 0
hashcount = 0
wordcount = 0
BagOfWords = []
BagOfHashes = []
BagOfLinks = []
with open(hfilename, 'r') as file:
for line in file:
tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)
WordList = tweetSplitter.tokenize(line)
regex1 = re.compile('^#.+')
regex2 = re.compile('[^\\W\\d]')
regex3 = re.compile('^http*')
regex4 = re.compile('.+\\..+')
for item in WordList:
if len(item) > 2:
if re.match(regex1, item):
newitem = item[1:]
BagOfHashes.append(newitem)
hashcount = hashcount + 1
elif re.match(regex2, item):
if re.match(regex3, item) or re.match(regex4, item):
BagOfLinks.append(item)
else:
BagOfWords.append(item)
wordcount = wordcount + 1
else:
pass
else:
pass
BigBag = BagOfWords + BagOfHashes
IgnoreThese = []
filtered_words = []
for word in BigBag:
if word.lower() not in stopwords.words() and word.lower(
) not in IgnoreThese:
filtered_words.append(word.lower())
word_string = ' '.join(filtered_words)
with open('wordcloud.txt', 'w') as f:
f.write(word_string)
with open('tableau.txt', 'w') as f:
for s in filtered_words:
f.write('%s\n' % s)
TwitterWordCloud = WordCloud(width=800, height=800, background_color=
'white', stopwords=None, min_font_size=10).generate(word_string)
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(TwitterWordCloud)
plt.axis('off')
plt.tight_layout(pad=0)
plt.show()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 2 16:07:25 2018
@author: Yigao
"""
import re
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
## create a tokenizer
hfilename = "file.txt"
linecount=0
hashcount=0
wordcount=0
BagOfWords=[]
BagOfHashes=[]
BagOfLinks=[]
with open(hfilename, "r") as file:
for line in file:
#print(line,"\n")
tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)
WordList=tweetSplitter.tokenize(line)
#WordList2=word_tokenize(line)
#linecount=linecount+1
#print(WordList)
#print(len(WordList))
#print(WordList[0])
#print(WordList2)
#print(len(WordList2))
#print(WordList2[3:6])
#print("NEXT..........\n")
regex1=re.compile('^#.+')
regex2=re.compile('[^\W\d]') #no numbers
regex3=re.compile('^http*')
regex4=re.compile('.+\..+')
for item in WordList:
if(len(item)>2):
if((re.match(regex1,item))):
#print(item)
newitem=item[1:] #remove the hash
BagOfHashes.append(newitem)
hashcount=hashcount+1
elif(re.match(regex2,item)):
if(re.match(regex3,item) or re.match(regex4,item)):
BagOfLinks.append(item)
else:
BagOfWords.append(item)
wordcount=wordcount+1
else:
pass
else:
pass
#print(linecount)
#print(BagOfWords)
#print(BagOfHashes)
#print(BagOfLinks)
BigBag=BagOfWords+BagOfHashes
## create Word Cloud
IgnoreThese=[] #other irrelevant words
filtered_words = [] #list of words ready for wordcloud
for word in BigBag:
if (word.lower() not in stopwords.words()) and (word.lower() not in IgnoreThese):
filtered_words.append(word.lower())
word_string = " ".join(filtered_words)
with open("wordcloud.txt", "w") as f:
f.write(word_string)
with open("tableau.txt", "w") as f:
for s in filtered_words:
f.write("%s\n" % s)
TwitterWordCloud = WordCloud(width = 800, height = 800, background_color = "white", stopwords = None,
min_font_size = 10).generate(word_string)
plt.figure(figsize = (8,8), facecolor = None)
plt.imshow(TwitterWordCloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
|
flexible
|
{
"blob_id": "fd04f6f4a03fdbe40e400d04e5759ef9ef30f974",
"index": 6634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(hfilename, 'r') as file:\n for line in file:\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList = tweetSplitter.tokenize(line)\n regex1 = re.compile('^#.+')\n regex2 = re.compile('[^\\\\W\\\\d]')\n regex3 = re.compile('^http*')\n regex4 = re.compile('.+\\\\..+')\n for item in WordList:\n if len(item) > 2:\n if re.match(regex1, item):\n newitem = item[1:]\n BagOfHashes.append(newitem)\n hashcount = hashcount + 1\n elif re.match(regex2, item):\n if re.match(regex3, item) or re.match(regex4, item):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount = wordcount + 1\n else:\n pass\n else:\n pass\n<mask token>\nfor word in BigBag:\n if word.lower() not in stopwords.words() and word.lower(\n ) not in IgnoreThese:\n filtered_words.append(word.lower())\n<mask token>\nwith open('wordcloud.txt', 'w') as f:\n f.write(word_string)\nwith open('tableau.txt', 'w') as f:\n for s in filtered_words:\n f.write('%s\\n' % s)\n<mask token>\nplt.figure(figsize=(8, 8), facecolor=None)\nplt.imshow(TwitterWordCloud)\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n",
"step-3": "<mask token>\nhfilename = 'file.txt'\nlinecount = 0\nhashcount = 0\nwordcount = 0\nBagOfWords = []\nBagOfHashes = []\nBagOfLinks = []\nwith open(hfilename, 'r') as file:\n for line in file:\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList = tweetSplitter.tokenize(line)\n regex1 = re.compile('^#.+')\n regex2 = re.compile('[^\\\\W\\\\d]')\n regex3 = re.compile('^http*')\n regex4 = re.compile('.+\\\\..+')\n for item in WordList:\n if len(item) > 2:\n if re.match(regex1, item):\n newitem = item[1:]\n BagOfHashes.append(newitem)\n hashcount = hashcount + 1\n elif re.match(regex2, item):\n if re.match(regex3, item) or re.match(regex4, item):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount = wordcount + 1\n else:\n pass\n else:\n pass\nBigBag = BagOfWords + BagOfHashes\nIgnoreThese = []\nfiltered_words = []\nfor word in BigBag:\n if word.lower() not in stopwords.words() and word.lower(\n ) not in IgnoreThese:\n filtered_words.append(word.lower())\nword_string = ' '.join(filtered_words)\nwith open('wordcloud.txt', 'w') as f:\n f.write(word_string)\nwith open('tableau.txt', 'w') as f:\n for s in filtered_words:\n f.write('%s\\n' % s)\nTwitterWordCloud = WordCloud(width=800, height=800, background_color=\n 'white', stopwords=None, min_font_size=10).generate(word_string)\nplt.figure(figsize=(8, 8), facecolor=None)\nplt.imshow(TwitterWordCloud)\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n",
"step-4": "<mask token>\nimport re\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nhfilename = 'file.txt'\nlinecount = 0\nhashcount = 0\nwordcount = 0\nBagOfWords = []\nBagOfHashes = []\nBagOfLinks = []\nwith open(hfilename, 'r') as file:\n for line in file:\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList = tweetSplitter.tokenize(line)\n regex1 = re.compile('^#.+')\n regex2 = re.compile('[^\\\\W\\\\d]')\n regex3 = re.compile('^http*')\n regex4 = re.compile('.+\\\\..+')\n for item in WordList:\n if len(item) > 2:\n if re.match(regex1, item):\n newitem = item[1:]\n BagOfHashes.append(newitem)\n hashcount = hashcount + 1\n elif re.match(regex2, item):\n if re.match(regex3, item) or re.match(regex4, item):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount = wordcount + 1\n else:\n pass\n else:\n pass\nBigBag = BagOfWords + BagOfHashes\nIgnoreThese = []\nfiltered_words = []\nfor word in BigBag:\n if word.lower() not in stopwords.words() and word.lower(\n ) not in IgnoreThese:\n filtered_words.append(word.lower())\nword_string = ' '.join(filtered_words)\nwith open('wordcloud.txt', 'w') as f:\n f.write(word_string)\nwith open('tableau.txt', 'w') as f:\n for s in filtered_words:\n f.write('%s\\n' % s)\nTwitterWordCloud = WordCloud(width=800, height=800, background_color=\n 'white', stopwords=None, min_font_size=10).generate(word_string)\nplt.figure(figsize=(8, 8), facecolor=None)\nplt.imshow(TwitterWordCloud)\nplt.axis('off')\nplt.tight_layout(pad=0)\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 2 16:07:25 2018\n\n@author: Yigao\n\"\"\"\n\nimport re\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\n## create a tokenizer\nhfilename = \"file.txt\"\nlinecount=0\nhashcount=0\nwordcount=0\nBagOfWords=[]\nBagOfHashes=[]\nBagOfLinks=[]\nwith open(hfilename, \"r\") as file:\n for line in file:\n #print(line,\"\\n\")\n tweetSplitter = TweetTokenizer(strip_handles=True, reduce_len=True)\n WordList=tweetSplitter.tokenize(line)\n #WordList2=word_tokenize(line)\n #linecount=linecount+1\n #print(WordList)\n #print(len(WordList))\n #print(WordList[0])\n #print(WordList2)\n #print(len(WordList2))\n #print(WordList2[3:6])\n #print(\"NEXT..........\\n\")\n regex1=re.compile('^#.+')\n regex2=re.compile('[^\\W\\d]') #no numbers\n regex3=re.compile('^http*')\n regex4=re.compile('.+\\..+')\n for item in WordList:\n if(len(item)>2):\n if((re.match(regex1,item))):\n #print(item)\n newitem=item[1:] #remove the hash\n BagOfHashes.append(newitem)\n hashcount=hashcount+1\n elif(re.match(regex2,item)):\n if(re.match(regex3,item) or re.match(regex4,item)):\n BagOfLinks.append(item)\n else:\n BagOfWords.append(item)\n wordcount=wordcount+1\n else:\n pass\n else:\n pass\n#print(linecount) \n#print(BagOfWords)\n#print(BagOfHashes)\n#print(BagOfLinks)\nBigBag=BagOfWords+BagOfHashes\n\n## create Word Cloud\nIgnoreThese=[] #other irrelevant words\nfiltered_words = [] #list of words ready for wordcloud\nfor word in BigBag:\n if (word.lower() not in stopwords.words()) and (word.lower() not in IgnoreThese):\n filtered_words.append(word.lower())\nword_string = \" \".join(filtered_words)\nwith open(\"wordcloud.txt\", \"w\") as f:\n f.write(word_string)\nwith open(\"tableau.txt\", \"w\") as f:\n for s in filtered_words:\n f.write(\"%s\\n\" % s)\nTwitterWordCloud = WordCloud(width = 800, height = 800, background_color = \"white\", stopwords = 
None,\n min_font_size = 10).generate(word_string)\nplt.figure(figsize = (8,8), facecolor = None)\nplt.imshow(TwitterWordCloud)\nplt.axis(\"off\")\nplt.tight_layout(pad = 0)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.