| code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5) |
|---|---|---|---|
from src.utils import QasmUtils
from src.ansatz_elements import AnsatzElement, DoubleExchange
import numpy
class EfficientDoubleExchange(AnsatzElement):
def __init__(self, qubit_pair_1, qubit_pair_2, rescaled_parameter=False, parity_dependence=False, d_exc_correction=False):
self.qubit_pair_1 = qubit_pair_1
self.qubit_pair_2 = qubit_pair_2
self.rescaled_parameter = rescaled_parameter
self.parity_dependence = parity_dependence
self.d_exc_correction = d_exc_correction
super(EfficientDoubleExchange, self).__init__(element='d_exc {}, {}'.format(qubit_pair_1, qubit_pair_2),
element_type=str(self), n_var_parameters=1)
@staticmethod
def second_angle(x):
if x == 0:
return 0
else:
tan_x = numpy.tan(x)
tan_x_squared = tan_x**2
tan_y = ((-tan_x_squared - 1 + numpy.sqrt(tan_x_squared ** 2 + 6 * tan_x_squared + 1)) / (2*tan_x))
return numpy.arctan(tan_y)
# this method constructs an operation that acts approximately as a double partial exchange
@staticmethod
def double_exchange(angle, qubit_pair_1, qubit_pair_2, parity_dependence=False, d_exc_correction=False):
assert len(qubit_pair_1) == 2
assert len(qubit_pair_2) == 2
theta_1 = numpy.pi/2 - angle
qasm = ['']
# 1st exchange + 0-2
qasm.append(QasmUtils.controlled_xz(qubit_pair_2[0], qubit_pair_1[0]))
qasm.append('ry({}) q[{}];\n'.format(theta_1, qubit_pair_2[0]))
qasm.append('cx q[{}], q[{}];\n'.format(qubit_pair_1[0], qubit_pair_2[0]))
qasm.append('ry({}) q[{}];\n'.format(-theta_1, qubit_pair_2[0]))
# 2nd exchange + 1-3
qasm.append(QasmUtils.controlled_xz(qubit_pair_2[1], qubit_pair_1[1]))
qasm.append('ry({}) q[{}];\n'.format(theta_1, qubit_pair_2[1]))
qasm.append('cx q[{}], q[{}];\n'.format(qubit_pair_1[1], qubit_pair_2[1]))
qasm.append('ry({}) q[{}];\n'.format(-theta_1, qubit_pair_2[1]))
# CZ gates
qasm.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
# qasm.append('cz q[{}], q[{}];\n'.format(qubit_pair_1[0], qubit_pair_1[1]))
        if d_exc_correction:
            # correcting the 3rd-order terms approximates the operation of a double exchange
            angle_2 = DoubleExchange.second_angle(angle)
        else:
            # without the correction, the operation approximates a double excitation (with 3rd-order error terms)
            angle_2 = angle
theta_2 = numpy.pi / 2 - angle_2
# 3rd exchange - 0-2
qasm.append('ry({}) q[{}];\n'.format(theta_2, qubit_pair_2[0]))
qasm.append('cx q[{}], q[{}];\n'.format(qubit_pair_1[0], qubit_pair_2[0]))
qasm.append('ry({}) q[{}];\n'.format(-theta_2, qubit_pair_2[0]))
qasm.append(QasmUtils.controlled_xz(qubit_pair_2[0], qubit_pair_1[0], reverse=True))
        # 4th exchange - 1-3
qasm.append('ry({}) q[{}];\n'.format(theta_2, qubit_pair_2[1]))
qasm.append('cx q[{}], q[{}];\n'.format(qubit_pair_1[1], qubit_pair_2[1]))
qasm.append('ry({}) q[{}];\n'.format(-theta_2, qubit_pair_2[1]))
qasm.append(QasmUtils.controlled_xz(qubit_pair_2[1], qubit_pair_1[1], reverse=True))
# correcting for parameter sign
if parity_dependence:
# do not include the first qubit of the second pair
parity_qubits = list(range(min(qubit_pair_1), max(qubit_pair_1))) + list(range(min(qubit_pair_2)+1, max(qubit_pair_2)))
# ladder of CNOT used to determine the parity
cnot_ladder = ['']
for i in range(len(parity_qubits) - 1):
cnot_ladder.append('cx q[{}], q[{}];\n'.format(parity_qubits[i], parity_qubits[i+1]))
if angle > 0:
# applies a CZ correction in front, to get a negative sign for the excitation term, if the parity is 1
# (or the parity of "parity_qubits" is 0)
front = ['']
# this is the CZ that determines the sign of the excitation term
front.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
# this bit determines the parity and applies a CZ to negate the correction if the parity is wrong
front += cnot_ladder
front.append('x q[{}];\n'.format(parity_qubits[-1]))
front.append('cz q[{}], q[{}];\n'.format(parity_qubits[-1], qubit_pair_2[0]))
front.append('x q[{}];\n'.format(parity_qubits[-1]))
front += cnot_ladder[::-1]
                # applies a CZ correction at the rear, to get a positive sign for the excitation
                # term, if the parity is 0 (or the parity of "parity_qubits" is 1)
                rear = ['']
                # the CZ that determines the sign of the excitation term
                rear.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
                # parity correction
rear += cnot_ladder
rear.append('cz q[{}], q[{}];\n'.format(parity_qubits[-1], qubit_pair_2[0]))
rear += cnot_ladder[::-1]
# additional correction of states 010 and 110
rear.append('x q[{}];\n'.format(qubit_pair_2[1]))
rear.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
rear.append('x q[{}];\n'.format(qubit_pair_2[1]))
qasm = front + qasm + rear
else:
front = ['']
# sign correction
front.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
# parity correction
front += cnot_ladder
front.append('cz q[{}], q[{}];\n'.format(parity_qubits[-1], qubit_pair_2[0]))
front += cnot_ladder[::-1]
rear = ['']
# sign correction
rear.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
# parity correction
rear += cnot_ladder
rear.append('x q[{}];\n'.format(parity_qubits[-1]))
rear.append('cz q[{}], q[{}];\n'.format(parity_qubits[-1], qubit_pair_2[0]))
rear.append('x q[{}];\n'.format(parity_qubits[-1]))
rear += cnot_ladder[::-1]
# 010 and 011 correction
rear.append('x q[{}];\n'.format(qubit_pair_2[1]))
rear.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
rear.append('x q[{}];\n'.format(qubit_pair_2[1]))
qasm = front + qasm + rear
else:
if angle > 0:
# adding a correcting CZ gate at the end will result in a minus sign
qasm.append('cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1]))
# qasm.append('cz q[{}], q[{}];\n'.format(qubit_pair_1[0], qubit_pair_1[1]))
            else:
                # adding a correcting CZ gate at the front will result in a plus sign
                qasm = ['cz q[{}], q[{}];\n'.format(qubit_pair_2[0], qubit_pair_2[1])] + qasm
                # 'cz q[{}], q[{}];\n'.format(qubit_pair_1[0], qubit_pair_1[1])
return ''.join(qasm)
def get_qasm(self, var_parameters):
assert len(var_parameters) == 1
parameter = var_parameters[0]
# rescaled parameter (used for easier gradient optimization)
if self.rescaled_parameter:
if var_parameters[0] > 0:
parameter = var_parameters[0] + numpy.tanh(var_parameters[0]**0.5)
else:
parameter = var_parameters[0] + numpy.tanh(-(-var_parameters[0])**0.5)
return self.double_exchange(parameter, self.qubit_pair_1, self.qubit_pair_2,
parity_dependence=self.parity_dependence, d_exc_correction=self.d_exc_correction)
class EfficientDoubleExcitation2(AnsatzElement):
def __init__(self, qubit_pair_1, qubit_pair_2):
self.qubit_pair_1 = qubit_pair_1
self.qubit_pair_2 = qubit_pair_2
super(EfficientDoubleExcitation2, self).__init__(element='optimized_d_exc {}, {}'.format(qubit_pair_1, qubit_pair_2),
element_type=str(self), n_var_parameters=1)
@staticmethod
def efficient_double_excitation_2(angle, qubit_pair_1, qubit_pair_2):
qasm = ['']
theta = angle / 8
# determine the parity of the two pairs
qasm.append('cx q[{}], q[{}];\n'.format(*qubit_pair_1))
qasm.append('x q[{}];\n'.format(qubit_pair_1[1]))
qasm.append('cx q[{}], q[{}];\n'.format(*qubit_pair_2))
qasm.append('x q[{}];\n'.format(qubit_pair_2[1]))
        # apply a partial swap of qubits 0 and 2, controlled by qubits 1 and 3
        qasm.append('cx q[{}], q[{}];\n'.format(qubit_pair_1[0], qubit_pair_2[0]))
        # partial ccc_y operation: eight rx rotations on the control qubit with
        # alternating signs, interleaved with basis-changed cx gates (h-cx-h)
        # acting from the control onto qubits 1, 3 and 2
        qasm.append('rz({}) q[{}];\n'.format(numpy.pi / 2, qubit_pair_1[0]))
        targets = [qubit_pair_1[1], qubit_pair_2[1], qubit_pair_1[1], qubit_pair_2[0],
                   qubit_pair_1[1], qubit_pair_2[1], qubit_pair_1[1]]
        for i, target in enumerate(targets):
            qasm.append('rx({}) q[{}];\n'.format((-1) ** i * theta, qubit_pair_1[0]))
            qasm.append('h q[{}];\n'.format(target))
            qasm.append('cx q[{}], q[{}];\n'.format(qubit_pair_1[0], target))
            qasm.append('h q[{}];\n'.format(target))
        qasm.append('rx({}) q[{}];\n'.format(-theta, qubit_pair_1[0]))
        qasm.append('rz({}) q[{}];\n'.format(-numpy.pi / 2, qubit_pair_1[0]))
qasm.append(QasmUtils.controlled_xz(qubit_pair_1[0], qubit_pair_2[0], reverse=True))
# correct for parity determination
qasm.append('x q[{}];\n'.format(qubit_pair_1[1]))
qasm.append('cx q[{}], q[{}];\n'.format(*qubit_pair_1))
qasm.append('x q[{}];\n'.format(qubit_pair_2[1]))
qasm.append('cx q[{}], q[{}];\n'.format(*qubit_pair_2))
return ''.join(qasm)
def get_qasm(self, var_parameters):
assert len(var_parameters) == 1
parameter = var_parameters[0]
return self.efficient_double_excitation_2(parameter, self.qubit_pair_1, self.qubit_pair_2)
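# --- usage sketch (illustrative addition, not part of the original module) ---
# assumes the repository's src.utils / src.ansatz_elements modules are importable;
# the qubit indices and the parameter value 0.1 are arbitrary examples
if __name__ == '__main__':
    exchange = EfficientDoubleExchange([0, 1], [2, 3])
    excitation = EfficientDoubleExcitation2([0, 1], [2, 3])
    # each ansatz element exposes exactly one variational parameter
    print(exchange.get_qasm([0.1]))
    print(excitation.get_qasm([0.1]))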
| normal |
{
  "blob_id": "24cdbbadc8ff1c7ad5d42eeb518cb6c2b34724a2",
  "index": 263,
  "step-ids": [3, 5, 6, 7, 11]
}
| [3, 5, 6, 7, 11] |
"""Implementation of the Brainpool standard, see
https://tools.ietf.org/pdf/rfc5639.pdf#15
"""
from sage.all import ZZ, GF, EllipticCurve
from utils import increment_seed, embedding_degree, find_integer, SimulatedCurves, VerifiableCurve, \
class_number_check
CHECK_CLASS_NUMBER = False
def gen_brainpool_prime(seed: str, nbits: int) -> ZZ:
    """Generates a prime of length nbits out of the 160-bit seed"""
while True:
p = find_integer(seed, nbits, brainpool_prime=True)
while not (p % 4 == 3 and p.is_prime()):
p += 1
if p.nbits() == nbits:
return p
seed = increment_seed(seed)
class Brainpool(VerifiableCurve):
def __init__(self, seed, p):
super().__init__(seed, p, cofactor_bound=1, cofactor_div=1)
self._standard = "brainpool"
self._category = "brainpool"
self._cofactor = 1
self._original_seed = seed
def security(self):
self._secure = False
try:
curve = EllipticCurve(GF(self._p), [self._a, self._b])
except ArithmeticError:
return
order = curve.__pari__().ellsea(1)
if order == 0:
return
order = ZZ(order)
if order >= self._p:
return
if not order.is_prime():
return
self._embedding_degree = embedding_degree(prime=self._p, order=order)
if not (order - 1) / self._embedding_degree < 100:
return
if CHECK_CLASS_NUMBER and not class_number_check(curve, order, 10 ** 7):
return
self._cardinality = order
self._order = order
self._secure = True
def set_ab(self):
pass
def set_a(self):
self._a = find_integer(self._seed, self._bits)
def check_a(self):
if self._a is None:
return False
try:
c = -3 * self._field(self._a) ** (-1)
c.nth_root(4)
return True
except ValueError:
return False
def set_b(self, b_seed=None):
if b_seed is None:
b_seed = self._seed
self._b = find_integer(b_seed, self._bits)
def check_b(self):
return self._b is not None and not self._field(self._b).is_square()
def seed_update(self, offset=1):
self._seed = increment_seed(self._seed)
def set_seed(self, seed):
self._seed = seed
def generate_generator(self, seed=None):
"""Finds generator of curve as scalar*P where P has smallest x-coordinate"""
if seed is None:
seed = self._seed
scalar = find_integer(increment_seed(seed), self._bits)
x = None
for x in self._field:
if (x ** 3 + self._a * x + self._b).is_square():
break
y = (x ** 3 + self._a * x + self._b).sqrt()
y = ZZ(min(y, self._p - y))
point = scalar * self.curve()(x, y)
self._generator = point[0], point[1]
    def find_curve(self):
        """Generates one Brainpool curve over F_p (p has nbits bits) out of a 160-bit seed"""
self.set_a()
while True:
while not self.check_a():
self.seed_update()
self.set_a()
self.seed_update()
self.set_b()
while not self.check_b():
self.seed_update()
self.set_b()
if not self.secure():
self.seed_update()
continue
self.generate_generator()
break
def generate_brainpool_curves(count: int, p: ZZ, initial_seed: str) -> SimulatedCurves:
    """This is an implementation of the Brainpool standard suitable for large-scale simulations.
    For a more readable implementation, see `Brainpool.find_curve` above.
    """
simulated_curves = SimulatedCurves("brainpool", p.nbits(), initial_seed, count)
curve = Brainpool(initial_seed, p)
b_seed = None
for _ in range(count):
if curve.not_defined():
curve.set_a()
if not curve.check_a():
curve.seed_update()
curve.clear()
continue
b_seed = increment_seed(curve.seed())
curve.set_b(b_seed)
if not curve.check_b():
b_seed = increment_seed(b_seed)
continue
if not curve.secure():
curve.set_seed(increment_seed(b_seed))
curve.clear()
continue
curve.generate_generator(b_seed)
curve.compute_properties()
simulated_curves.add_curve(curve)
curve = Brainpool(curve.seed(), p)
curve.seed_update()
return simulated_curves
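# --- usage sketch (illustrative addition, not part of the original module) ---
# assumes a SageMath environment and the repository's utils module; the seed
# below is an arbitrary 160-bit hex string chosen purely for illustration
if __name__ == '__main__':
    seed = '3243f6a8885a308d313198a2e03707344a409382'  # hypothetical seed
    p = gen_brainpool_prime(seed, 160)
    curve = Brainpool(seed, p)
    curve.find_curve()  # increments the seed until a secure curve is found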
| flexible |
{
  "blob_id": "b717abaeecea2e97c6ec78d3e0e4c97a8de5eec3",
  "index": 9169,
  "step-ids": [8, 9, 10, 12, 17]
}
|
[
8,
9,
10,
12,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while t:
x = list(map(int, input().split()))
x.sort()
if x[0] + x[1] == x[2]:
print('YES')
else:
print('NO')
t -= 1
<|reserved_special_token_1|>
t = int(input())
while t:
x = list(map(int, input().split()))
x.sort()
if x[0] + x[1] == x[2]:
print('YES')
else:
print('NO')
t -= 1
<|reserved_special_token_1|>
t = int(input())
while t:
x = list(map(int, input().split()))
x.sort()
if(x[0]+x[1]==x[2]):
print("YES")
else:
print("NO")
t-=1
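# Illustrative note (not part of the original solution; sample session assumed):
# after sorting, the check x[0] + x[1] == x[2] asks whether the two smaller
# numbers sum exactly to the largest one, e.g.
#   input:  2
#           1 2 3
#           2 2 5
#   output: YES
#           NO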
|
flexible
|
{
"blob_id": "d1200006b8d7a18b11b01eff4fbf38d9dfd8958e",
"index": 5758,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n x = list(map(int, input().split()))\n x.sort()\n if x[0] + x[1] == x[2]:\n print('YES')\n else:\n print('NO')\n t -= 1\n",
"step-3": "t = int(input())\nwhile t:\n x = list(map(int, input().split()))\n x.sort()\n if x[0] + x[1] == x[2]:\n print('YES')\n else:\n print('NO')\n t -= 1\n",
"step-4": "t = int(input())\r\nwhile t:\r\n\tx = list(map(int, input().split()))\r\n\tx.sort()\r\n\tif(x[0]+x[1]==x[2]):\r\n\t\tprint(\"YES\")\r\n\telse:\r\n\t\tprint(\"NO\")\r\n\tt-=1",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class AutoTestCase(APITestCase):
<|reserved_special_token_0|>
@classmethod
def setUpClass(cls):
"""
        Creates a user shared by all tests, picked up via `settings.AUTH_USER_PK`
"""
super(AutoTestCase, cls).setUpClass()
model_instance(get_user_model())
def setUp(self):
"""
        Prepares the test request: fetches data from the REQUESTS_DATA dict
        and creates / fetches the objects whose keys are used in the URL.
"""
self.endpoint, self.method, self.serializer, self.request_type = (
REQUESTS_DATA.get(self._testMethodName))
path = self.endpoint.path
if '<pk>' in path:
obj = model_instance(self.endpoint.callback.cls.queryset.model)
path = path.replace('<pk>', str(obj.pk))
self.path = path
if hasattr(self.endpoint.callback.cls, 'test_setup'):
getattr(self.endpoint.callback.cls, 'test_setup')(self)
def base_test_method(self):
"""
        Method that tests the endpoint received from the iterator.
"""
request_method = getattr(self.client, self.method.lower())
if self.serializer:
if self.request_type == 'all':
data = self.prepare_request_data(self.serializer)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'only_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'without_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
data.popitem()
response = self.send_request(request_method, self.path,
data, 'json')
self.assertTrue(400 <= response.status_code < 500)
else:
response = self.send_request(request_method, self.path)
self.check_response_is_valid(response)
<|reserved_special_token_0|>
def send_request(self, request_method, path, data=None, format_type=None):
"""
        Sends a request.
        :param method request_method: The client method.
        :param str path: URL.
        :param dict data: Data for the request.
        :param str format_type: The data format.
        :return: The response.
:rtype: `rest_framework.response.Response`.
"""
kwargs = dict(data=data, format=format_type)
if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'
)(self, **kwargs)
self.data = data
        print_strings = ['Sending {} to {}'.format(request_method.__name__,
path)]
if data is not None:
            print_strings.append('with data')
log.debug(' '.join(print_strings + ['\n']))
return request_method(path, **kwargs)
def check_response_is_valid(self, response):
"""
        Checks that the response is successful and well-formed.
        :param `rest_framework.response.Response` response: The response.
"""
self.assertTrue(200 <= response.status_code < 400)
response_serializer = get_serializer(self.endpoint, self.method, 'out')
if response_serializer:
self.check_response_data(response.data, response_serializer)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_serializer(endpoint, method_name, dict_key='in'):
"""
    Returns the serializer class, if there is one for the given endpoint and method.
    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: The method.
    :param str dict_key: Key into the serializer dict, either 'in' or 'out'.
    :return: The serializer class, or None.
"""
methods = [method_name]
if method_name == 'PATCH':
methods.append('PUT')
for method in methods:
if method in endpoint.serializer_classes and isinstance(endpoint.
serializer_classes[method], dict
) and dict_key in endpoint.serializer_classes[method]:
return endpoint.serializer_classes[method][dict_key]
def resolve_deferred(value):
"""
    Replaces a `Deferred` object with the pk of an instance of `Deferred.model`.
    :param any value: Any object.
"""
if isinstance(value, Deferred):
obj = model_instance(value.model, value.force_create)
return obj.pk
elif isinstance(value, dict):
return {resolve_deferred(k): resolve_deferred(v) for k, v in value.
items()}
elif isinstance(value, list):
return [resolve_deferred(v) for v in value]
return value
<|reserved_special_token_0|>
class AutoTestCase(APITestCase):
"""
    Class for automatic testing of REST API endpoints.
"""
@classmethod
def setUpClass(cls):
"""
        Creates a user shared by all tests, picked up via `settings.AUTH_USER_PK`
"""
super(AutoTestCase, cls).setUpClass()
model_instance(get_user_model())
def setUp(self):
"""
        Prepares the test request: fetches data from the REQUESTS_DATA dict
        and creates / fetches the objects whose keys are used in the URL.
"""
self.endpoint, self.method, self.serializer, self.request_type = (
REQUESTS_DATA.get(self._testMethodName))
path = self.endpoint.path
if '<pk>' in path:
obj = model_instance(self.endpoint.callback.cls.queryset.model)
path = path.replace('<pk>', str(obj.pk))
self.path = path
if hasattr(self.endpoint.callback.cls, 'test_setup'):
getattr(self.endpoint.callback.cls, 'test_setup')(self)
def base_test_method(self):
"""
        Method that tests the endpoint received from the iterator.
"""
request_method = getattr(self.client, self.method.lower())
if self.serializer:
if self.request_type == 'all':
data = self.prepare_request_data(self.serializer)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'only_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'without_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
data.popitem()
response = self.send_request(request_method, self.path,
data, 'json')
self.assertTrue(400 <= response.status_code < 500)
else:
response = self.send_request(request_method, self.path)
self.check_response_is_valid(response)
def prepare_request_data(self, field, only_required=False):
"""
        Prepares data for the request.
        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: A field or serializer object.
        :param bool only_required: Whether to use only the required fields.
        :return: Data for the client to send.
:rtype: list, dict.
"""
if isinstance(field, serializers.SerializerMetaclass):
return self.prepare_request_data(field())
elif hasattr(field, 'test_helper_value'):
return resolve_deferred(field.test_helper_value)
elif isinstance(field, serializers.ListSerializer):
return [self.prepare_request_data(field.child)]
elif isinstance(field, serializers.BaseSerializer):
return {k: self.prepare_request_data(v) for k, v in field.
get_fields().items() if not only_required or only_required and
v.required}
elif isinstance(field, serializers.ChoiceField):
for val, verbose in field.choices.items():
return val
elif isinstance(field, serializers.PrimaryKeyRelatedField):
return model_instance(field.queryset.model).pk
elif isinstance(field, serializers.CharField):
return 'test'
elif isinstance(field, serializers.IntegerField):
return 1
def send_request(self, request_method, path, data=None, format_type=None):
"""
        Sends a request.
        :param method request_method: The client method.
        :param str path: URL.
        :param dict data: Data for the request.
        :param str format_type: The data format.
        :return: The response.
:rtype: `rest_framework.response.Response`.
"""
kwargs = dict(data=data, format=format_type)
if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'
)(self, **kwargs)
self.data = data
        print_strings = ['Sending {} to {}'.format(request_method.__name__,
path)]
if data is not None:
            print_strings.append('with data')
log.debug(' '.join(print_strings + ['\n']))
return request_method(path, **kwargs)
def check_response_is_valid(self, response):
"""
        Checks that the response is successful and well-formed.
        :param `rest_framework.response.Response` response: The response.
"""
self.assertTrue(200 <= response.status_code < 400)
response_serializer = get_serializer(self.endpoint, self.method, 'out')
if response_serializer:
self.check_response_data(response.data, response_serializer)
def check_response_data(self, data, field):
"""
        Checks the data in the response.
        :param any data: The `Response.data` dict or one of its values.
        :param any field: A serializer or field to check the response data against.
"""
"""
if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:
serializer = self.endpoint.serializer_classes[method_name]['out'](
self.endpoint.callback.cls.queryset, many=True)
self.assertEqual(response.data, serializer.data)
"""
if isinstance(field, serializers.SerializerMetaclass):
return self.check_response_data(data, field())
"""
if 'results' in data and 'count' in data:
for item in data['results']:
self.check_response_data(item, out_fields)
else:
for field_name, value in data.items():
try:
field_data = fields[field_name]
except:
import pdb; pdb.set_trace()
                # Check that the field is among those expected in the response
self.assertTrue(field_name in available_fields)
available_fields.remove(field_name)
if field_name in required_fields:
required_fields.remove(field_name)
if field_data['sub_fields']:
if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
for key, item in data[field_name].items():
self.check_response_data(item, field_data['sub_fields'])
else:
self.check_response_data(data[field_name], field_data['sub_fields'])
else:
field_instance = field_data['field_instance']
                    # Check the value if the field is required or a value is present in the response
                    if field_data['required'] or value is not None:
                        # Check the field type
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))
                        # Check that the value is valid (an exception is raised otherwise)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)
        # Make sure all required fields are present in the response
self.assertEqual(len(required_fields), 0)
"""
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_serializer(endpoint, method_name, dict_key='in'):
"""
    Returns the serializer class, if there is one for the given endpoint and method.
    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: The method.
    :param str dict_key: Key into the serializer dict, either 'in' or 'out'.
    :return: The serializer class, or None.
"""
methods = [method_name]
if method_name == 'PATCH':
methods.append('PUT')
for method in methods:
if method in endpoint.serializer_classes and isinstance(endpoint.
serializer_classes[method], dict
) and dict_key in endpoint.serializer_classes[method]:
return endpoint.serializer_classes[method][dict_key]
def resolve_deferred(value):
"""
    Replaces a `Deferred` object with the pk of an instance of `Deferred.model`.
    :param any value: Any object.
"""
if isinstance(value, Deferred):
obj = model_instance(value.model, value.force_create)
return obj.pk
elif isinstance(value, dict):
return {resolve_deferred(k): resolve_deferred(v) for k, v in value.
items()}
elif isinstance(value, list):
return [resolve_deferred(v) for v in value]
return value
def model_instance(model, force_create=False):
"""
    Creates and returns a model instance.
    :param any model: The model.
    :param bool force_create: Create a new object instead of fetching an existing one.
    :return: The model instance.
:rtype: models.Model.
"""
if not force_create and model.objects.all().count() > 0:
return model.objects.first()
data = {}
for field in model._meta.get_fields():
if not field.auto_created and not field.blank:
if hasattr(field, 'choices') and len(field.choices) > 0:
data[field.name] = field.choices[0][0]
elif isinstance(field, models.IntegerField):
data[field.name] = 1
elif isinstance(field, models.ForeignKey):
data[field.name] = model_instance(field.related_model)
elif isinstance(field, models.CharField):
data[field.name] = 'test'
return model.objects.create(**data)
class AutoTestCase(APITestCase):
"""
    Class for automatic testing of REST API endpoints.
"""
@classmethod
def setUpClass(cls):
"""
        Creates a user shared by all tests, picked up via `settings.AUTH_USER_PK`
"""
super(AutoTestCase, cls).setUpClass()
model_instance(get_user_model())
def setUp(self):
"""
        Prepares the test request: fetches data from the REQUESTS_DATA dict
        and creates / fetches the objects whose keys are used in the URL.
"""
self.endpoint, self.method, self.serializer, self.request_type = (
REQUESTS_DATA.get(self._testMethodName))
path = self.endpoint.path
if '<pk>' in path:
obj = model_instance(self.endpoint.callback.cls.queryset.model)
path = path.replace('<pk>', str(obj.pk))
self.path = path
if hasattr(self.endpoint.callback.cls, 'test_setup'):
getattr(self.endpoint.callback.cls, 'test_setup')(self)
def base_test_method(self):
"""
        Method that tests the endpoint received from the iterator.
"""
request_method = getattr(self.client, self.method.lower())
if self.serializer:
if self.request_type == 'all':
data = self.prepare_request_data(self.serializer)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'only_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'without_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
data.popitem()
response = self.send_request(request_method, self.path,
data, 'json')
self.assertTrue(400 <= response.status_code < 500)
else:
response = self.send_request(request_method, self.path)
self.check_response_is_valid(response)
def prepare_request_data(self, field, only_required=False):
"""
        Prepares data for the request.
        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: A field or serializer object.
        :param bool only_required: Whether to use only the required fields.
        :return: Data for the client to send.
:rtype: list, dict.
"""
if isinstance(field, serializers.SerializerMetaclass):
return self.prepare_request_data(field())
elif hasattr(field, 'test_helper_value'):
return resolve_deferred(field.test_helper_value)
elif isinstance(field, serializers.ListSerializer):
return [self.prepare_request_data(field.child)]
elif isinstance(field, serializers.BaseSerializer):
return {k: self.prepare_request_data(v) for k, v in field.
get_fields().items() if not only_required or only_required and
v.required}
elif isinstance(field, serializers.ChoiceField):
for val, verbose in field.choices.items():
return val
elif isinstance(field, serializers.PrimaryKeyRelatedField):
return model_instance(field.queryset.model).pk
elif isinstance(field, serializers.CharField):
return 'test'
elif isinstance(field, serializers.IntegerField):
return 1
def send_request(self, request_method, path, data=None, format_type=None):
"""
        Sends a request.
        :param method request_method: The client method.
        :param str path: URL.
        :param dict data: Data for the request.
        :param str format_type: The data format.
        :return: The response.
:rtype: `rest_framework.response.Response`.
"""
kwargs = dict(data=data, format=format_type)
if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'
)(self, **kwargs)
self.data = data
        print_strings = ['Sending {} to {}'.format(request_method.__name__,
path)]
if data is not None:
            print_strings.append('with data')
log.debug(' '.join(print_strings + ['\n']))
return request_method(path, **kwargs)
def check_response_is_valid(self, response):
"""
        Checks that the response is successful and well-formed.
        :param `rest_framework.response.Response` response: The response.
"""
self.assertTrue(200 <= response.status_code < 400)
response_serializer = get_serializer(self.endpoint, self.method, 'out')
if response_serializer:
self.check_response_data(response.data, response_serializer)
def check_response_data(self, data, field):
"""
        Checks the data in the response.
        :param any data: The `Response.data` dict or one of its values.
        :param any field: A serializer or field to check the response data against.
"""
"""
if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:
serializer = self.endpoint.serializer_classes[method_name]['out'](
self.endpoint.callback.cls.queryset, many=True)
self.assertEqual(response.data, serializer.data)
"""
if isinstance(field, serializers.SerializerMetaclass):
return self.check_response_data(data, field())
"""
if 'results' in data and 'count' in data:
for item in data['results']:
self.check_response_data(item, out_fields)
else:
for field_name, value in data.items():
try:
field_data = fields[field_name]
except:
import pdb; pdb.set_trace()
                # Check that the field is among those expected in the response
self.assertTrue(field_name in available_fields)
available_fields.remove(field_name)
if field_name in required_fields:
required_fields.remove(field_name)
if field_data['sub_fields']:
if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
for key, item in data[field_name].items():
self.check_response_data(item, field_data['sub_fields'])
else:
self.check_response_data(data[field_name], field_data['sub_fields'])
else:
field_instance = field_data['field_instance']
                    # Check the value if the field is required or a value is present in the response
                    if field_data['required'] or value is not None:
                        # Check the field type
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))
                        # Check that the value is valid (an exception is raised otherwise)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)
        # Make sure all required fields are present in the response
self.assertEqual(len(required_fields), 0)
"""
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log = logging.getLogger(__name__)
def get_serializer(endpoint, method_name, dict_key='in'):
"""
    Returns the serializer class, if there is one for the given endpoint and method.
    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: The method.
    :param str dict_key: Key into the serializer dict, either 'in' or 'out'.
    :return: The serializer class, or None.
"""
methods = [method_name]
if method_name == 'PATCH':
methods.append('PUT')
for method in methods:
if method in endpoint.serializer_classes and isinstance(endpoint.
serializer_classes[method], dict
) and dict_key in endpoint.serializer_classes[method]:
return endpoint.serializer_classes[method][dict_key]
def resolve_deferred(value):
"""
    Replaces a `Deferred` object with the pk of an instance of `Deferred.model`.
    :param any value: Any object.
"""
if isinstance(value, Deferred):
obj = model_instance(value.model, value.force_create)
return obj.pk
elif isinstance(value, dict):
return {resolve_deferred(k): resolve_deferred(v) for k, v in value.
items()}
elif isinstance(value, list):
return [resolve_deferred(v) for v in value]
return value
def model_instance(model, force_create=False):
"""
    Creates and returns a model instance.
    :param any model: The model.
    :param bool force_create: Create a new object instead of fetching an existing one.
    :return: The model instance.
:rtype: models.Model.
"""
if not force_create and model.objects.all().count() > 0:
return model.objects.first()
data = {}
for field in model._meta.get_fields():
if not field.auto_created and not field.blank:
if hasattr(field, 'choices') and len(field.choices) > 0:
data[field.name] = field.choices[0][0]
elif isinstance(field, models.IntegerField):
data[field.name] = 1
elif isinstance(field, models.ForeignKey):
data[field.name] = model_instance(field.related_model)
elif isinstance(field, models.CharField):
data[field.name] = 'test'
return model.objects.create(**data)
class AutoTestCase(APITestCase):
"""
    Class for automatic testing of REST API endpoints.
"""
@classmethod
def setUpClass(cls):
"""
        Creates a user shared by all tests, picked up via `settings.AUTH_USER_PK`
"""
super(AutoTestCase, cls).setUpClass()
model_instance(get_user_model())
def setUp(self):
"""
        Prepares the test request: fetches data from the REQUESTS_DATA dict
        and creates / fetches the objects whose keys are used in the URL.
"""
self.endpoint, self.method, self.serializer, self.request_type = (
REQUESTS_DATA.get(self._testMethodName))
path = self.endpoint.path
if '<pk>' in path:
obj = model_instance(self.endpoint.callback.cls.queryset.model)
path = path.replace('<pk>', str(obj.pk))
self.path = path
if hasattr(self.endpoint.callback.cls, 'test_setup'):
getattr(self.endpoint.callback.cls, 'test_setup')(self)
def base_test_method(self):
"""
        Method that tests the endpoint received from the iterator.
"""
request_method = getattr(self.client, self.method.lower())
if self.serializer:
if self.request_type == 'all':
data = self.prepare_request_data(self.serializer)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'only_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
response = self.send_request(request_method, self.path,
data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'without_required':
data = self.prepare_request_data(self.serializer,
only_required=True)
data.popitem()
response = self.send_request(request_method, self.path,
data, 'json')
self.assertTrue(400 <= response.status_code < 500)
else:
response = self.send_request(request_method, self.path)
self.check_response_is_valid(response)
def prepare_request_data(self, field, only_required=False):
"""
        Prepares data for the request.
        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: A field or serializer object.
        :param bool only_required: Whether to use only the required fields.
        :return: Data for the client to send.
:rtype: list, dict.
"""
if isinstance(field, serializers.SerializerMetaclass):
return self.prepare_request_data(field())
elif hasattr(field, 'test_helper_value'):
return resolve_deferred(field.test_helper_value)
elif isinstance(field, serializers.ListSerializer):
return [self.prepare_request_data(field.child)]
elif isinstance(field, serializers.BaseSerializer):
return {k: self.prepare_request_data(v) for k, v in field.
get_fields().items() if not only_required or only_required and
v.required}
elif isinstance(field, serializers.ChoiceField):
for val, verbose in field.choices.items():
return val
elif isinstance(field, serializers.PrimaryKeyRelatedField):
return model_instance(field.queryset.model).pk
elif isinstance(field, serializers.CharField):
return 'test'
elif isinstance(field, serializers.IntegerField):
return 1
def send_request(self, request_method, path, data=None, format_type=None):
"""
        Sends a request.
        :param method request_method: The client method.
        :param str path: URL.
        :param dict data: Data for the request.
        :param str format_type: The data format.
        :return: The response.
:rtype: `rest_framework.response.Response`.
"""
kwargs = dict(data=data, format=format_type)
if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'
)(self, **kwargs)
self.data = data
        print_strings = ['Sending {} to {}'.format(request_method.__name__,
path)]
if data is not None:
            print_strings.append('with data')
log.debug(' '.join(print_strings + ['\n']))
return request_method(path, **kwargs)
def check_response_is_valid(self, response):
"""
        Checks that the response is successful and well-formed.
        :param `rest_framework.response.Response` response: The response.
"""
self.assertTrue(200 <= response.status_code < 400)
response_serializer = get_serializer(self.endpoint, self.method, 'out')
if response_serializer:
self.check_response_data(response.data, response_serializer)
def check_response_data(self, data, field):
"""
        Checks the data in the response.
        :param any data: The `Response.data` dict or one of its values.
        :param any field: A serializer or field to check the response data against.
"""
"""
if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:
serializer = self.endpoint.serializer_classes[method_name]['out'](
self.endpoint.callback.cls.queryset, many=True)
self.assertEqual(response.data, serializer.data)
"""
if isinstance(field, serializers.SerializerMetaclass):
return self.check_response_data(data, field())
"""
if 'results' in data and 'count' in data:
for item in data['results']:
self.check_response_data(item, out_fields)
else:
for field_name, value in data.items():
try:
field_data = fields[field_name]
except:
import pdb; pdb.set_trace()
                # Check that the field is among those expected in the response
self.assertTrue(field_name in available_fields)
available_fields.remove(field_name)
if field_name in required_fields:
required_fields.remove(field_name)
if field_data['sub_fields']:
if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
for key, item in data[field_name].items():
self.check_response_data(item, field_data['sub_fields'])
else:
self.check_response_data(data[field_name], field_data['sub_fields'])
else:
field_instance = field_data['field_instance']
                    # Check the value if the field is required or a value is present in the response
                    if field_data['required'] or value is not None:
                        # Check the field type
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))
                        # Check that the value is valid (an exception is raised otherwise)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)
        # Make sure all required fields are present in the response
self.assertEqual(len(required_fields), 0)
"""
ENDPOINTS = ApiDocumentation().get_endpoints()
ENDPOINTS = [ep for ep in ENDPOINTS]
REQUESTS_LIST = []
for endpoint in ENDPOINTS:
for method in endpoint.allowed_methods:
serializer = get_serializer(endpoint, method)
if serializer:
for request_type in ('all', 'only_required'):
REQUESTS_LIST.append((endpoint, method, serializer,
request_type))
else:
REQUESTS_LIST.append((endpoint, method, serializer, None))
REQUESTS_DATA = {}
for endpoint, method, serializer, request_type in REQUESTS_LIST:
method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method,
request_type)
REQUESTS_DATA[method_name] = endpoint, method, serializer, request_type
setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)
<|reserved_special_token_1|>
import logging
from django.contrib.auth import get_user_model
from django.db import models
from rest_framework import serializers
from rest_framework.test import APITestCase
from ..autodocs.docs import ApiDocumentation
from .utils import Deferred
log = logging.getLogger(__name__)
def get_serializer(endpoint, method_name, dict_key='in'):
"""
    Returns the serializer class, if there is one for the given endpoint and method.
    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: The method.
    :param str dict_key: Key into the serializer dict, either 'in' or 'out'.
    :return: The serializer class, or None.
"""
methods = [method_name]
    # If we are testing the PATCH method and it has no serializer of its own, use the PUT one.
if method_name == 'PATCH':
methods.append('PUT')
for method in methods:
if method in endpoint.serializer_classes and \
isinstance(endpoint.serializer_classes[method], dict) and \
dict_key in endpoint.serializer_classes[method]:
return endpoint.serializer_classes[method][dict_key]
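# Illustrative sketch (not part of the original module; view and serializer names
# are hypothetical): `get_serializer` expects a per-method mapping on the view, e.g.
#
#     class ArticleView(APIView):
#         serializer_classes = {
#             'POST': {'in': ArticleCreateSerializer, 'out': ArticleSerializer},
#             'PUT': {'in': ArticleUpdateSerializer},
#         }
#
# Then get_serializer(endpoint, 'POST') -> ArticleCreateSerializer,
# get_serializer(endpoint, 'POST', 'out') -> ArticleSerializer, and a 'PATCH'
# lookup falls back to the 'PUT' entry when no PATCH serializer is registered.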
def resolve_deferred(value):
"""
    Replaces a `Deferred` object with the pk of an instance of `Deferred.model`.
    :param any value: Any object.
"""
if isinstance(value, Deferred):
obj = model_instance(value.model, value.force_create)
return obj.pk
elif isinstance(value, dict):
return {resolve_deferred(k): resolve_deferred(v) for k,v in value.items()}
elif isinstance(value, list):
return [resolve_deferred(v) for v in value]
return value
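# Illustrative sketch (not part of the original module; assumes `Deferred` carries
# a model class and a force_create flag): a nested payload such as
#
#     {'owner': Deferred(User), 'tags': [Deferred(Tag), Deferred(Tag, force_create=True)]}
#
# resolves to {'owner': <existing User pk>, 'tags': [<Tag pk>, <new Tag pk>]},
# i.e. every Deferred is swapped for the pk of a fetched or freshly created instance.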
def model_instance(model, force_create=False):
"""
    Creates and returns a model instance.
    :param any model: The model.
    :param bool force_create: Create a new object instead of fetching an existing one.
    :return: The model instance.
:rtype: models.Model.
"""
if not force_create and model.objects.all().count() > 0:
return model.objects.first()
data = {}
for field in model._meta.get_fields():
if not field.auto_created and not field.blank:
if hasattr(field, 'choices') and len(field.choices) > 0:
data[field.name] = field.choices[0][0]
elif isinstance(field, models.IntegerField):
data[field.name] = 1
elif isinstance(field, models.ForeignKey):
data[field.name] = model_instance(field.related_model)
elif isinstance(field, models.CharField):
data[field.name] = 'test'
return model.objects.create(**data)
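# Illustrative sketch (hypothetical model, not part of the original module): for
#
#     class Article(models.Model):
#         title = models.CharField(max_length=100)
#         rating = models.IntegerField()
#         author = models.ForeignKey(User, on_delete=models.CASCADE)
#
# model_instance(Article) reuses an existing row when one exists; otherwise it
# fills title='test' and rating=1, recursively builds the related User, and
# calls Article.objects.create(**data).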
class AutoTestCase(APITestCase):
"""
    Class for automatic testing of REST API endpoints.
"""
@classmethod
def setUpClass(cls):
"""
        Creates a user shared by all tests, picked up via `settings.AUTH_USER_PK`
"""
super(AutoTestCase, cls).setUpClass()
model_instance(get_user_model())
def setUp(self):
"""
        Prepares the test request: fetches data from the REQUESTS_DATA dict
        and creates / fetches the objects whose keys are used in the URL.
"""
self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)
path = self.endpoint.path
if '<pk>' in path:
obj = model_instance(self.endpoint.callback.cls.queryset.model)
path = path.replace('<pk>', str(obj.pk))
self.path = path
if hasattr(self.endpoint.callback.cls, 'test_setup'):
getattr(self.endpoint.callback.cls, 'test_setup')(self)
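    # Illustrative note (hypothetical route): an endpoint registered at
    # '/api/articles/<pk>/' is rewritten in setUp to e.g. '/api/articles/1/',
    # where 1 is the pk of an instance created or fetched for the detail URL.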
def base_test_method(self):
"""
        Method that tests the endpoint received from the iterator.
"""
request_method = getattr(self.client, self.method.lower())
if self.serializer:
if self.request_type == 'all':
                # Request with all input data.
data = self.prepare_request_data(self.serializer)
response = self.send_request(request_method, self.path, data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'only_required':
                # Request with only the required data.
data = self.prepare_request_data(self.serializer, only_required=True)
response = self.send_request(request_method, self.path, data, 'json')
self.check_response_is_valid(response)
elif self.request_type == 'without_required':
                # Request with some required data missing.
data = self.prepare_request_data(self.serializer, only_required=True)
data.popitem()
response = self.send_request(request_method, self.path, data, 'json')
self.assertTrue(400 <= response.status_code < 500)
else:
            # Request with no input data.
response = self.send_request(request_method, self.path)
self.check_response_is_valid(response)
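    # Illustrative note: in the 'without_required' branch above, data.popitem()
    # drops one (arbitrary) required field from the payload, so the endpoint is
    # expected to answer with a 4xx status; the other branches expect success.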
def prepare_request_data(self, field, only_required=False):
"""
        Prepares data for the request.
        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: A field or serializer object.
        :param bool only_required: Whether to use only the required fields.
        :return: Data for the client to send.
:rtype: list, dict.
"""
        # If this is a serializer class rather than an instance.
if isinstance(field, serializers.SerializerMetaclass):
return self.prepare_request_data(field())
        # Or there is a test value set via `test_helper_factory`.
elif hasattr(field, 'test_helper_value'):
return resolve_deferred(field.test_helper_value)
        # Or it is a list.
elif isinstance(field, serializers.ListSerializer):
return [self.prepare_request_data(field.child)]
        # Or it is a serializer instance.
elif isinstance(field, serializers.BaseSerializer):
return {k: self.prepare_request_data(v) for k,v in field.get_fields().items() \
if (not only_required) or (only_required and v.required)}
        # Or it is a field.
elif isinstance(field, serializers.ChoiceField):
for val, verbose in field.choices.items():
return val
elif isinstance(field, serializers.PrimaryKeyRelatedField):
return model_instance(field.queryset.model).pk
elif isinstance(field, serializers.CharField):
return 'test'
elif isinstance(field, serializers.IntegerField):
return 1
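    # Illustrative sketch (hypothetical serializer, not part of the original
    # module): given
    #
    #     class CommentSerializer(serializers.Serializer):
    #         text = serializers.CharField()
    #         rating = serializers.ChoiceField(choices=[(1, 'one'), (2, 'two')])
    #         author = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
    #
    # prepare_request_data(CommentSerializer) returns roughly
    #     {'text': 'test', 'rating': 1, 'author': <pk of a User instance>}
    # and with only_required=True any field declared with required=False is skipped.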
def send_request(self, request_method, path, data=None, format_type=None):
"""
        Sends a request.
        :param method request_method: The client method.
        :param str path: URL.
        :param dict data: Data for the request.
        :param str format_type: The data format.
        :return: The response.
:rtype: `rest_framework.response.Response`.
"""
kwargs = dict(data=data, format=format_type)
if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)
self.data = data
        print_strings = ['Sending {} to {}'.format(request_method.__name__, path)]
if data is not None:
            print_strings.append('with data')
log.debug(' '.join(print_strings + ['\n']))
return request_method(path, **kwargs)
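    # Illustrative note (hook signature inferred from the call site above): a view
    # class may adjust the request kwargs right before sending, e.g.
    #
    #     @staticmethod
    #     def test_prepare_request(test_case, **kwargs):
    #         kwargs['format'] = 'multipart'
    #         return kwargs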
def check_response_is_valid(self, response):
"""
        Checks that the response is successful and well-formed.
        :param `rest_framework.response.Response` response: The response.
"""
self.assertTrue(200 <= response.status_code < 400)
response_serializer = get_serializer(self.endpoint, self.method, 'out')
if response_serializer:
self.check_response_data(response.data, response_serializer)
def check_response_data(self, data, field):
"""
        Checks the data in the response.
        :param any data: The `Response.data` dict or one of its values.
        :param any field: A serializer or field to check the response data against.
"""
        # @TODO: Checking against the serializer's data is not currently possible,
        # because something happens to the QuerySet that makes serializer.data raise a RuntimeError.
'''
if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \
'out' in self.endpoint.serializer_classes[method_name]:
serializer = self.endpoint.serializer_classes[method_name]['out'](
self.endpoint.callback.cls.queryset, many=True)
self.assertEqual(response.data, serializer.data)
'''
        # If this is a serializer class rather than an instance.
if isinstance(field, serializers.SerializerMetaclass):
return self.check_response_data(data, field())
'''
if 'results' in data and 'count' in data:
for item in data['results']:
self.check_response_data(item, out_fields)
else:
for field_name, value in data.items():
try:
field_data = fields[field_name]
except:
import pdb; pdb.set_trace()
                # Check that the field is among those expected in the response
self.assertTrue(field_name in available_fields)
available_fields.remove(field_name)
if field_name in required_fields:
required_fields.remove(field_name)
if field_data['sub_fields']:
if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
for key, item in data[field_name].items():
self.check_response_data(item, field_data['sub_fields'])
else:
self.check_response_data(data[field_name], field_data['sub_fields'])
else:
field_instance = field_data['field_instance']
                    # Check the value if the field is required or a value is present in the response
                    if field_data['required'] or value is not None:
                        # Check the field type
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))
                        # Check that the value is valid (an exception is raised otherwise)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)
        # Make sure all required fields are present in the response
self.assertEqual(len(required_fields), 0)
'''
ENDPOINTS = ApiDocumentation().get_endpoints()
ENDPOINTS = [ep for ep in ENDPOINTS]
# Build the list of requests.
REQUESTS_LIST = []
for endpoint in ENDPOINTS:
for method in endpoint.allowed_methods:
serializer = get_serializer(endpoint, method)
if serializer:
            # @TODO: Finish support for testing requests missing required data (without_required).
# for request_type in ('all', 'only_required', 'without_required'):
for request_type in ('all', 'only_required'):
REQUESTS_LIST.append((endpoint, method, serializer, request_type))
else:
REQUESTS_LIST.append((endpoint, method, serializer, None))
REQUESTS_DATA = {}
# Add test methods for them.
for endpoint, method, serializer, request_type in REQUESTS_LIST:
method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)
REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)
setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)
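# Illustrative note (hypothetical view name): for an ArticleViewSet allowing GET
# and POST with an 'in' serializer, the loops above attach methods named e.g.
#     test_ArticleViewSet_GET_None
#     test_ArticleViewSet_POST_all
#     test_ArticleViewSet_POST_only_required
# to AutoTestCase, each dispatching through REQUESTS_DATA to base_test_method.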
|
flexible
|
{
"blob_id": "04822e735c9c27f0e0fcc9727bcc38d2da84dee6",
"index": 7831,
"step-1": "<mask token>\n\n\nclass AutoTestCase(APITestCase):\n <mask token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n <mask token>\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\n<mask token>\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, 
serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется 
значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n 
self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in 
data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\n<mask token>\n",
"step-4": "<mask token>\nlog = logging.getLogger(__name__)\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, 
self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 
'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\nENDPOINTS = ApiDocumentation().get_endpoints()\nENDPOINTS = [ep for ep in ENDPOINTS]\nREQUESTS_LIST = []\nfor endpoint in ENDPOINTS:\n for method in endpoint.allowed_methods:\n serializer = get_serializer(endpoint, method)\n if serializer:\n for request_type in ('all', 'only_required'):\n REQUESTS_LIST.append((endpoint, method, serializer,\n request_type))\n else:\n REQUESTS_LIST.append((endpoint, method, serializer, None))\nREQUESTS_DATA = {}\nfor endpoint, method, serializer, request_type in REQUESTS_LIST:\n method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method,\n request_type)\n REQUESTS_DATA[method_name] = endpoint, method, serializer, request_type\n setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)\n",
"step-5": "import logging\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom rest_framework import serializers\nfrom rest_framework.test import APITestCase\n\nfrom ..autodocs.docs import ApiDocumentation\n\nfrom .utils import Deferred\n\nlog = logging.getLogger(__name__)\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n # Если тестируем PATCH метод и при этом для него нет сериалайзера, используем сериалайзер от PUT.\n if method_name == 'PATCH':\n methods.append('PUT')\n\n for method in methods:\n if method in endpoint.serializer_classes and \\\n isinstance(endpoint.serializer_classes[method], dict) and \\\n dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k,v in value.items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)\n\n path = self.endpoint.path\n\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n\n self.path = path\n\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n\n if self.serializer:\n if 
self.request_type == 'all':\n # Запрос со всеми данными на входе.\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path, data, 'json')\n self.check_response_is_valid(response)\n\n elif self.request_type == 'only_required':\n # Запрос только с обязательными данными.\n data = self.prepare_request_data(self.serializer, only_required=True)\n response = self.send_request(request_method, self.path, data, 'json')\n self.check_response_is_valid(response)\n\n elif self.request_type == 'without_required':\n # Запрос не со всеми обязательными данными.\n data = self.prepare_request_data(self.serializer, only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path, data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n\n else:\n # Запрос без данных на входе.\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n # Если это класс сериалайзера, а не его экземпляр.\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n\n # Либо имеется тестовое значение установленное через `test_helper_factory`.\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n\n # Либо это список.\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n\n # Либо это экземпляр сериалайзера.\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k,v in field.get_fields().items() \\\n if (not only_required) or (only_required and v.required)}\n\n # Либо это поле.\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n\n elif isinstance(field, serializers.CharField):\n return 'test'\n\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)\n\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__, path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n 
self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n # @TODO: Проверка с помощью данных сериалайзера на данный момент не возможна\n # т.к. что-то происходит с QuerySet'ом из-за чего serializer.data вызывает RuntimeError.\n '''\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \\\n 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n '''\n # Если это класс сериалайзера, а не его экземпляр.\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n\n '''\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n '''\n\n\nENDPOINTS = ApiDocumentation().get_endpoints()\n\nENDPOINTS = [ep for ep in ENDPOINTS]\n\n# Собираем список запросов.\nREQUESTS_LIST = []\nfor endpoint in ENDPOINTS:\n for method in endpoint.allowed_methods:\n serializer = get_serializer(endpoint, method)\n if serializer:\n # @TODO: Доработать тестирование без обязательных данных в запросе (without_required).\n # for request_type in ('all', 'only_required', 'without_required'):\n for request_type in ('all', 'only_required'):\n REQUESTS_LIST.append((endpoint, method, serializer, request_type))\n else:\n REQUESTS_LIST.append((endpoint, method, serializer, None))\n\nREQUESTS_DATA = {}\n# Добавляем для них тестовые методы.\nfor endpoint, method, serializer, request_type in REQUESTS_LIST:\n method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)\n REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)\n setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)\n",
"step-ids": [
6,
11,
12,
14,
16
]
}
|
[
6,
11,
12,
14,
16
] |
print(60 * 60)
seconds_per_hour = 60 * 60
print(24 * seconds_per_hour)
seconds_per_day = 24 * seconds_per_hour
print(seconds_per_day / seconds_per_hour)
print(seconds_per_day // seconds_per_hour)
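# Note: the last two lines contrast true division, which prints 24.0 (a
# float), with floor division, which prints 24 (an int).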
|
flexible
|
{
"blob_id": "358879d83ed3058530031d50fb69e3ce11fbd524",
"index": 1057,
"step-1": "<mask token>\n",
"step-2": "print(60 * 60)\n<mask token>\nprint(24 * seconds_per_hour)\n<mask token>\nprint(seconds_per_day / seconds_per_hour)\nprint(seconds_per_day // seconds_per_hour)\n",
"step-3": "print(60 * 60)\nseconds_per_hour = 60 * 60\nprint(24 * seconds_per_hour)\nseconds_per_day = 24 * seconds_per_hour\nprint(seconds_per_day / seconds_per_hour)\nprint(seconds_per_day // seconds_per_hour)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import random, os
from tkinter import *
def restart():
root.destroy()
os.startfile(r"data\programs\game with tkinter.py")
def disableButton():
global l,restartButton,start
b1.config(state="disabled")
b2.config(state="disabled")
b3.config(state="disabled")
b4.config(state="disabled")
b5.config(state="disabled")
b6.config(state="disabled")
b7.config(state="disabled")
b8.config(state="disabled")
b9.config(state="disabled")
start.config(state="disabled")
restartButton.config(state="normal",command=restart,text=" --->press to restart<--- ")
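# funForB1..funForB9 all follow one pattern: if the clicked button shows the
# word remembered from the present list, display the smiley image, otherwise
# the pleading one, then end the round.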
def funForB1():
global notPresentList,element,l,start
ans = notPresentList[0] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB2():
global notPresentList,element,l
ans = notPresentList[1] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB3():
global notPresentList,element,l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList,element,l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList,element,l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList,element,l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList,element,l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList,element,l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList,element,l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
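    # Pick 9 random words from present.txt (repeats are possible); one of them
    # becomes the target element the player must remember.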
with open(r"data\database\present.txt", "r") as file:
content = file.read().split("\n")
presentList = [
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)]
]
element = presentList[random.randint(0,8)]
return (presentList,element)
def notPresent():
global buttonList,start
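    # Pick 8 decoy words from notpresent.txt, show the 9 "present" words on
    # the buttons, and hide the remembered target among the decoys.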
with open(r"data\database\notpresent.txt","r") as file:
content = file.read().split("\n")
notPresentList = [
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
]
start.config(state="normal")
obj = present()
presentList,element = obj[0],obj[1]
for i in range(9):
buttonList[i].config(text = presentList[i], state="disabled")
notPresentList.insert(random.randint(0,9),element)
return (notPresentList,element)
def start():
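    # Note: this function shares the name `start` with the Button defined in
    # the main section; command=start below captures the function before the
    # name is rebound, and `global start` here resolves to the Button by the
    # time the game is running.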
global buttonList,start,notPresentList,element
start.config(state="disabled")
for i in range(9):
buttonList[i].config(text = notPresentList[i], state="normal")
# main
root =Tk()
root.title("Memory Game")
root.geometry("400x500")
root.resizable(0,0)
root.config(bg="white")
image1 = PhotoImage(file=r"data\img\smiley.png")
image2 = PhotoImage(file=r"data\img\pleading.png")
start = Button(root, bg="black", fg="white", text="-->Start<--", font="comicsansms 15 bold", command=start, relief="raised",state="normal", bd=2)
start.place(x=150,y=110)
frameMain = Frame(root, relief="flat", bd=1, background="white", width=400, height=417)
frameMain.place(x=10, y=150)
image=PhotoImage(file=r"data\img\emoji.png")
l=Label(root,image=image ,font="comicsansms 15 bold", fg="black", bg="white")
l.place(x=180,y=5)
b1=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB1)
b2=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB2)
b3=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB3)
b4=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB4)
b5=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB5)
b6=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB6)
b7=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB7)
b8=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB8)
b9=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB9)
b1.place(x=10,y=16)
b2.place(x=150,y=16)
b3.place(x=290,y=16)
b4.place(x=10,y=110)
b5.place(x=150,y=110)
b6.place(x=290,y=110)
b7.place(x=10,y=204)
b8.place(x=150,y=204)
b9.place(x=290,y=204)
buttonList = [b1,b2,b3,b4,b5,b6,b7,b8,b9]
restartButton = Button(root, bg="teal", fg="white", text="!!! Remember these items !!!", font="comicsansms 15 bold", relief="raised",state="disabled",disabledforeground="white")
restartButton.place(x=60,y=460)
obj = notPresent()
notPresentList,element = obj[0],obj[1]
root.mainloop()
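# Refactoring sketch (illustrative, not part of the game above): the nine
# funForB* handlers differ only in the index they test, so a single factory
# could generate them:
#
#     def make_handler(i):
#         def handler():
#             l.config(image=image1 if notPresentList[i] == element else image2)
#             disableButton()
#         return handler
#
#     # e.g. Button(..., command=make_handler(0))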
|
normal
|
{
"blob_id": "e70c5c9a62faa4c501c0f103ce0a0a419aaf4301",
"index": 2096,
"step-1": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\n<mask token>\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\n<mask token>\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\ndef funForB1():\n global notPresentList, element, l, start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\ndef start():\n global buttonList, start, notPresentList, element\n start.config(state='disabled')\n for i in range(9):\n 
buttonList[i].config(text=notPresentList[i], state='normal')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\ndef funForB1():\n global notPresentList, element, l, start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\ndef start():\n global buttonList, start, notPresentList, element\n start.config(state='disabled')\n for i in range(9):\n 
buttonList[i].config(text=notPresentList[i], state='normal')\n\n\n<mask token>\nroot.title('Memory Game')\nroot.geometry('400x500')\nroot.resizable(0, 0)\nroot.config(bg='white')\n<mask token>\nstart.place(x=150, y=110)\n<mask token>\nframeMain.place(x=10, y=150)\n<mask token>\nl.place(x=180, y=5)\n<mask token>\nb1.place(x=10, y=16)\nb2.place(x=150, y=16)\nb3.place(x=290, y=16)\nb4.place(x=10, y=110)\nb5.place(x=150, y=110)\nb6.place(x=290, y=110)\nb7.place(x=10, y=204)\nb8.place(x=150, y=204)\nb9.place(x=290, y=204)\n<mask token>\nrestartButton.place(x=60, y=460)\n<mask token>\nroot.mainloop()\n",
"step-5": "import time,random,os\nfrom tkinter import *\n\ndef restart():\n root.destroy()\n os.startfile(r\"data\\programs\\game with tkinter.py\")\n \ndef disableButton():\n global l,restartButton,start\n b1.config(state=\"disabled\")\n b2.config(state=\"disabled\")\n b3.config(state=\"disabled\")\n b4.config(state=\"disabled\")\n b5.config(state=\"disabled\")\n b6.config(state=\"disabled\")\n b7.config(state=\"disabled\")\n b8.config(state=\"disabled\")\n b9.config(state=\"disabled\")\n start.config(state=\"disabled\")\n restartButton.config(state=\"normal\",command=restart,text=\" --->press to restart<--- \")\n \ndef funForB1():\n global notPresentList,element,l,start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB2():\n global notPresentList,element,l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB3():\n global notPresentList,element,l\n ans = notPresentList[2] == element\n if ans:\n \n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB4():\n global notPresentList,element,l\n ans = notPresentList[3] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB5():\n global notPresentList,element,l\n ans = notPresentList[4] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB6():\n global notPresentList,element,l\n ans = notPresentList[5] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB7():\n global notPresentList,element,l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB8():\n global notPresentList,element,l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB9():\n global notPresentList,element,l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open(r\"data\\database\\present.txt\", \"r\") as file:\n content = file.read().split(\"\\n\")\n presentList = [\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)]\n ]\n \n element = presentList[random.randint(0,8)]\n return (presentList,element)\n\ndef notPresent():\n global buttonList,start\n with open(r\"data\\database\\notpresent.txt\",\"r\") as file:\n content = file.read().split(\"\\n\")\n notPresentList = [\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n ]\n start.config(state=\"normal\")\n obj = present()\n presentList,element = obj[0],obj[1]\n for i in range(9):\n buttonList[i].config(text = presentList[i], state=\"disabled\")\n notPresentList.insert(random.randint(0,9),element)\n\n return (notPresentList,element)\n\ndef start():\n global buttonList,start,notPresentList,element\n 
start.config(state=\"disabled\")\n\n for i in range(9):\n buttonList[i].config(text = notPresentList[i], state=\"normal\")\n\n \n \n\n \n# main\n\nroot =Tk()\nroot.title(\"Memory Game\")\nroot.geometry(\"400x500\")\nroot.resizable(0,0)\nroot.config(bg=\"white\")\n\nimage1 = PhotoImage(file=r\"data\\img\\smiley.png\")\nimage2 = PhotoImage(file=r\"data\\img\\pleading.png\")\n\n\nstart = Button(root, bg=\"black\", fg=\"white\", text=\"-->Start<--\", font=\"comicsansms 15 bold\", command=start, relief=\"raised\",state=\"normal\", bd=2)\nstart.place(x=150,y=110)\n\n\n\nframeMain = Frame(root, relief=\"flat\", bd=1, background=\"white\", width=400, height=417)\nframeMain.place(x=10, y=150)\n\n\nimage=PhotoImage(file=r\"data\\img\\emoji.png\")\nl=Label(root,image=image ,font=\"comicsansms 15 bold\", fg=\"black\", bg=\"white\")\nl.place(x=180,y=5)\n\nb1=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB1)\nb2=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB2)\nb3=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB3)\nb4=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB4)\nb5=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB5)\nb6=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB6)\nb7=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB7)\nb8=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB8)\nb9=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB9)\n\n\nb1.place(x=10,y=16)\nb2.place(x=150,y=16)\nb3.place(x=290,y=16)\nb4.place(x=10,y=110)\nb5.place(x=150,y=110)\nb6.place(x=290,y=110)\nb7.place(x=10,y=204)\nb8.place(x=150,y=204)\nb9.place(x=290,y=204)\n\nbuttonList = [b1,b2,b3,b4,b5,b6,b7,b8,b9]\n\n\nrestartButton = Button(root, bg=\"teal\", fg=\"white\", text=\"!!! Remember these items !!!\", font=\"comicsansms 15 bold\", relief=\"raised\",state=\"disabled\",disabledforeground=\"white\")\nrestartButton.place(x=60,y=460)\nobj = notPresent()\nnotPresentList,element = obj[0],obj[1]\n\nroot.mainloop()\n",
"step-ids": [
11,
12,
14,
15,
18
]
}
|
[
11,
12,
14,
15,
18
] |
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
layout = html.Div([
html.H3('Node 6'),
dcc.Dropdown(
id='node-6-dropdown',
options=[
{'label': 'Node 6 - {}'.format(i), 'value': i} for i in [
'NYC', 'MTL', 'LA'
]
]
),
html.Div(id='node-6-display-value'),
])
@app.callback(
Output('node-6-display-value', 'children'),
[Input('node-6-dropdown', 'value')])
def display_value(value):
return 'You have selected "{}"'.format(value)
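# Usage sketch (assumption: this module lives in a standard multi-page Dash
# project, e.g. as apps/node6.py, with URL routing done in an index.py):
#
#     from apps import node6
#
#     @app.callback(Output('page-content', 'children'),
#                   [Input('url', 'pathname')])
#     def display_page(pathname):
#         if pathname == '/node-6':
#             return node6.layout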
|
flexible
|
{
"blob_id": "632b90ea5a2ac35539e589af297c04b31bbf02d0",
"index": 3443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.callback(Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n",
"step-3": "<mask token>\nlayout = html.Div([html.H3('Node 6'), dcc.Dropdown(id='node-6-dropdown',\n options=[{'label': 'Node 6 - {}'.format(i), 'value': i} for i in ['NYC',\n 'MTL', 'LA']]), html.Div(id='node-6-display-value')])\n\n\n@app.callback(Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n",
"step-4": "import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom app import app\nlayout = html.Div([html.H3('Node 6'), dcc.Dropdown(id='node-6-dropdown',\n options=[{'label': 'Node 6 - {}'.format(i), 'value': i} for i in ['NYC',\n 'MTL', 'LA']]), html.Div(id='node-6-display-value')])\n\n\n@app.callback(Output('node-6-display-value', 'children'), [Input(\n 'node-6-dropdown', 'value')])\ndef display_value(value):\n return 'You have selected \"{}\"'.format(value)\n",
"step-5": "import dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output\r\n\r\nfrom app import app\r\n\r\nlayout = html.Div([\r\n html.H3('Node 6'),\r\n dcc.Dropdown(\r\n id='node-6-dropdown',\r\n options=[\r\n {'label': 'Node 6 - {}'.format(i), 'value': i} for i in [\r\n 'NYC', 'MTL', 'LA'\r\n ]\r\n ]\r\n ),\r\n html.Div(id='node-6-display-value'),\r\n\r\n])\r\n\r\n\r\n@app.callback(\r\n Output('node-6-display-value', 'children'),\r\n [Input('node-6-dropdown', 'value')])\r\ndef display_value(value):\r\n return 'You have selected \"{}\"'.format(value)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# C8-06 p.146 Write a city_country() function that takes a city name and country
# Print the city name, then the country the city is in. Call 3 times with different pairs.
def city_country(city, country):
    """Name a city and the country it resides in, separated by a comma."""
    print(f'"{city.title()}, {country.title()}"\n')
city_country("St. John's", 'Canada')
city_country("ottawa", "Ontario")
city_country('cairo', 'egypt')
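# Note: str.title() capitalizes the letter after any non-letter, so the first
# call prints "St. John'S, Canada" (capital S after the apostrophe); the
# trailing \n in the f-string adds a blank line after each city/country pair.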
|
normal
|
{
"blob_id": "2866ecf69969b445fb15740a507ddecb1dd1762d",
"index": 3395,
"step-1": "<mask token>\n",
"step-2": "def city_country(city, country):\n \"\"\"Name a city and the country it resides in seperated by a comma.\"\"\"\n print(f'\"{city.title()}, {country.title()}\"\\n')\n\n\n<mask token>\n",
"step-3": "def city_country(city, country):\n \"\"\"Name a city and the country it resides in seperated by a comma.\"\"\"\n print(f'\"{city.title()}, {country.title()}\"\\n')\n\n\ncity_country(\"St. John's\", 'Canada')\ncity_country('ottawa', 'Ontario')\ncity_country('cairo', 'egypt')\n",
"step-4": "# C8-06 p.146 Write city_country() function that takes name city and country\n# Print city name then the country the city is in. call 3 times with differet pairs.\n\ndef city_country(city, country):\n \"\"\"Name a city and the country it resides in seperated by a comma.\"\"\"\n print(f'\"{city.title()}, {country.title()}\"\\n')\n\n\ncity_country(\"St. John's\", 'Canada')\n\ncity_country(\"ottawa\", \"Ontario\")\n\ncity_country('cairo', 'egypt')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from app.models import *
# Register your models here.
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Thread)
admin.site.register(Comment)
admin.site.register(Experience)
admin.site.register(ThreadTag)
admin.site.register(ExperienceTag)
admin.site.register(UserProfile)
admin.site.register(ExperiencesLike)
admin.site.register(ExperiencesDislike)
admin.site.register(Like)
admin.site.register(Dislike)
admin.site.register(Toolbox)
admin.site.register(ToolboxUser)
admin.site.register(Question)
admin.site.register(Answer)
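# Sketch of the model assumption behind ProfileInline above: a StackedInline
# on the User admin requires UserProfile (imported from app.models) to point
# at User, typically via a OneToOneField (the field name here is illustrative):
#
#     class UserProfile(models.Model):
#         user = models.OneToOneField(User, on_delete=models.CASCADE)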
|
normal
|
{
"blob_id": "a9f3d5f11a9f2781571029b54d54b41d9f1f83b3",
"index": 592,
"step-1": "<mask token>\n\n\nclass ProfileInline(admin.StackedInline):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)\n",
"step-4": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\nfrom app.models import *\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)\n",
"step-5": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\n\nfrom app.models import *\n\n# Register your models here.\n\nclass ProfileInline(admin.StackedInline):\n\tmodel = UserProfile\n\tcan_delete = False\n\tverbose_name_plural = 'profile'\n\nclass UserAdmin(BaseUserAdmin):\n\tinlines = (ProfileInline, )\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
"""
This file contains all the required methods for the street prediction utilizing
the Hough transform.
"""
import numpy as np
import scipy.ndimage as ndi
from skimage.draw import polygon
from skimage.transform import hough_line
def draw_roads(roads, shape):
"""
Creates an image with roads drawn as full lines.
Parameters:
roads -- ndarray describing all roads to be drawn
shape -- shape (size) of image
The parameters are exactly what is returned by find_roads (see there).
Returns:
    A numpy.ndarray with shape 'shape' and floating point type, where
background has probability 0 and roads have been drawn on top of
each other, with pixel values equal to the road strength, from
lowest to highest strength.
"""
im = np.zeros(shape)
for i in reversed(range(roads.shape[0])):
strength, angle, distance, width = roads[i]
coord = _get_line_box_cuts(angle, distance, *shape)
if coord is None: continue # do not abort on bogus angle/distance
coord = np.asarray(coord)
x, y = _road_polygon(coord, width)
rr, cc = polygon(y, x, shape)
im[rr,cc] = strength
return im
def find_roads(
probability_map,
*,
input_threshold=0.3,
max_roads=None,
min_strength=0.17, #0.2,
num_angles=720,
roads_min_angle=np.pi/8,
roads_min_distance=50,
debugimage=None, # for debugging ...
debugprint=None): # for debugging ...
"""
Finds full-image roads in probability map (image).
Parameters:
    probability_map -- a numpy.ndarray with probabilities per pixel (*)
(*) i.e., the array is shaped HxW, with pixel values from 0 to 1
Keyword-Only Parameters:
input_threshold -- threshold applied to probability_map
max_roads -- maximum number of roads to be found
min_strength -- minimum strength of roads to be found
num_angles -- angular resolution used in hough transforms
roads_min_angle -- minimum required angle between roads
roads_min_distance -- minimum required distance between roads
Returns:
roads -- roads that have been found (*)
shape -- shape of probability_map (vector with 2 elements)
(*) A numpy.ndarray with floating point type of shape Nx4, with N being
the number of roads found, and 4 corresponding to columns 'strength',
'angle', 'distance', 'width'. Strength is the response for the road
(the "probability"), 'angle' and 'distance' correspond to the values
returned by skimage.transform.hough_line, and 'width' is the
identified road width (can currently be 12, 32 or 48).
"""
# shorthand
im = probability_map
# the angles to be used in the Hough transform
theta = np.linspace(-np.pi/2, np.pi/2, num_angles)
# normalize almost anything to grayscale
if im.ndim == 3:
if im.shape[2] == 4:
im = im[:,:,:3] # throw away alpha
im = im.mean(axis=2) # convert RGB to grayscale
if debugimage: debugimage('original', im, 0, 1, 'jet')
assert im.ndim == 2
if debugimage:
hspace, _, _ = hough_line(im, theta)
debugimage('original_hough_hspace', hspace)
# create monochrome/binary input map
im[im >= input_threshold] = 1
im[im < input_threshold] = 0
if debugimage: debugimage('threshold_applied', im)
# Hough transform
hspace, angles, distances = hough_line(im, theta)
hspace = np.asarray(hspace, dtype=np.float32)
hspace /= hspace.max() # normalize
if debugimage: debugimage('hough_hspace', hspace)
# convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels
w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])
w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])
w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])
# convolve
im12 = ndi.filters.convolve1d(hspace, w12, axis=0)
im32 = ndi.filters.convolve1d(hspace, w32, axis=0)
im48 = ndi.filters.convolve1d(hspace, w48, axis=0)
# normalize signal strengths for different road widths
im12 /= 12
im32 /= 32
im48 /= 48
ca = (None, None, 'jet',)
if debugimage: debugimage('hough_hspace_conv12', im12, *ca)
if debugimage: debugimage('hough_hspace_conv32', im32, *ca)
if debugimage: debugimage('hough_hspace_conv48', im48, *ca)
if debugimage:
debugimage('hough_hspace_combined',
np.hstack([im12, im32, im48]), *ca)
# compute possible roads of all widths, sorted by signal strength
seq = np.stack((im12, im32, im48)).flatten()
sor = np.argsort(seq)
roads = np.column_stack((
seq,
np.tile(np.tile(angles, distances.shape[0]), 3),
np.tile(np.repeat(distances, angles.shape[0]), 3),
np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])
))[sor][::-1]
# columns: strength, angle, distance, width
found_roads = np.asarray([]).reshape(0, 4)
    # find as many strong roads as desired, while dropping roads that are too
# similar to roads already found (non-max suppression)
for i in range(roads.shape[0]):
if roads[i,0] < min_strength:
break
a = roads[i,1]
d = roads[i,2]
close = (
np.logical_or(
np.logical_and(
np.abs(found_roads[:,1]-a) < roads_min_angle,
np.abs(found_roads[:,2]-d) < roads_min_distance),
np.logical_and(
np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,
np.abs(found_roads[:,2]+d) < roads_min_distance)))
if not np.any(close):
found_roads = np.vstack((found_roads, roads[i]))
if max_roads is not None and found_roads.shape[0] >= max_roads:
break
return found_roads, im.shape
# find begin and end coordinates of an intersection of a box (0, 0, width,
# height) with a line (given by angle and distance, as per Hough transform)
def _get_line_box_cuts(angle, distance, width, height):
a = np.cos(angle)
b = np.sin(angle)
d = distance
    # divisions by zero (axis-aligned lines) yield inf/nan here, which simply
    # fail the range checks below and are therefore ignored
    with np.errstate(divide='ignore', invalid='ignore'):
        x0 = d/a
        x1 = (d-b*height)/a
        y0 = d/b
        y1 = (d-a*width)/b
intersections = []
if x0 >= 0 and x0 <= width: intersections.append((x0, 0))
if x1 >= 0 and x1 <= width: intersections.append((x1, height))
if y0 >= 0 and y0 <= height: intersections.append((0, y0))
if y1 >= 0 and y1 <= height: intersections.append((width, y1))
# TODO: what about degenerate cases?
if len(intersections) == 0: return None
assert len(intersections) == 2, (x0, x1, y0, y1)
return intersections
# return a list of pixel coordinates, usable to index 2D ndarrays, that
# correspond to the shape of line segment with given width
def _road_polygon(endpoints, width):
a, b = endpoints
a = np.asarray(a)
b = np.asarray(b)
n = b-a
n /= np.linalg.norm(n)
n *= width / 2
s = np.dot(np.array([[0, -1], [1, 0]]), n)
xy = np.array([
a - n - s,
a - n + s,
b + n + s,
b + n - s
])
x = xy[:,0]
y = xy[:,1]
return [x, y]
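A short usage sketch (editor's addition, not part of the original file): it assumes the module above is importable as hough_roads; the input file name 'prob_map.npy' is hypothetical.
import numpy as np

import hough_roads  # hypothetical module name for the file above

prob = np.load('prob_map.npy')  # HxW float array with values in [0, 1]
# note: find_roads thresholds a 2D input array in place
roads, shape = hough_roads.find_roads(prob, max_roads=5)
rendered = hough_roads.draw_roads(roads, shape)  # HxW float image of the roads
for strength, angle, distance, width in roads:
    print('strength={:.2f} angle={:.2f} distance={:.1f} width={}'.format(
        strength, angle, distance, int(width)))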
|
normal
|
{
"blob_id": "f76185095ebb1adbf7ae22ffb500ffc3d6b0a30d",
"index": 6019,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_roads(probability_map, *, input_threshold=0.3, max_roads=None,\n min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,\n roads_min_distance=50, debugimage=None, debugprint=None):\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n im = probability_map\n theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:, :, :3]\n im = im.mean(axis=2)\n if debugimage:\n debugimage('original', im, 0, 1, 'jet')\n assert im.ndim == 2\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n if debugimage:\n debugimage('threshold_applied', im)\n hspace, angles, distances = hough_line(im, theta)\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max()\n if debugimage:\n debugimage('hough_hspace', hspace)\n w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])\n w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])\n w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n im12 /= 12\n im32 /= 32\n im48 /= 48\n ca = None, None, 'jet'\n if debugimage:\n debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage:\n debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage:\n debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0\n ]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.\n repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]\n found_roads = np.asarray([]).reshape(0, 4)\n for i in range(roads.shape[0]):\n if roads[i, 0] < min_strength:\n break\n a = roads[i, 1]\n d = roads[i, 2]\n close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <\n roads_min_angle, np.abs(found_roads[:, 2] - d) <\n roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[\n :, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <\n roads_min_distance))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= 
max_roads:\n break\n return found_roads, im.shape\n\n\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n x0 = d / a\n x1 = (d - b * height) / a\n y0 = d / b\n y1 = (d - a * width) / b\n intersections = []\n if x0 >= 0 and x0 <= width:\n intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width:\n intersections.append((x1, height))\n if y0 >= 0 and y0 <= height:\n intersections.append((0, y0))\n if y1 >= 0 and y1 <= height:\n intersections.append((width, y1))\n if len(intersections) == 0:\n return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b - a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])\n x = xy[:, 0]\n y = xy[:, 1]\n return [x, y]\n",
"step-3": "<mask token>\n\n\ndef draw_roads(roads, shape):\n \"\"\"\n Creates an image with roads drawn as full lines.\n\n Parameters:\n roads -- ndarray describing all roads to be drawn\n shape -- shape (size) of image\n\n The parameters are exactly what is returned by find_roads (see there).\n\n Returns:\n An numpy.ndarray with shape 'shape' and floating point type, where\n background has probability 0 and roads have been drawn on top of\n each other, with pixel values equal to the road strength, from\n lowest to highest strength.\n\n \"\"\"\n im = np.zeros(shape)\n for i in reversed(range(roads.shape[0])):\n strength, angle, distance, width = roads[i]\n coord = _get_line_box_cuts(angle, distance, *shape)\n if coord is None:\n continue\n coord = np.asarray(coord)\n x, y = _road_polygon(coord, width)\n rr, cc = polygon(y, x, shape)\n im[rr, cc] = strength\n return im\n\n\ndef find_roads(probability_map, *, input_threshold=0.3, max_roads=None,\n min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,\n roads_min_distance=50, debugimage=None, debugprint=None):\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. 
Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n im = probability_map\n theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:, :, :3]\n im = im.mean(axis=2)\n if debugimage:\n debugimage('original', im, 0, 1, 'jet')\n assert im.ndim == 2\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n if debugimage:\n debugimage('threshold_applied', im)\n hspace, angles, distances = hough_line(im, theta)\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max()\n if debugimage:\n debugimage('hough_hspace', hspace)\n w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])\n w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])\n w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n im12 /= 12\n im32 /= 32\n im48 /= 48\n ca = None, None, 'jet'\n if debugimage:\n debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage:\n debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage:\n debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0\n ]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.\n repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]\n found_roads = np.asarray([]).reshape(0, 4)\n for i in range(roads.shape[0]):\n if roads[i, 0] < min_strength:\n break\n a = roads[i, 1]\n d = roads[i, 2]\n close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <\n roads_min_angle, np.abs(found_roads[:, 2] - d) <\n roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[\n :, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <\n roads_min_distance))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n return found_roads, im.shape\n\n\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n x0 = d / a\n x1 = (d - b * height) / a\n y0 = d / b\n y1 = (d - a * width) / b\n intersections = []\n if x0 >= 0 and x0 <= width:\n intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width:\n intersections.append((x1, height))\n if y0 >= 0 and y0 <= height:\n intersections.append((0, y0))\n if y1 >= 0 and y1 <= height:\n intersections.append((width, y1))\n if len(intersections) == 0:\n return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b - a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])\n x = xy[:, 0]\n y = xy[:, 1]\n return [x, y]\n",
"step-4": "<mask token>\nimport numpy as np\nimport scipy.ndimage as ndi\nfrom skimage.draw import polygon\nfrom skimage.transform import hough_line\n\n\ndef draw_roads(roads, shape):\n \"\"\"\n Creates an image with roads drawn as full lines.\n\n Parameters:\n roads -- ndarray describing all roads to be drawn\n shape -- shape (size) of image\n\n The parameters are exactly what is returned by find_roads (see there).\n\n Returns:\n An numpy.ndarray with shape 'shape' and floating point type, where\n background has probability 0 and roads have been drawn on top of\n each other, with pixel values equal to the road strength, from\n lowest to highest strength.\n\n \"\"\"\n im = np.zeros(shape)\n for i in reversed(range(roads.shape[0])):\n strength, angle, distance, width = roads[i]\n coord = _get_line_box_cuts(angle, distance, *shape)\n if coord is None:\n continue\n coord = np.asarray(coord)\n x, y = _road_polygon(coord, width)\n rr, cc = polygon(y, x, shape)\n im[rr, cc] = strength\n return im\n\n\ndef find_roads(probability_map, *, input_threshold=0.3, max_roads=None,\n min_strength=0.17, num_angles=720, roads_min_angle=np.pi / 8,\n roads_min_distance=50, debugimage=None, debugprint=None):\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. 
Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n im = probability_map\n theta = np.linspace(-np.pi / 2, np.pi / 2, num_angles)\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:, :, :3]\n im = im.mean(axis=2)\n if debugimage:\n debugimage('original', im, 0, 1, 'jet')\n assert im.ndim == 2\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n if debugimage:\n debugimage('threshold_applied', im)\n hspace, angles, distances = hough_line(im, theta)\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max()\n if debugimage:\n debugimage('hough_hspace', hspace)\n w12 = np.concatenate([-np.ones(6), np.ones(12), -np.ones(6)])\n w32 = np.concatenate([-np.ones(16), np.ones(32), -np.ones(16)])\n w48 = np.concatenate([-np.ones(24), np.ones(48), -np.ones(24)])\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n im12 /= 12\n im32 /= 32\n im48 /= 48\n ca = None, None, 'jet'\n if debugimage:\n debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage:\n debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage:\n debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined', np.hstack([im12, im32, im48]), *ca)\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((seq, np.tile(np.tile(angles, distances.shape[0\n ]), 3), np.tile(np.repeat(distances, angles.shape[0]), 3), np.\n repeat([12, 32, 48], distances.shape[0] * angles.shape[0])))[sor][::-1]\n found_roads = np.asarray([]).reshape(0, 4)\n for i in range(roads.shape[0]):\n if roads[i, 0] < min_strength:\n break\n a = roads[i, 1]\n d = roads[i, 2]\n close = np.logical_or(np.logical_and(np.abs(found_roads[:, 1] - a) <\n roads_min_angle, np.abs(found_roads[:, 2] - d) <\n roads_min_distance), np.logical_and(np.pi - np.abs(found_roads[\n :, 1] - a) < roads_min_angle, np.abs(found_roads[:, 2] + d) <\n roads_min_distance))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n return found_roads, im.shape\n\n\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n x0 = d / a\n x1 = (d - b * height) / a\n y0 = d / b\n y1 = (d - a * width) / b\n intersections = []\n if x0 >= 0 and x0 <= width:\n intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width:\n intersections.append((x1, height))\n if y0 >= 0 and y0 <= height:\n intersections.append((0, y0))\n if y1 >= 0 and y1 <= height:\n intersections.append((width, y1))\n if len(intersections) == 0:\n return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b - a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([a - n - s, a - n + s, b + n + s, b + n - s])\n x = xy[:, 0]\n y = xy[:, 1]\n return [x, y]\n",
"step-5": "#!/usr/bin/env python3\n\n\"\"\"\nThis file contains all the required methods for the street prediction utilizing\nthe Hough transform.\n\"\"\"\n\nimport numpy as np\nimport scipy.ndimage as ndi\n\nfrom skimage.draw import polygon\nfrom skimage.transform import hough_line\n\n\ndef draw_roads(roads, shape):\n \"\"\"\n Creates an image with roads drawn as full lines.\n\n Parameters:\n roads -- ndarray describing all roads to be drawn\n shape -- shape (size) of image\n\n The parameters are exactly what is returned by find_roads (see there).\n\n Returns:\n An numpy.ndarray with shape 'shape' and floating point type, where\n background has probability 0 and roads have been drawn on top of\n each other, with pixel values equal to the road strength, from\n lowest to highest strength.\n\n \"\"\"\n\n im = np.zeros(shape)\n\n for i in reversed(range(roads.shape[0])):\n strength, angle, distance, width = roads[i]\n coord = _get_line_box_cuts(angle, distance, *shape)\n if coord is None: continue # do not abort on bogus angle/distance\n coord = np.asarray(coord)\n x, y = _road_polygon(coord, width)\n rr, cc = polygon(y, x, shape)\n im[rr,cc] = strength\n\n return im\n\n\ndef find_roads(\n probability_map,\n *,\n input_threshold=0.3,\n max_roads=None,\n min_strength=0.17, #0.2,\n num_angles=720,\n roads_min_angle=np.pi/8,\n roads_min_distance=50,\n debugimage=None, # for debugging ...\n debugprint=None): # for debugging ...\n \"\"\"\n Finds full-image roads in probability map (image).\n\n Parameters:\n probability_map -- an numpy.ndarray with probabilities per pixel (*)\n\n (*) i.e., the array is shaped HxW, with pixel values from 0 to 1\n\n Keyword-Only Parameters:\n input_threshold -- threshold applied to probability_map\n max_roads -- maximum number of roads to be found\n min_strength -- minimum strength of roads to be found\n num_angles -- angular resolution used in hough transforms\n roads_min_angle -- minimum required angle between roads\n roads_min_distance -- minimum required distance between roads\n\n Returns:\n roads -- roads that have been found (*)\n shape -- shape of probability_map (vector with 2 elements)\n\n (*) A numpy.ndarray with floating point type of shape Nx4, with N being\n the number of roads found, and 4 corresponding to columns 'strength',\n 'angle', 'distance', 'width'. 
Strength is the response for the road\n (the \"probability\"), 'angle' and 'distance' correspond to the values\n returned by skimage.transform.hough_line, and 'width' is the\n identified road width (can currently be 12, 32 or 48).\n\n \"\"\"\n\n # shorthand\n im = probability_map\n\n # the angles to be used in the Hough transform\n theta = np.linspace(-np.pi/2, np.pi/2, num_angles)\n\n # normalize almost anything to grayscale\n if im.ndim == 3:\n if im.shape[2] == 4:\n im = im[:,:,:3] # throw away alpha\n im = im.mean(axis=2) # convert RGB to grayscale\n\n if debugimage: debugimage('original', im, 0, 1, 'jet')\n\n assert im.ndim == 2\n\n if debugimage:\n hspace, _, _ = hough_line(im, theta)\n debugimage('original_hough_hspace', hspace)\n\n # create monochrome/binary input map\n im[im >= input_threshold] = 1\n im[im < input_threshold] = 0\n\n if debugimage: debugimage('threshold_applied', im)\n\n # Hough transform\n hspace, angles, distances = hough_line(im, theta)\n\n hspace = np.asarray(hspace, dtype=np.float32)\n hspace /= hspace.max() # normalize\n\n if debugimage: debugimage('hough_hspace', hspace)\n\n # convolution filters, rectangular, tuned for widths of 12, 32, 48 pixels\n w12 = np.concatenate([-np.ones((6)), np.ones((12)), -np.ones((6))])\n w32 = np.concatenate([-np.ones((16)), np.ones((32)), -np.ones((16))])\n w48 = np.concatenate([-np.ones((24)), np.ones((48)), -np.ones((24))])\n\n # convolve\n im12 = ndi.filters.convolve1d(hspace, w12, axis=0)\n im32 = ndi.filters.convolve1d(hspace, w32, axis=0)\n im48 = ndi.filters.convolve1d(hspace, w48, axis=0)\n\n # normalize signal strengths for different road widths\n im12 /= 12\n im32 /= 32\n im48 /= 48\n\n ca = (None, None, 'jet',)\n if debugimage: debugimage('hough_hspace_conv12', im12, *ca)\n if debugimage: debugimage('hough_hspace_conv32', im32, *ca)\n if debugimage: debugimage('hough_hspace_conv48', im48, *ca)\n if debugimage:\n debugimage('hough_hspace_combined',\n np.hstack([im12, im32, im48]), *ca)\n\n # compute possible roads of all widths, sorted by signal strength\n seq = np.stack((im12, im32, im48)).flatten()\n sor = np.argsort(seq)\n roads = np.column_stack((\n seq,\n np.tile(np.tile(angles, distances.shape[0]), 3),\n np.tile(np.repeat(distances, angles.shape[0]), 3),\n np.repeat([12, 32, 48], distances.shape[0] * angles.shape[0])\n ))[sor][::-1]\n\n # columns: strength, angle, distance, width\n found_roads = np.asarray([]).reshape(0, 4)\n\n # find as many as strong roads as desired, while dropping roads that are too\n # similar to roads already found (non-max suppression)\n for i in range(roads.shape[0]):\n if roads[i,0] < min_strength:\n break\n a = roads[i,1]\n d = roads[i,2]\n close = (\n np.logical_or(\n np.logical_and(\n np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]-d) < roads_min_distance),\n np.logical_and(\n np.pi - np.abs(found_roads[:,1]-a) < roads_min_angle,\n np.abs(found_roads[:,2]+d) < roads_min_distance)))\n if not np.any(close):\n found_roads = np.vstack((found_roads, roads[i]))\n if max_roads is not None and found_roads.shape[0] >= max_roads:\n break\n\n return found_roads, im.shape\n\n\n# find begin and end coordinates of an intersection of a box (0, 0, width,\n# height) with a line (given by angle and distance, as per Hough transform)\ndef _get_line_box_cuts(angle, distance, width, height):\n a = np.cos(angle)\n b = np.sin(angle)\n d = distance\n # TODO: handle divide-by-zero\n x0 = d/a\n x1 = (d-b*height)/a\n y0 = d/b\n y1 = (d-a*width)/b\n intersections = []\n if x0 >= 0 and x0 
<= width: intersections.append((x0, 0))\n if x1 >= 0 and x1 <= width: intersections.append((x1, height))\n if y0 >= 0 and y0 <= height: intersections.append((0, y0))\n if y1 >= 0 and y1 <= height: intersections.append((width, y1))\n # TODO: what about degenerate cases?\n if len(intersections) == 0: return None\n assert len(intersections) == 2, (x0, x1, y0, y1)\n return intersections\n\n\n# return a list of pixel coordinates, usable to index 2D ndarrays, that\n# correspond to the shape of line segment with given width\ndef _road_polygon(endpoints, width):\n a, b = endpoints\n a = np.asarray(a)\n b = np.asarray(b)\n n = b-a\n n /= np.linalg.norm(n)\n n *= width / 2\n s = np.dot(np.array([[0, -1], [1, 0]]), n)\n xy = np.array([\n a - n - s,\n a - n + s,\n b + n + s,\n b + n - s\n ])\n x = xy[:,0]\n y = xy[:,1]\n return [x, y]\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class FilmSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'
depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),
allow_null=True)
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'
class TheaterSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Theater
fields = 'id', 'name', 'city', 'films', 'owner'
depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Theater
fields = 'id', 'name', 'city'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = 'id', 'name', 'film_set'
depth = 1
class GenreWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = 'id', 'name', 'film_set'
class FilmSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'
depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),
allow_null=True)
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'
class TheaterSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Theater
fields = 'id', 'name', 'city', 'films', 'owner'
depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Theater
fields = 'id', 'name', 'city'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = User
fields = 'id', 'username', 'films', 'theaters'
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = 'id', 'name', 'film_set'
depth = 1
class GenreWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = 'id', 'name', 'film_set'
class FilmSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'
depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),
allow_null=True)
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'
class TheaterSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Theater
fields = 'id', 'name', 'city', 'films', 'owner'
depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Theater
fields = 'id', 'name', 'city'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserSerializer(serializers.ModelSerializer):
films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.
objects.all())
    theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=
        Theater.objects.all())
class Meta:
model = User
fields = 'id', 'username', 'films', 'theaters'
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = 'id', 'name', 'film_set'
depth = 1
class GenreWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = 'id', 'name', 'film_set'
class FilmSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'
depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),
allow_null=True)
class Meta:
model = Film
fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'
class TheaterSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Theater
fields = 'id', 'name', 'city', 'films', 'owner'
depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Theater
fields = 'id', 'name', 'city'
<|reserved_special_token_1|>
from rest_framework import serializers
from films.models import *
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())
    theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Theater.objects.all())
class Meta:
model = User
fields = ('id', 'username', 'films', 'theaters')
class GenreSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = ('id', 'name', 'film_set')
depth = 1
class GenreWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Genre
fields = ('id', 'name', 'film_set')
class FilmSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Film
fields = ('id', 'title', 'year_prod', 'genre', 'theater_set', 'owner')
depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(), allow_null=True)
class Meta:
model = Film
fields = ('id', 'title', 'year_prod', 'genre', 'theater_set')
class TheaterSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
class Meta:
model = Theater
fields = ('id', 'name', 'city', 'films', 'owner')
depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
class Meta:
model = Theater
fields = ('id', 'name', 'city')
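A hedged sketch (editor's addition, not part of the original module) of how the read/write serializer split above is typically wired into a view; FilmViewSet is an illustration and assumes the models and serializers defined above are importable.
from rest_framework import viewsets

class FilmViewSet(viewsets.ModelViewSet):
    # hypothetical viewset; Film, FilmSerializer and FilmWriteSerializer
    # are the names defined above
    queryset = Film.objects.all()

    def get_serializer_class(self):
        # nested output (depth=1) for reads, flat primary keys for writes
        if self.action in ('list', 'retrieve'):
            return FilmSerializer
        return FilmWriteSerializer

    def perform_create(self, serializer):
        # fills the read-only 'owner' field declared on FilmSerializer
        serializer.save(owner=self.request.user)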
|
flexible
|
{
"blob_id": "e6aa28ae312ea5d7f0f818b7e86b0e76e2e57b48",
"index": 4652,
"step-1": "<mask token>\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-2": "<mask token>\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n depth = 1\n\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-3": "<mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'films', 'theaters'\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n depth = 1\n\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-4": "<mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.\n objects.all())\n theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.\n objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'films', 'theaters'\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n depth = 1\n\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Genre\n fields = 'id', 'name', 'film_set'\n\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set', 'owner'\n depth = 1\n\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(),\n allow_null=True)\n\n\n class Meta:\n model = Film\n fields = 'id', 'title', 'year_prod', 'genre', 'theater_set'\n\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city', 'films', 'owner'\n depth = 1\n\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Theater\n fields = 'id', 'name', 'city'\n",
"step-5": "from rest_framework import serializers\nfrom films.models import *\nfrom django.contrib.auth.models import User\n\nclass UserSerializer(serializers.ModelSerializer):\n films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())\n theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())\n\n class Meta:\n model = User\n fields = ('id', 'username', 'films', 'theaters')\n\nclass GenreSerializer(serializers.ModelSerializer):\n class Meta:\n model = Genre\n fields = ('id', 'name', 'film_set')\n depth = 1\n\nclass GenreWriteSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Genre\n fields = ('id', 'name', 'film_set')\n\nclass FilmSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n\n class Meta:\n model = Film\n fields = ('id', 'title', 'year_prod', 'genre', 'theater_set', 'owner')\n depth = 1\n\nclass FilmWriteSerializer(serializers.ModelSerializer):\n genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(), allow_null=True)\n\n class Meta:\n model = Film\n fields = ('id', 'title', 'year_prod', 'genre', 'theater_set')\n\nclass TheaterSerializer(serializers.ModelSerializer):\n owner = serializers.ReadOnlyField(source='owner.username')\n class Meta:\n model = Theater\n fields = ('id', 'name', 'city', 'films', 'owner')\n depth = 1\n\nclass TheaterWriteSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Theater\n fields = ('id', 'name', 'city')\n",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(Webpage)
<|reserved_special_token_0|>
print(a)
<|reserved_special_token_0|>
print(total_page)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url_header = (
    'http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25&nothing=2016-02-25&pjname=1314'
)
Webpage = urllib.request.urlopen(url_header).read()
Webpage = Webpage.decode('UTF-8')
print(Webpage)
a = re.findall('var m_nRecordCount = (\\d+)', str(Webpage))
print(a)
total_page = math.ceil(int(a[0]) / 20)
print(total_page)
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
import urllib.request
import re
import math
url_header = (
    'http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25&nothing=2016-02-25&pjname=1314'
)
Webpage = urllib.request.urlopen(url_header).read()
Webpage = Webpage.decode('UTF-8')
print(Webpage)
a = re.findall('var m_nRecordCount = (\\d+)', str(Webpage))
print(a)
total_page = math.ceil(int(a[0]) / 20)
print(total_page)
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
import urllib.request
import re
import math
url_header = "http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314"
Webpage = urllib.request.urlopen(url_header).read()
Webpage=Webpage.decode('UTF-8')
# soup = BeautifulSoup(Webpage)
print (Webpage)
a=re.findall(r'var m_nRecordCount = (\d+)',str(Webpage))
print(a)
# page_count=soup.find('script')
# print(page_count)
total_page=math.ceil(int(a[0])/20)
print(total_page)
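A defensive variant (editor's sketch, not part of the original script): the same record-count extraction, but it fails with a clear error when the page layout changes instead of raising IndexError on a[0].
def get_total_pages(html, page_size=20):
    # extract 'var m_nRecordCount = N' from the page and turn it into a page count
    matches = re.findall(r'var m_nRecordCount = (\d+)', html)
    if not matches:
        raise ValueError('record count not found in page')
    return math.ceil(int(matches[0]) / page_size)

print(get_total_pages(Webpage))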
|
flexible
|
{
"blob_id": "62a86bd33755510f0d71f4920e63be1a3ce8c563",
"index": 6304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(Webpage)\n<mask token>\nprint(a)\n<mask token>\nprint(total_page)\n",
"step-3": "<mask token>\nurl_header = (\n 'http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314'\n )\nWebpage = urllib.request.urlopen(url_header).read()\nWebpage = Webpage.decode('UTF-8')\nprint(Webpage)\na = re.findall('var m_nRecordCount = (\\\\d+)', str(Webpage))\nprint(a)\ntotal_page = math.ceil(int(a[0]) / 20)\nprint(total_page)\n",
"step-4": "from bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport math\nurl_header = (\n 'http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314'\n )\nWebpage = urllib.request.urlopen(url_header).read()\nWebpage = Webpage.decode('UTF-8')\nprint(Webpage)\na = re.findall('var m_nRecordCount = (\\\\d+)', str(Webpage))\nprint(a)\ntotal_page = math.ceil(int(a[0]) / 20)\nprint(total_page)\n",
"step-5": "from bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport math\n\nurl_header = \"http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314\"\nWebpage = urllib.request.urlopen(url_header).read()\nWebpage=Webpage.decode('UTF-8')\n# soup = BeautifulSoup(Webpage)\nprint (Webpage)\na=re.findall(r'var m_nRecordCount = (\\d+)',str(Webpage))\nprint(a)\n# page_count=soup.find('script')\n# print(page_count)\ntotal_page=math.ceil(int(a[0])/20)\nprint(total_page)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs) == 0:
return ''
        if len(strs) == 1:
            return strs[0]
        res = []
        # use the shortest string as the prefix candidate so that
        # strs[j][i] below can never go out of range
        min_ = strs[0]
        for i in range(len(strs)):
            if len(min_) > len(strs[i]):
                min_ = strs[i]
for i in range(len(min_)):
count = 0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count += 1
if count == len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs) == 0:
return ''
        if len(strs) == 1:
            return strs[0]
        res = []
        # use the shortest string as the prefix candidate so that
        # strs[j][i] below can never go out of range
        min_ = strs[0]
        for i in range(len(strs)):
            if len(min_) > len(strs[i]):
                min_ = strs[i]
for i in range(len(min_)):
count = 0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count += 1
if count == len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
if __name__ == '__main__':
a = ['abc', 'abcc', 'asc', 'abcd']
b = ['c', 'c']
print(Solution().longestCommonPrefix(b))
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/14 下午6:06
# @Author : Huang HUi
# @Site :
# @File : Longest Common Prefix.py
# @Software: PyCharm
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ''
        if len(strs)==1:
            return strs[0]
        res=[]
        # use the shortest string as the prefix candidate so that
        # strs[j][i] below can never go out of range
        min_=strs[0]
        for i in range(len(strs)):
            if len(min_)>len(strs[i]):
                min_=strs[i]
for i in range(len(min_)):
count=0
for j in range(len(strs)):
if min_[i] in strs[j][i]:
count+=1
if count==len(strs):
res.append(min_[i])
else:
break
return ''.join(res)
if __name__ == '__main__':
a=["abc","abcc","asc","abcd"]
b=["c","c"]
print(Solution().longestCommonPrefix(b))
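A quick cross-check (editor's addition, not part of the original solution): the standard library computes the same character-wise common prefix.
import os.path

assert os.path.commonprefix(['abc', 'abcc', 'asc', 'abcd']) == 'a'
assert os.path.commonprefix(['c', 'c']) == 'c'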
|
flexible
|
{
"blob_id": "1aed8e92a31ee42a3a609123af927f7074598ec1",
"index": 1820,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n if len(strs) == 1:\n return strs\n res = []\n min_ = strs[0]\n for i in range(len(strs)):\n if min_ > strs[i]:\n min_ = strs[i]\n for i in range(len(min_)):\n count = 0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count += 1\n if count == len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\n<mask token>\n",
"step-4": "class Solution(object):\n\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return ''\n if len(strs) == 1:\n return strs\n res = []\n min_ = strs[0]\n for i in range(len(strs)):\n if min_ > strs[i]:\n min_ = strs[i]\n for i in range(len(min_)):\n count = 0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count += 1\n if count == len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\nif __name__ == '__main__':\n a = ['abc', 'abcc', 'asc', 'abcd']\n b = ['c', 'c']\n print(Solution().longestCommonPrefix(b))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/7/14 下午6:06\n# @Author : Huang HUi\n# @Site : \n# @File : Longest Common Prefix.py\n# @Software: PyCharm\n\nclass Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs)==0:\n return ''\n if len(strs)==1 :\n return strs\n res=[]\n min_=strs[0]\n for i in range(len(strs)):\n if min_>strs[i]:\n min_=strs[i]\n for i in range(len(min_)):\n count=0\n for j in range(len(strs)):\n if min_[i] in strs[j][i]:\n count+=1\n if count==len(strs):\n res.append(min_[i])\n else:\n break\n return ''.join(res)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n a=[\"abc\",\"abcc\",\"asc\",\"abcd\"]\n b=[\"c\",\"c\"]\n print(Solution().longestCommonPrefix(b))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
<|reserved_special_token_0|>
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
<|reserved_special_token_0|>
@staticmethod
def _register_user(request_form):
"""
        This method requests an unlock code from Elster for users who are not yet registered.
        If successful, the user is registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
<|reserved_special_token_0|>
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self
)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _(
'form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry(
'Confirmed registration data privacy', request.
remote_addr, stored_data['idnr'],
'registration_confirm_data_privacy', stored_data[
'registration_confirm_data_privacy'])
create_audit_log_confirmation_entry(
'Confirmed registration terms of service', request.
remote_addr, stored_data['idnr'],
'registration_confirm_terms_of_service', stored_data[
'registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry(
'Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry(
'Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
render_info.next_url = self.url_for_step(
UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info('Could not request unlock code for user',
exc_info=True)
pass
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep
.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
        This method requests an unlock code from Elster for users who are not yet registered.
        If successful, the user is registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
_DEBUG_DATA = UnlockCodeRequestInputStep, {'idnr': '04452397687', 'dob':
datetime.date(1985, 1, 1), 'registration_confirm_data_privacy':
True, 'registration_confirm_terms_of_service': True,
'registration_confirm_incomes': True, 'registration_confirm_e_data':
True}
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(
'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],
endpoint=endpoint)
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self
)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _(
'form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry(
'Confirmed registration data privacy', request.
remote_addr, stored_data['idnr'],
'registration_confirm_data_privacy', stored_data[
'registration_confirm_data_privacy'])
create_audit_log_confirmation_entry(
'Confirmed registration terms of service', request.
remote_addr, stored_data['idnr'],
'registration_confirm_terms_of_service', stored_data[
'registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry(
'Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry(
'Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
render_info.next_url = self.url_for_step(
UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info('Could not request unlock code for user',
exc_info=True)
pass
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep
.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
        This method requests an unlock code from Elster for users who are not yet registered.
        If successful, the user is registered.
:param request_form: The form attribute of the request. It should contain an idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(
request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)
import datetime
from flask import request
from flask_babel import _
from markupsafe import escape
from app import app
from app.data_access.audit_log_controller import create_audit_log_confirmation_entry
from app.data_access.user_controller import user_exists, create_user
from app.data_access.user_controller_errors import UserAlreadyExistsError
from app.elster_client import elster_client
from app.elster_client.elster_errors import ElsterProcessNotSuccessful
from app.forms.flows.multistep_flow import MultiStepFlow
from app.forms.steps.unlock_code_request_steps import UnlockCodeRequestInputStep, UnlockCodeRequestSuccessStep, \
UnlockCodeRequestFailureStep
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
_DEBUG_DATA = (
UnlockCodeRequestInputStep,
{
'idnr': '04452397687',
'dob': datetime.date(1985, 1, 1),
'registration_confirm_data_privacy': True,
'registration_confirm_terms_of_service': True,
'registration_confirm_incomes': True,
'registration_confirm_e_data': True,
}
)
def __init__(self, endpoint):
super(UnlockCodeRequestMultiStepFlow, self).__init__(
title=_('form.auth-request.title'),
steps=[
UnlockCodeRequestInputStep,
UnlockCodeRequestFailureStep,
UnlockCodeRequestSuccessStep
],
endpoint=endpoint,
)
# TODO: Use inheritance to clean up this method
def _handle_specifics_for_step(self, step, render_info, stored_data):
render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self)._handle_specifics_for_step(step, render_info, stored_data)
if isinstance(step, UnlockCodeRequestInputStep):
render_info.additional_info['next_button_label'] = _('form.register')
if request.method == 'POST' and render_info.form.validate():
create_audit_log_confirmation_entry('Confirmed registration data privacy', request.remote_addr,
stored_data['idnr'], 'registration_confirm_data_privacy',
stored_data['registration_confirm_data_privacy'])
create_audit_log_confirmation_entry('Confirmed registration terms of service', request.remote_addr,
stored_data['idnr'], 'registration_confirm_terms_of_service',
stored_data['registration_confirm_terms_of_service'])
create_audit_log_confirmation_entry('Confirmed registration incomes', request.remote_addr,
stored_data['idnr'], 'registration_confirm_incomes',
stored_data['registration_confirm_incomes'])
create_audit_log_confirmation_entry('Confirmed registration edata', request.remote_addr,
stored_data['idnr'], 'registration_confirm_e_data',
stored_data['registration_confirm_e_data'])
try:
self._register_user(stored_data)
                    # on success, skip the failure step the flow would otherwise advance to
render_info.next_url = self.url_for_step(UnlockCodeRequestSuccessStep.name)
except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
app.logger.info("Could not request unlock code for user", exc_info=True)
pass # go to failure step
elif isinstance(step, UnlockCodeRequestFailureStep):
render_info.next_url = None
elif isinstance(step, UnlockCodeRequestSuccessStep):
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep.name)
return render_info, stored_data
@staticmethod
def _register_user(request_form):
"""
        Request an unlock code from Elster for a user who is not yet registered.
        If the request succeeds, the user is registered.

        :param request_form: The form data of the request; it must contain an
            idnr and a dob element.
"""
idnr = request_form['idnr']
if user_exists(idnr):
raise UserAlreadyExistsError(idnr)
response = elster_client.send_unlock_code_request_with_elster(request_form, request.remote_addr)
request_id = escape(response['elster_request_id'])
create_user(idnr, request_form['dob'].strftime("%d.%m.%Y"), request_id)
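
The flow above isolates the Elster call in a small static helper, which makes it straightforward to test without a running server. Below is a minimal, hedged sketch of such a test using unittest.mock; the patch target app.forms.flows.unlock_code_request_flow is an assumed module path for illustration, as are the test name and the fake request id.

# Hedged test sketch (not part of the original module).
import datetime
from unittest.mock import patch

def test_register_user_creates_user():
    form = {'idnr': '04452397687', 'dob': datetime.date(1985, 1, 1)}
    module = 'app.forms.flows.unlock_code_request_flow'  # assumed path
    with patch(module + '.user_exists', return_value=False), \
         patch(module + '.create_user') as create_user_mock, \
         patch(module + '.elster_client') as elster_mock, \
         patch(module + '.request') as request_mock:
        request_mock.remote_addr = '127.0.0.1'
        elster_mock.send_unlock_code_request_with_elster.return_value = \
            {'elster_request_id': 'r-123'}
        UnlockCodeRequestMultiStepFlow._register_user(form)
        # dob is persisted in the dd.mm.yyyy format used by create_user above
        create_user_mock.assert_called_once_with('04452397687', '01.01.1985', 'r-123')
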
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import db
# from app.main.forms import [list forms here]
from app.models import User
from app.main import bp
@bp.route('/')
@bp.route('/index')
@login_required
def index():
return render_template('index.html')
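
The blueprint above only declares routes; it still has to be registered on an application. A minimal sketch of a Flask application factory doing so, assuming the usual package layout (the factory itself is not part of this file):

# Hedged sketch: wiring the blueprint into an app factory.
from flask import Flask

def create_app():
    app = Flask(__name__)
    # Import inside the factory to avoid circular imports with the app package.
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    # Note: index() is guarded by @login_required, so a real factory would
    # also initialize flask_login's LoginManager here.
    return app
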
#!/usr/bin/env python
from google.appengine.ext.webapp import template
from google.appengine.ext import ndb
import logging
import os.path
import webapp2
import json
from webapp2_extras import auth
from webapp2_extras import sessions
from webapp2_extras.auth import InvalidAuthIdError
from webapp2_extras.auth import InvalidPasswordError
import tip
def user_required(handler):
"""
Decorator that checks if there's a user associated with the current session.
Will also fail if there's no session present.
"""
def check_login(self, *args, **kwargs):
auth = self.auth
if not auth.get_user_by_session():
self.redirect(self.uri_for('login'), abort=True)
else:
return handler(self, *args, **kwargs)
return check_login
class BaseHandler(webapp2.RequestHandler):
@webapp2.cached_property
def auth(self):
"""Shortcut to access the auth instance as a property."""
return auth.get_auth()
@webapp2.cached_property
def user_info(self):
"""Shortcut to access a subset of the user attributes that are stored
in the session.
The list of attributes to store in the session is specified in
config['webapp2_extras.auth']['user_attributes'].
:returns
A dictionary with most user information
"""
return self.auth.get_user_by_session()
@webapp2.cached_property
def user(self):
"""Shortcut to access the current logged in user.
Unlike user_info, it fetches information from the persistence layer and
returns an instance of the underlying model.
:returns
The instance of the user model associated to the logged in user.
"""
u = self.user_info
return self.user_model.get_by_id(u['user_id']) if u else None
@webapp2.cached_property
def user_model(self):
"""Returns the implementation of the user model.
It is consistent with config['webapp2_extras.auth']['user_model'], if set.
"""
return self.auth.store.user_model
@webapp2.cached_property
def session(self):
"""Shortcut to access the current session."""
return self.session_store.get_session(backend="datastore")
def render_template(self, view_filename, params=None):
if not params:
params = {}
user = self.user_info
params['user'] = user
path = os.path.join(os.path.dirname(__file__), 'views', view_filename)
self.response.out.write(template.render(path, params))
def send_json(self, message):
self.response.headers['Content-Type'] = 'application/json'
self.response.out.write(json.dumps(message))
def display_message(self, message):
"""Utility function to display a template with a simple message."""
params = {
'message': message
}
self.render_template('message.html', params)
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
class MainHandler(BaseHandler):
def get(self):
user = self.user
if not user:
self.render_template('about.html')
else:
params = {
'balance': user.balance,
}
self.render_template('home.html', params)
class AboutHandler(BaseHandler):
def get(self):
self.render_template('about.html')
class TrendingHandler(BaseHandler):
def get(self):
self.render_template('trending.html')
class TipHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
        failed = False
user = self.user
tipReceiver = self.request.get('tipReceiver')
tipReceiver = self.user_model.get_by_auth_id(tipReceiver)
amount = self.request.get('tip')
amount = float(amount)
try:
tip.tip(user, tipReceiver, amount)
        except Exception:
            failed = True
self._serve_page(failed)
def _serve_page(self, failed=False):
params = {
'failed': failed
}
self.render_template('tip.html', params)
# Module-level helper: AddCreditsHandler and ProfileHandler below call this
# as a plain function, so it must live at module scope, not on TipHandler.
def serve_profile_page(self):
    user = self.user
    params = {
        'auth_id': user.auth_ids[0],
        'first_name': user.name,
        'last_name': user.last_name,
        'email_address': user.email_address,
        'balance': user.balance,
    }
    self.render_template('profile.html', params)
class AddCreditsHandler(BaseHandler):
@user_required
def get(self):
self._serve_page()
@user_required
def post(self):
user = self.user
credits = self.request.get('credits')
credits = float(credits)
user.balance += credits
user.put()
        # Use a redirect here instead of rendering the profile page directly
serve_profile_page(self)
def _serve_page(self):
user = self.user
params = {
}
self.render_template('add_credits.html', params)
class LogHandler(BaseHandler):
@user_required
def get(self):
user = self.user
keys = tip.TipTransactionLogShardConfig.all_keys(user)
logs = keys[0].get()
if logs:
message = { 'logs': logs.logs }
else:
message = None
self.send_json(message)
class ProfileHandler(BaseHandler):
@user_required
def get(self):
serve_profile_page(self)
class SignupHandler(BaseHandler):
def get(self):
self.render_template('signup.html')
def post(self):
user_name = self.request.get('username')
email = self.request.get('email')
name = self.request.get('name')
password = self.request.get('password')
last_name = self.request.get('lastname')
unique_properties = ['email_address']
user_data = self.user_model.create_user(user_name,
unique_properties,
email_address=email, name=name, password_raw=password,
last_name=last_name, balance=float(0), tip_log_count=0, verified=False)
if not user_data[0]: #user_data is a tuple
            self.display_message('Unable to create user %s because of '
                                 'duplicate keys %s' % (user_name, user_data[1]))
return
user = user_data[1]
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='v', user_id=user_id,
signup_token=token, _full=True)
        msg = ('Send an email to user in order to verify their address. '
               'They will be able to do so by visiting <a href="{url}">{url}</a>')
self.display_message(msg.format(url=verification_url))
class ForgotPasswordHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
user = self.user_model.get_by_auth_id(username)
if not user:
logging.info('Could not find any user entry for username %s', username)
self._serve_page(not_found=True)
return
user_id = user.get_id()
token = self.user_model.create_signup_token(user_id)
verification_url = self.uri_for('verification', type='p', user_id=user_id,
signup_token=token, _full=True)
        msg = ('Send an email to user in order to reset their password. '
               'They will be able to do so by visiting <a href="{url}">{url}</a>')
self.display_message(msg.format(url=verification_url))
def _serve_page(self, not_found=False):
username = self.request.get('username')
params = {
'username': username,
'not_found': not_found
}
self.render_template('forgot.html', params)
class VerificationHandler(BaseHandler):
def get(self, *args, **kwargs):
user = None
user_id = kwargs['user_id']
signup_token = kwargs['signup_token']
verification_type = kwargs['type']
# it should be something more concise like
# self.auth.get_user_by_token(user_id, signup_token)
# unfortunately the auth interface does not (yet) allow to manipulate
# signup tokens concisely
user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,
'signup')
if not user:
logging.info('Could not find any user with id "%s" signup token "%s"',
user_id, signup_token)
self.abort(404)
# store user data in the session
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if verification_type == 'v':
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), signup_token)
if not user.verified:
user.verified = True
user.put()
self.display_message('User email address has been verified.')
return
elif verification_type == 'p':
# supply user to the page
params = {
'user': user,
'token': signup_token
}
self.render_template('resetpassword.html', params)
else:
logging.info('verification type not supported')
self.abort(404)
class SetPasswordHandler(BaseHandler):
@user_required
def post(self):
password = self.request.get('password')
old_token = self.request.get('t')
if not password or password != self.request.get('confirm_password'):
self.display_message('passwords do not match')
return
user = self.user
user.set_password(password)
user.put()
# remove signup token, we don't want users to come back with an old link
self.user_model.delete_signup_token(user.get_id(), old_token)
self.display_message('Password updated')
class LoginHandler(BaseHandler):
def get(self):
self._serve_page()
def post(self):
username = self.request.get('username')
password = self.request.get('password')
try:
u = self.auth.get_user_by_password(username, password, remember=True,
save_session=True)
user = self.user
tip.coalesce_balance(user)
self.redirect(self.uri_for('home'))
except (InvalidAuthIdError, InvalidPasswordError) as e:
logging.info('Login failed for user %s because of %s', username, type(e))
self._serve_page(True)
def _serve_page(self, failed=False):
username = self.request.get('username')
params = {
'username': username,
'failed': failed
}
self.render_template('login.html', params)
class LogoutHandler(BaseHandler):
def get(self):
self.auth.unset_session()
self.redirect(self.uri_for('home'))
config = {
'webapp2_extras.auth': {
'user_model': 'models.User',
'user_attributes': ['name']
},
'webapp2_extras.sessions': {
'secret_key': 'YOUR_SECRET_KEY'
}
}
app = webapp2.WSGIApplication([
webapp2.Route('/', MainHandler, name='home'),
webapp2.Route('/home', MainHandler, name='home'),
webapp2.Route('/about', AboutHandler, name='about'),
webapp2.Route('/trending', TrendingHandler, name='trending'),
webapp2.Route('/tip', TipHandler, name='tip'),
webapp2.Route('/add_credits', AddCreditsHandler, name='add_credits'),
webapp2.Route('/get_logs', LogHandler, name='get_logs'),
webapp2.Route('/profile', ProfileHandler, name='profile'),
webapp2.Route('/signup', SignupHandler),
    webapp2.Route(r'/<type:v|p>/<user_id:\d+>-<signup_token:.+>',
handler=VerificationHandler, name='verification'),
webapp2.Route('/password', SetPasswordHandler),
webapp2.Route('/forgot', ForgotPasswordHandler, name='forgot'),
webapp2.Route('/login', LoginHandler, name='login'),
webapp2.Route('/logout', LogoutHandler, name='logout'),
], debug=True, config=config)
logging.getLogger().setLevel(logging.DEBUG)
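
Since app is a plain WSGI application, individual routes can be smoke-tested without starting a server by building a request and dispatching it through the router. A hedged sketch (handlers that render templates or touch the datastore additionally need the App Engine SDK environment or testbed):

# Hedged test sketch, following webapp2's request/response testing pattern.
def smoke_test_about():
    request = webapp2.Request.blank('/about')   # blank GET request
    response = request.get_response(app)        # run it through the WSGI app
    assert response.status_int == 200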
webapp2.Route('/logout', LogoutHandler, name='logout')], debug=True,\n config=config)\nlogging.getLogger().setLevel(logging.DEBUG)\n",
"step-5": "#!/usr/bin/env python\n\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import ndb\n\nimport logging\nimport os.path\nimport webapp2\nimport json\n\nfrom webapp2_extras import auth\nfrom webapp2_extras import sessions\n\nfrom webapp2_extras.auth import InvalidAuthIdError\nfrom webapp2_extras.auth import InvalidPasswordError\n\nimport tip\n\ndef user_required(handler):\n \"\"\"\n Decorator that checks if there's a user associated with the current session.\n Will also fail if there's no session present.\n \"\"\"\n def check_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect(self.uri_for('login'), abort=True)\n else:\n return handler(self, *args, **kwargs)\n\n return check_login\n\nclass BaseHandler(webapp2.RequestHandler):\n @webapp2.cached_property\n def auth(self):\n \"\"\"Shortcut to access the auth instance as a property.\"\"\"\n return auth.get_auth()\n\n @webapp2.cached_property\n def user_info(self):\n \"\"\"Shortcut to access a subset of the user attributes that are stored\n in the session.\n\n The list of attributes to store in the session is specified in\n config['webapp2_extras.auth']['user_attributes'].\n :returns\n A dictionary with most user information\n \"\"\"\n return self.auth.get_user_by_session()\n\n @webapp2.cached_property\n def user(self):\n \"\"\"Shortcut to access the current logged in user.\n\n Unlike user_info, it fetches information from the persistence layer and\n returns an instance of the underlying model.\n\n :returns\n The instance of the user model associated to the logged in user.\n \"\"\"\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None\n\n @webapp2.cached_property\n def user_model(self):\n \"\"\"Returns the implementation of the user model.\n\n It is consistent with config['webapp2_extras.auth']['user_model'], if set.\n \"\"\" \n return self.auth.store.user_model\n\n @webapp2.cached_property\n def session(self):\n \"\"\"Shortcut to access the current session.\"\"\"\n return self.session_store.get_session(backend=\"datastore\")\n\n def render_template(self, view_filename, params=None):\n if not params:\n params = {}\n user = self.user_info\n params['user'] = user\n path = os.path.join(os.path.dirname(__file__), 'views', view_filename)\n self.response.out.write(template.render(path, params))\n\n def send_json(self, message):\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(json.dumps(message))\n\n def display_message(self, message):\n \"\"\"Utility function to display a template with a simple message.\"\"\"\n params = {\n 'message': message\n }\n self.render_template('message.html', params)\n\n # this is needed for webapp2 sessions to work\n def dispatch(self):\n # Get a session store for this request.\n self.session_store = sessions.get_store(request=self.request)\n\n try:\n # Dispatch the request.\n webapp2.RequestHandler.dispatch(self)\n finally:\n # Save all sessions.\n self.session_store.save_sessions(self.response)\n\nclass MainHandler(BaseHandler):\n def get(self):\n user = self.user\n if not user:\n self.render_template('about.html')\n else:\n params = {\n 'balance': user.balance,\n }\n self.render_template('home.html', params)\n\nclass AboutHandler(BaseHandler):\n def get(self):\n self.render_template('about.html')\n\nclass TrendingHandler(BaseHandler):\n def get(self):\n self.render_template('trending.html')\n\nclass TipHandler(BaseHandler):\n\n @user_required\n def get(self):\n 
self._serve_page()\n\n @user_required\n def post(self):\n failed=False\n user = self.user\n tipReceiver = self.request.get('tipReceiver')\n tipReceiver = self.user_model.get_by_auth_id(tipReceiver)\n amount = self.request.get('tip')\n amount = float(amount)\n\n try:\n tip.tip(user, tipReceiver, amount)\n except:\n failed=True\n\n self._serve_page(failed)\n\n def _serve_page(self, failed=False):\n params = {\n 'failed': failed\n }\n self.render_template('tip.html', params)\n\ndef serve_profile_page(self):\n user = self.user\n params = {\n 'auth_id': user.auth_ids[0],\n 'first_name': user.name,\n 'last_name': user.last_name,\n 'email_address': user.email_address,\n 'balance': user.balance,\n }\n\n self.render_template('profile.html', params)\n\nclass AddCreditsHandler(BaseHandler):\n\n @user_required\n def get(self):\n self._serve_page()\n\n @user_required\n def post(self):\n user = self.user\n credits = self.request.get('credits')\n credits = float(credits)\n user.balance += credits \n user.put()\n\n #User a redirect here instead\n serve_profile_page(self)\n\n def _serve_page(self):\n user = self.user\n params = {\n }\n self.render_template('add_credits.html', params)\n\nclass LogHandler(BaseHandler):\n @user_required\n def get(self):\n user = self.user\n keys = tip.TipTransactionLogShardConfig.all_keys(user)\n logs = keys[0].get()\n if logs:\n message = { 'logs': logs.logs }\n else:\n message = None\n self.send_json(message)\n\nclass ProfileHandler(BaseHandler):\n @user_required\n def get(self):\n serve_profile_page(self)\n\nclass SignupHandler(BaseHandler):\n def get(self):\n self.render_template('signup.html')\n\n def post(self):\n user_name = self.request.get('username')\n email = self.request.get('email')\n name = self.request.get('name')\n password = self.request.get('password')\n last_name = self.request.get('lastname')\n\n unique_properties = ['email_address']\n user_data = self.user_model.create_user(user_name,\n unique_properties,\n email_address=email, name=name, password_raw=password,\n last_name=last_name, balance=float(0), tip_log_count=0, verified=False)\n if not user_data[0]: #user_data is a tuple\n self.display_message('Unable to create user for email %s because of \\\n duplicate keys %s' % (user_name, user_data[1]))\n return\n \n user = user_data[1]\n user_id = user.get_id()\n\n token = self.user_model.create_signup_token(user_id)\n\n verification_url = self.uri_for('verification', type='v', user_id=user_id,\n signup_token=token, _full=True)\n\n msg = 'Send an email to user in order to verify their address. \\\n They will be able to do so by visiting <a href=\"{url}\">{url}</a>'\n\n self.display_message(msg.format(url=verification_url))\n\nclass ForgotPasswordHandler(BaseHandler):\n def get(self):\n self._serve_page()\n\n def post(self):\n username = self.request.get('username')\n\n user = self.user_model.get_by_auth_id(username)\n if not user:\n logging.info('Could not find any user entry for username %s', username)\n self._serve_page(not_found=True)\n return\n\n user_id = user.get_id()\n token = self.user_model.create_signup_token(user_id)\n\n verification_url = self.uri_for('verification', type='p', user_id=user_id,\n signup_token=token, _full=True)\n\n msg = 'Send an email to user in order to reset their password. 
\\\n They will be able to do so by visiting <a href=\"{url}\">{url}</a>'\n\n self.display_message(msg.format(url=verification_url))\n \n def _serve_page(self, not_found=False):\n username = self.request.get('username')\n params = {\n 'username': username,\n 'not_found': not_found\n }\n self.render_template('forgot.html', params)\n\n\nclass VerificationHandler(BaseHandler):\n def get(self, *args, **kwargs):\n user = None\n user_id = kwargs['user_id']\n signup_token = kwargs['signup_token']\n verification_type = kwargs['type']\n\n # it should be something more concise like\n # self.auth.get_user_by_token(user_id, signup_token)\n # unfortunately the auth interface does not (yet) allow to manipulate\n # signup tokens concisely\n user, ts = self.user_model.get_by_auth_token(int(user_id), signup_token,\n 'signup')\n\n if not user:\n logging.info('Could not find any user with id \"%s\" signup token \"%s\"',\n user_id, signup_token)\n self.abort(404)\n \n # store user data in the session\n self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)\n\n if verification_type == 'v':\n # remove signup token, we don't want users to come back with an old link\n self.user_model.delete_signup_token(user.get_id(), signup_token)\n\n if not user.verified:\n user.verified = True\n user.put()\n\n self.display_message('User email address has been verified.')\n return\n elif verification_type == 'p':\n # supply user to the page\n params = {\n 'user': user,\n 'token': signup_token\n }\n self.render_template('resetpassword.html', params)\n else:\n logging.info('verification type not supported')\n self.abort(404)\n\nclass SetPasswordHandler(BaseHandler):\n\n @user_required\n def post(self):\n password = self.request.get('password')\n old_token = self.request.get('t')\n\n if not password or password != self.request.get('confirm_password'):\n self.display_message('passwords do not match')\n return\n\n user = self.user\n user.set_password(password)\n user.put()\n\n # remove signup token, we don't want users to come back with an old link\n self.user_model.delete_signup_token(user.get_id(), old_token)\n \n self.display_message('Password updated')\n\nclass LoginHandler(BaseHandler):\n def get(self):\n self._serve_page()\n\n def post(self):\n username = self.request.get('username')\n password = self.request.get('password')\n try:\n u = self.auth.get_user_by_password(username, password, remember=True,\n save_session=True)\n user = self.user\n tip.coalesce_balance(user)\n self.redirect(self.uri_for('home'))\n except (InvalidAuthIdError, InvalidPasswordError) as e:\n logging.info('Login failed for user %s because of %s', username, type(e))\n self._serve_page(True)\n\n def _serve_page(self, failed=False):\n username = self.request.get('username')\n params = {\n 'username': username,\n 'failed': failed\n }\n self.render_template('login.html', params)\n\nclass LogoutHandler(BaseHandler):\n def get(self):\n self.auth.unset_session()\n self.redirect(self.uri_for('home'))\n\nconfig = {\n 'webapp2_extras.auth': {\n 'user_model': 'models.User',\n 'user_attributes': ['name']\n },\n 'webapp2_extras.sessions': {\n 'secret_key': 'YOUR_SECRET_KEY'\n }\n}\n\napp = webapp2.WSGIApplication([\n webapp2.Route('/', MainHandler, name='home'),\n webapp2.Route('/home', MainHandler, name='home'),\n webapp2.Route('/about', AboutHandler, name='about'),\n webapp2.Route('/trending', TrendingHandler, name='trending'),\n webapp2.Route('/tip', TipHandler, name='tip'),\n webapp2.Route('/add_credits', AddCreditsHandler, name='add_credits'),\n 
webapp2.Route('/get_logs', LogHandler, name='get_logs'),\n webapp2.Route('/profile', ProfileHandler, name='profile'),\n webapp2.Route('/signup', SignupHandler),\n webapp2.Route('/<type:v|p>/<user_id:\\d+>-<signup_token:.+>',\n handler=VerificationHandler, name='verification'),\n webapp2.Route('/password', SetPasswordHandler),\n webapp2.Route('/forgot', ForgotPasswordHandler, name='forgot'),\n webapp2.Route('/login', LoginHandler, name='login'),\n webapp2.Route('/logout', LogoutHandler, name='logout'),\n], debug=True, config=config)\n\nlogging.getLogger().setLevel(logging.DEBUG)\n",
"step-ids": [
40,
42,
48,
49,
51
]
}
|
[
40,
42,
48,
49,
51
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Albedo:', albedo[x[0], y[0]])
print('Albedo in RGB space:', albedo[x[0], y[0]] * 255)
<|reserved_special_token_0|>
plt.subplot(1, 2, 1)
plt.imshow(ball)
plt.subplot(1, 2, 2)
plt.imshow(albedo * shading)
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ball = plt.imread('ball.png')
albedo = plt.imread('ball_albedo.png')
shading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)
x, y, z = np.where(albedo != 0)
print('Albedo:', albedo[x[0], y[0]])
print('Albedo in RGB space:', albedo[x[0], y[0]] * 255)
albedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = 0, 1.0, 0
plt.subplot(1, 2, 1)
plt.imshow(ball)
plt.subplot(1, 2, 2)
plt.imshow(albedo * shading)
plt.show()
<|reserved_special_token_1|>
import cv2
import matplotlib.pyplot as plt
import numpy as np
ball = plt.imread('ball.png')
albedo = plt.imread('ball_albedo.png')
shading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)
x, y, z = np.where(albedo != 0)
print('Albedo:', albedo[x[0], y[0]])
print('Albedo in RGB space:', albedo[x[0], y[0]] * 255)
albedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = 0, 1.0, 0
plt.subplot(1, 2, 1)
plt.imshow(ball)
plt.subplot(1, 2, 2)
plt.imshow(albedo * shading)
plt.show()
<|reserved_special_token_1|>
import cv2
import matplotlib.pyplot as plt
import numpy as np
ball = plt.imread('ball.png')
albedo = plt.imread('ball_albedo.png')
shading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)
x,y,z = np.where(albedo != 0)
print('Albedo:', albedo[x[0],y[0]])
print("Albedo in RGB space:", albedo[x[0],y[0]]*255)
# conversion of shading to RGB mapped the values to [0,1], therefore (0,255,0) = (0,1,0)
albedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = (0, 1.0, 0)
plt.subplot(1,2,1)
plt.imshow(ball)
plt.subplot(1,2,2)
plt.imshow(albedo * shading)
plt.show()
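A note on the indexing trick above: on an RGB image, np.where returns three
index arrays (rows, cols, channels); dropping the channel array with [:-1]
leaves per-pixel indices, so the assignment writes a whole RGB triple into
every matched pixel. A minimal standalone sketch of the same pattern, using
a made-up 2x2 image (not one of the original files):

import numpy as np

# Hypothetical 2x2 RGB image: one black pixel, three non-black pixels.
img = np.array([[[0.0, 0.0, 0.0], [0.2, 0.4, 0.6]],
                [[0.1, 0.1, 0.1], [0.9, 0.8, 0.7]]], dtype=np.float32)

# np.where yields (rows, cols, channels); [:-1] keeps only (rows, cols),
# so every non-black pixel is recolored as a whole.
rows, cols = np.where(img != (0, 0, 0))[:-1]
img[rows, cols] = (0, 1.0, 0)
print(img)  # the three non-black pixels are now pure green in [0, 1] space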
|
flexible
|
{
"blob_id": "cc6f70e328b774972e272e9600274dfd9fca93ee",
"index": 3073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Albedo:', albedo[x[0], y[0]])\nprint('Albedo in RGB space:', albedo[x[0], y[0]] * 255)\n<mask token>\nplt.subplot(1, 2, 1)\nplt.imshow(ball)\nplt.subplot(1, 2, 2)\nplt.imshow(albedo * shading)\nplt.show()\n",
"step-3": "<mask token>\nball = plt.imread('ball.png')\nalbedo = plt.imread('ball_albedo.png')\nshading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)\nx, y, z = np.where(albedo != 0)\nprint('Albedo:', albedo[x[0], y[0]])\nprint('Albedo in RGB space:', albedo[x[0], y[0]] * 255)\nalbedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = 0, 1.0, 0\nplt.subplot(1, 2, 1)\nplt.imshow(ball)\nplt.subplot(1, 2, 2)\nplt.imshow(albedo * shading)\nplt.show()\n",
"step-4": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nball = plt.imread('ball.png')\nalbedo = plt.imread('ball_albedo.png')\nshading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)\nx, y, z = np.where(albedo != 0)\nprint('Albedo:', albedo[x[0], y[0]])\nprint('Albedo in RGB space:', albedo[x[0], y[0]] * 255)\nalbedo[np.where(albedo[:, :] != (0, 0, 0))[:-1]] = 0, 1.0, 0\nplt.subplot(1, 2, 1)\nplt.imshow(ball)\nplt.subplot(1, 2, 2)\nplt.imshow(albedo * shading)\nplt.show()\n",
"step-5": "import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nball = plt.imread('ball.png')\nalbedo = plt.imread('ball_albedo.png')\nshading = cv2.cvtColor(plt.imread('ball_shading.png'), cv2.COLOR_GRAY2RGB)\n\nx,y,z = np.where(albedo != 0)\nprint('Albedo:', albedo[x[0],y[0]])\nprint(\"Albedo in RGB space:\", albedo[x[0],y[0]]*255)\n\n# conversion of shading to RGB mapped the values to [0,1], therefore (0,255,0) = (0,1,0)\nalbedo[np.where(albedo[:,:,] != (0,0,0))[:-1]] = (0,1.,0)\n\nplt.subplot(1,2,1)\nplt.imshow(ball)\nplt.subplot(1,2,2)\nplt.imshow(albedo * shading)\n\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import PyPDF2
from pathlib import Path
def get_filenames():
"""
Get PDF files not yet reordered in the current directory
:return: list of PDF file names
"""
filenames = []
for filename in Path('.').glob('*.pdf'):
if 'reordered' not in filename.stem:
filenames.append(filename)
return filenames
def appendix_and_index_pages():
"""
Prompt user to input appendix pages (if one exists) and index pages
:return: start and end pages of the appendix and index
"""
def index_pages():
"""
Prompt user to input index pages
:return: start and end pages of index
"""
index_start = int(input('Enter the start page of your index: '))
index_end = int(input('Enter the end page of your index: '))
return index_start, index_end
is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')
if is_appendix == 'y':
appendix_start = int(input('Enter the start page of your appendix: '))
appendix_end = int(input('Enter the end page of your appendix: '))
index_start, index_end = index_pages()
else:
        # When there is no appendix, set appendix start and end pages such that the page ranges of the
# appendix and the post-appendix (pre-index) will be blank, and the page range of the post-insert
# will be from the insert point to the start of the index. See def reorder for more details.
index_start, index_end = index_pages()
appendix_start = index_start
appendix_end = index_start - 1
return appendix_start, appendix_end, index_start, index_end
def yes_or_no(prompt):
"""
Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input
:param prompt: str prompting user to input their response
:return: yes or no response once user has correctly input their response
"""
response = input(prompt)
while response not in ['y', 'n']:
print('Invalid input')
response = input(prompt)
return response
def write_pages(page_range, pdf_read_object, pdf_write_object):
"""
Read pages within certain page range from the PDF read object and write those pages to the PDF write object
:param page_range: iterable containing pages to be read and written
:param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from
:param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to
:return: None, write object is modified in place.
"""
for page_num in page_range:
page = pdf_read_object.getPage(page_num)
pdf_write_object.addPage(page)
def reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end):
"""
Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name
:param filename: name of the PDF file to be reordered
:param insert_page: page in the original PDF after which the appendix and index are to be inserted
:param appendix_start: appendix start page in the original PDF
:param appendix_end: appendix end page in the original PDF
:param index_start: index start page in the original PDF
:param index_end: index end page in the original PDF
:return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF
"""
with filename.open('rb') as read_object, open(filename.stem + '_reordered.pdf', 'wb') as write_object:
pdf_read_object = PyPDF2.PdfFileReader(read_object)
pdf_write_object = PyPDF2.PdfFileWriter()
pdf_length = pdf_read_object.numPages
# Check for invalid page numbers
if insert_page < 1 or insert_page >= appendix_start:
raise ValueError('Invalid insert page')
if appendix_start != index_start and appendix_start > appendix_end:
raise ValueError('Invalid appendix start page')
if appendix_start != index_start and appendix_end >= index_start:
raise ValueError('Invalid appendix end page')
if index_start > index_end:
raise ValueError('Invalid index start page')
if index_end > pdf_length:
raise ValueError('Invalid index end page')
# Prepare page ranges to be ordered
pre_insert = range(insert_page)
post_insert = range(insert_page, appendix_start - 1)
appendix = range(appendix_start - 1, appendix_end)
post_appendix = range(appendix_end, index_start - 1)
index = range(index_start - 1, index_end)
post_index = range(index_end, pdf_length)
# Copy pages from original PDF object to new PDF object with the new ordered page ranges
for page_range in [pre_insert, index, appendix, post_insert, post_appendix, post_index]:
write_pages(page_range, pdf_read_object, pdf_write_object)
# Write ordered PDF object to PDF file
pdf_write_object.write(write_object)
def main():
while True:
print('------')
filenames = get_filenames()
if filenames:
print('Unordered PDF files in the current directory: ')
for index, filename in enumerate(filenames):
print('{}: {}'.format(index + 1, filename))
chosen_index = input('\nEnter the number of the file you want to reorder (type q to quit): ')
if chosen_index == 'q':
break
insert_page = int(input('Enter the page you want your appendix and index to come after: '))
appendix_start, appendix_end, index_start, index_end = appendix_and_index_pages()
try:
filename = filenames[int(chosen_index) - 1]
reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end)
print('\n{} reordered.'.format(filename))
except Exception as error:
print(error)
print('Restarting program\n')
continue
else:
print('No unordered PDF found in current directory')
# Ask user to reorder additional PDFs
is_continue = yes_or_no('\nDo you want to reorder another PDF (y/n)? ')
if is_continue == 'n':
break
if __name__ == '__main__':
main()
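To make the range arithmetic in reorder() concrete, here is a worked example
with made-up page numbers (not from any actual book): pdf_length=40,
insert_page=5, appendix on pages 20-25, index on pages 26-30.

# Zero-based ranges built inside reorder() for the hypothetical numbers above:
# pre_insert    = range(5)        -> pages 1-5
# post_insert   = range(5, 19)    -> pages 6-19
# appendix      = range(19, 25)   -> pages 20-25
# post_appendix = range(25, 25)   -> empty (appendix and index are adjacent)
# index         = range(25, 30)   -> pages 26-30
# post_index    = range(30, 40)   -> pages 31-40
# Written in the order pre_insert, index, appendix, post_insert,
# post_appendix, post_index: pages 1-5, 26-30, 20-25, 6-19, 31-40.
# With no appendix, the sentinel values (appendix_start = index_start,
# appendix_end = index_start - 1) make both appendix and post_appendix empty.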
|
normal
|
{
"blob_id": "2b3a42fed98b43cdd78edd751b306ba25328061a",
"index": 8652,
"step-1": "<mask token>\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\n<mask token>\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem +\n '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n if index_start > index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = 
range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n for page_range in [pre_insert, index, appendix, post_insert,\n post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_filenames():\n \"\"\"\n Get PDF files not yet reordered in the current directory\n :return: list of PDF file names\n \"\"\"\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n return filenames\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem +\n '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n if index_start > 
index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n for page_range in [pre_insert, index, appendix, post_insert,\n post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import PyPDF2\nfrom pathlib import Path\n\n\ndef get_filenames():\n \"\"\"\n Get PDF files not yet reordered in the current directory\n :return: list of PDF file names\n \"\"\"\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n return filenames\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem +\n '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n 
if index_start > index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n for page_range in [pre_insert, index, appendix, post_insert,\n post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input(\n \"\"\"\nEnter the number of the file you want to reorder (type q to quit): \"\"\"\n )\n if chosen_index == 'q':\n break\n insert_page = int(input(\n 'Enter the page you want your appendix and index to come after: '\n ))\n appendix_start, appendix_end, index_start, index_end = (\n appendix_and_index_pages())\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end,\n index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import PyPDF2\nfrom pathlib import Path\n\n\ndef get_filenames():\n \"\"\"\n Get PDF files not yet reordered in the current directory\n :return: list of PDF file names\n \"\"\"\n filenames = []\n for filename in Path('.').glob('*.pdf'):\n if 'reordered' not in filename.stem:\n filenames.append(filename)\n\n return filenames\n\n\ndef appendix_and_index_pages():\n \"\"\"\n Prompt user to input appendix pages (if one exists) and index pages\n :return: start and end pages of the appendix and index\n \"\"\"\n\n def index_pages():\n \"\"\"\n Prompt user to input index pages\n :return: start and end pages of index\n \"\"\"\n index_start = int(input('Enter the start page of your index: '))\n index_end = int(input('Enter the end page of your index: '))\n return index_start, index_end\n\n is_appendix = yes_or_no('Does your book have an appendix (y/n)? ')\n\n if is_appendix == 'y':\n appendix_start = int(input('Enter the start page of your appendix: '))\n appendix_end = int(input('Enter the end page of your appendix: '))\n index_start, index_end = index_pages()\n else:\n # When there is no appendix, set appendix start and end pages such as the page ranges of the\n # appendix and the post-appendix (pre-index) will be blank, and the page range of the post-insert\n # will be from the insert point to the start of the index. See def reorder for more details.\n index_start, index_end = index_pages()\n appendix_start = index_start\n appendix_end = index_start - 1\n\n return appendix_start, appendix_end, index_start, index_end\n\n\ndef yes_or_no(prompt):\n \"\"\"\n Prompt user to answer yes or no to a prompt, and keep asking if user did not input a correct yes/no input\n :param prompt: str prompting user to input their response\n :return: yes or no response once user has correctly input their response\n \"\"\"\n response = input(prompt)\n while response not in ['y', 'n']:\n print('Invalid input')\n response = input(prompt)\n\n return response\n\n\ndef write_pages(page_range, pdf_read_object, pdf_write_object):\n \"\"\"\n Read pages within certain page range from the PDF read object and write those pages to the PDF write object\n :param page_range: iterable containing pages to be read and written\n :param pdf_read_object: PyPDF2.PdfFileReader object where pages are read from\n :param pdf_write_object: PyPDF2.PdfFileWriter object where pages are written to\n :return: None, write object is modified in place.\n \"\"\"\n for page_num in page_range:\n page = pdf_read_object.getPage(page_num)\n pdf_write_object.addPage(page)\n\n\ndef reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end):\n \"\"\"\n Reorder the appendix and index of a PDF book to another location and store the new PDF under a new name\n :param filename: name of the PDF file to be reordered\n :param insert_page: page in the original PDF after which the appendix and index are to be inserted\n :param appendix_start: appendix start page in the original PDF\n :param appendix_end: appendix end page in the original PDF\n :param index_start: index start page in the original PDF\n :param index_end: index end page in the original PDF\n :return: a reordered PDF (ending with '_reordered.pdf') in the same directory as the original PDF\n \"\"\"\n with filename.open('rb') as read_object, open(filename.stem + '_reordered.pdf', 'wb') as write_object:\n pdf_read_object = PyPDF2.PdfFileReader(read_object)\n pdf_write_object = PyPDF2.PdfFileWriter()\n pdf_length = pdf_read_object.numPages\n\n # Check for invalid page numbers\n 
if insert_page < 1 or insert_page >= appendix_start:\n raise ValueError('Invalid insert page')\n if appendix_start != index_start and appendix_start > appendix_end:\n raise ValueError('Invalid appendix start page')\n if appendix_start != index_start and appendix_end >= index_start:\n raise ValueError('Invalid appendix end page')\n if index_start > index_end:\n raise ValueError('Invalid index start page')\n if index_end > pdf_length:\n raise ValueError('Invalid index end page')\n\n # Prepare page ranges to be ordered\n pre_insert = range(insert_page)\n post_insert = range(insert_page, appendix_start - 1)\n appendix = range(appendix_start - 1, appendix_end)\n post_appendix = range(appendix_end, index_start - 1)\n index = range(index_start - 1, index_end)\n post_index = range(index_end, pdf_length)\n\n # Copy pages from original PDF object to new PDF object with the new ordered page ranges\n for page_range in [pre_insert, index, appendix, post_insert, post_appendix, post_index]:\n write_pages(page_range, pdf_read_object, pdf_write_object)\n\n # Write ordered PDF object to PDF file\n pdf_write_object.write(write_object)\n\n\ndef main():\n while True:\n print('------')\n filenames = get_filenames()\n if filenames:\n print('Unordered PDF files in the current directory: ')\n for index, filename in enumerate(filenames):\n print('{}: {}'.format(index + 1, filename))\n chosen_index = input('\\nEnter the number of the file you want to reorder (type q to quit): ')\n if chosen_index == 'q':\n break\n insert_page = int(input('Enter the page you want your appendix and index to come after: '))\n appendix_start, appendix_end, index_start, index_end = appendix_and_index_pages()\n\n try:\n filename = filenames[int(chosen_index) - 1]\n reorder(filename, insert_page, appendix_start, appendix_end, index_start, index_end)\n print('\\n{} reordered.'.format(filename))\n except Exception as error:\n print(error)\n print('Restarting program\\n')\n continue\n else:\n print('No unordered PDF found in current directory')\n\n # Ask user to reorder additional PDFs\n is_continue = yes_or_no('\\nDo you want to reorder another PDF (y/n)? ')\n if is_continue == 'n':\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
5,
7,
8,
9
]
}
|
[
2,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Covid(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Covid(models.Model):
states = models.CharField(max_length=100, null=True, blank=True)
affected = models.IntegerField(null=True)
cured = models.IntegerField(null=True)
death = models.IntegerField(null=True)
<|reserved_special_token_1|>
from django.db import models
class Covid(models.Model):
states = models.CharField(max_length=100, null=True, blank=True)
affected = models.IntegerField(null=True)
cured = models.IntegerField(null=True)
death = models.IntegerField(null=True)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Covid(models.Model):
states= models.CharField(max_length=100, null=True, blank=True)
affected = models.IntegerField(null=True)
cured = models.IntegerField(null=True)
death = models.IntegerField(null=True)
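A minimal usage sketch for the model above (hypothetical data, assuming the
app is installed and migrations have been applied):

# e.g. from a Django shell or a view
record = Covid.objects.create(states='Kerala', affected=100, cured=80, death=2)
worst_hit = Covid.objects.order_by('-affected').first()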
|
flexible
|
{
"blob_id": "284955a555ce1a727ba5041008cd0bac3c3bed49",
"index": 1283,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Covid(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Covid(models.Model):\n states = models.CharField(max_length=100, null=True, blank=True)\n affected = models.IntegerField(null=True)\n cured = models.IntegerField(null=True)\n death = models.IntegerField(null=True)\n",
"step-4": "from django.db import models\n\n\nclass Covid(models.Model):\n states = models.CharField(max_length=100, null=True, blank=True)\n affected = models.IntegerField(null=True)\n cured = models.IntegerField(null=True)\n death = models.IntegerField(null=True)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Covid(models.Model):\n states= models.CharField(max_length=100, null=True, blank=True)\n affected = models.IntegerField(null=True)\n cured = models.IntegerField(null=True)\n death = models.IntegerField(null=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BinaryTreeInorderTraversal(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BinaryTreeInorderTraversal(object):
def inorderTraversal(self, root: TreeNode) ->List[int]:
result = list()
inorder_stack = list()
while root or inorder_stack:
if root:
inorder_stack.append(root)
root = root.left
else:
root = inorder_stack.pop()
result.append(root.val)
root = root.right
return result
<|reserved_special_token_1|>
__author__ = 'yangxin_ryan'
<|reserved_special_token_0|>
class BinaryTreeInorderTraversal(object):
def inorderTraversal(self, root: TreeNode) ->List[int]:
result = list()
inorder_stack = list()
while root or inorder_stack:
if root:
inorder_stack.append(root)
root = root.left
else:
root = inorder_stack.pop()
result.append(root.val)
root = root.right
return result
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Solutions:
题目要求非递归的中序遍历,
中序遍历的意思其实就是先遍历左孩子、然后是根结点、最后是右孩子。我们按照这个逻辑,应该先循环到root的最左孩子,
然后依次出栈,然后将结果放入结果集合result,然后是根的val,然后右孩子。
"""
class BinaryTreeInorderTraversal(object):
def inorderTraversal(self, root: TreeNode) -> List[int]:
result = list()
inorder_stack = list()
while root or inorder_stack:
if root:
inorder_stack.append(root)
root = root.left
else:
root = inorder_stack.pop()
result.append(root.val)
root = root.right
return result
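A minimal usage sketch, relying on the TreeNode stand-in defined above: build
the tree [1, null, 2, 3] and check that the traversal returns [1, 3, 2].

if __name__ == '__main__':
    # Tree:   1
    #           \
    #            2
    #           /
    #          3
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(BinaryTreeInorderTraversal().inorderTraversal(root))  # [1, 3, 2]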
|
flexible
|
{
"blob_id": "8e629ee53f11e29aa026763508d13b06f6ced5ba",
"index": 940,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BinaryTreeInorderTraversal(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BinaryTreeInorderTraversal(object):\n\n def inorderTraversal(self, root: TreeNode) ->List[int]:\n result = list()\n inorder_stack = list()\n while root or inorder_stack:\n if root:\n inorder_stack.append(root)\n root = root.left\n else:\n root = inorder_stack.pop()\n result.append(root.val)\n root = root.right\n return result\n",
"step-4": "__author__ = 'yangxin_ryan'\n<mask token>\n\n\nclass BinaryTreeInorderTraversal(object):\n\n def inorderTraversal(self, root: TreeNode) ->List[int]:\n result = list()\n inorder_stack = list()\n while root or inorder_stack:\n if root:\n inorder_stack.append(root)\n root = root.left\n else:\n root = inorder_stack.pop()\n result.append(root.val)\n root = root.right\n return result\n",
"step-5": "# -*- coding:utf-8 -*-\n__author__ = 'yangxin_ryan'\n\"\"\"\nSolutions:\n题目要求非递归的中序遍历,\n中序遍历的意思其实就是先遍历左孩子、然后是根结点、最后是右孩子。我们按照这个逻辑,应该先循环到root的最左孩子,\n然后依次出栈,然后将结果放入结果集合result,然后是根的val,然后右孩子。\n\"\"\"\n\n\nclass BinaryTreeInorderTraversal(object):\n\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n result = list()\n inorder_stack = list()\n while root or inorder_stack:\n if root:\n inorder_stack.append(root)\n root = root.left\n else:\n root = inorder_stack.pop()\n result.append(root.val)\n root = root.right\n return result\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print(sum([(row[lineNumber * 3 % len(row)] == '#') for lineNumber, row in
enumerate(open('input.txt').read().splitlines())]))
<|reserved_special_token_1|>
#!/usr/bin/env python3
print(sum([row[lineNumber * 3 % len(row)] == '#' for lineNumber, row in enumerate(open('input.txt').read().splitlines())]))
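The one-liner packs an Advent-of-Code-style slope walk into a single
expression; an unrolled sketch of the same logic (same input.txt assumed)
may be easier to follow:

# Walk right 3, down 1 per row, wrapping horizontally, and count the
# '#' cells (trees) hit along the way.
trees = 0
with open('input.txt') as f:
    for line_number, row in enumerate(f.read().splitlines()):
        if row[(line_number * 3) % len(row)] == '#':
            trees += 1
print(trees)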
|
flexible
|
{
"blob_id": "b2fecadbd99edb89379f82a935aa1622f043eeac",
"index": 9099,
"step-1": "<mask token>\n",
"step-2": "print(sum([(row[lineNumber * 3 % len(row)] == '#') for lineNumber, row in\n enumerate(open('input.txt').read().splitlines())]))\n",
"step-3": "#!/usr/bin/env python3\n\nprint(sum([row[lineNumber * 3 % len(row)] == '#' for lineNumber, row in enumerate(open('input.txt').read().splitlines())]))",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
def plot(actual, prediction):
    # Consistent 4-space indentation; the original mixed tabs and spaces,
    # which raises a TabError in Python 3.
    plt.figure(figsize=(16, 6))
    plt.plot(actual, label='Actual', color='b', linewidth=3)
    plt.plot(prediction, label='Prediction', color='y')
    print("Plotting")
    plt.legend()
    plt.show()
timesteps = 2
params = 5
samples = 500000
# load dataset
dataset = read_csv('merged.csv', header=0, usecols = ['time', 'src', 'dst', 'length', 'protocol', 'people'])
values = dataset.values
encoder = LabelEncoder()
values[:,5] = encoder.fit_transform(values[:,5])
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
labels = scaled.copy()
scaled = np.delete(scaled, 5, axis=1)
labels = labels[:, 5:]  # drop the five feature columns, keeping only the 'people' target
labels = scaler.fit_transform(labels)
labels = labels[:samples // timesteps]  # integer division: slice indices must be ints in Python 3
scaled = scaled[:samples]
reframed = np.reshape(scaled,(samples, params))
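# group the flat rows into (windows, timesteps, features) sequences, the 3-D input shape an LSTM expects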
values = np.reshape(reframed, (samples // timesteps, timesteps, -1))
size = len(values) // timesteps
sizeL = len(labels) // timesteps
test_X = values[:size]
test_y = labels[:sizeL]
model = load_model("test50.h5")
#predicts
yhat = model.predict(test_X)
plot(test_y, yhat)
|
normal
|
{
"blob_id": "11984027baf6d4c97b2976e4ac49a0e8ec62f893",
"index": 8709,
"step-1": "from math import sqrt\nfrom numpy import concatenate\nfrom matplotlib import pyplot\nfrom pandas import read_csv\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import mean_squared_error\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.models import load_model\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef plot(actual, prediction):\n\tplt.figure(figsize=(16,6))\n\tplt.plot(actual, label='Actual',color='b',linewidth=3)\n\tplt.plot((prediction), label='Prediction',color='y') \n\tprint(\"Plotting\")\n plt.legend()\n plt.show()\n\ntimesteps = 2\nparams = 5\nsamples = 500000\n\n# load dataset\ndataset = read_csv('merged.csv', header=0, usecols = ['time', 'src', 'dst', 'length', 'protocol', 'people'])\nvalues = dataset.values\n\nencoder = LabelEncoder()\nvalues[:,5] = encoder.fit_transform(values[:,5])\n\nvalues = values.astype('float32')\n\n# normalize features\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled = scaler.fit_transform(values)\n\nlabels = scaled.copy()\nscaled = np.delete(scaled, 5, axis=1)\nlabels = np.delete(labels, 0, axis =1)\nlabels = np.delete(labels, 0, axis =1)\nlabels = np.delete(labels, 0, axis =1)\nlabels = np.delete(labels, 0, axis =1)\nlabels = np.delete(labels, 0, axis =1)\nlabels = scaler.fit_transform(labels)\n\nlabels = labels[:(samples/timesteps)]\n\nscaled = scaled[:samples]\nreframed = np.reshape(scaled,(samples, params))\nvalues = np.reshape(reframed,((samples/timesteps), timesteps,-1))\n\nsize = ((len(values))/timesteps)\nsizeL = ((len(labels))/timesteps)\n\ntest_X = values[:size]\ntest_y = labels[:sizeL]\n\nmodel = load_model(\"test50.h5\")\n\n#predicts\nyhat = model.predict(test_X)\nplot(test_y, yhat)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@app.route('/<sensor_id>', methods=['GET'])
def sensor_details(sensor_id):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0:
return {'sensor': sensors[sensor_pos]}
else:
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def hello_world():
for sensor in sensors:
sensor.update()
return {'sensors': sensors}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/<sensor_id>', methods=['GET'])
def sensor_details(sensor_id):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0:
return {'sensor': sensors[sensor_pos]}
else:
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
@app.route('/<sensor_id>/toggle', methods=['PUT'])
def set_sensor_value(sensor_id: str):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):
sensors[sensor_pos].toggle()
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
@app.route('/', methods=['GET'])
def hello_world():
for sensor in sensors:
sensor.update()
return {'sensors': sensors}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def search_index_by_id(sensor_id: str):
for pos, sensor in enumerate(sensors):
if sensor.id == sensor_id:
return pos
return -1
@app.route('/<sensor_id>', methods=['GET'])
def sensor_details(sensor_id):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0:
return {'sensor': sensors[sensor_pos]}
else:
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
@app.route('/<sensor_id>/toggle', methods=['PUT'])
def set_sensor_value(sensor_id: str):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):
sensors[sensor_pos].toggle()
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
@app.route('/', methods=['GET'])
def hello_world():
for sensor in sensors:
sensor.update()
return {'sensors': sensors}
<|reserved_special_token_1|>
from flask import Flask
from sim.toggle import ToggleSensor
from sim.sensor import Sensor
app = Flask(__name__)
sensors = [ToggleSensor(id='s-01', description='lampadina'), ToggleSensor(
id='s-02', description='lampadina'), ToggleSensor(id='s-03',
description='allarme atomico'), ToggleSensor(id='s-04', description=
'porta aperta'), Sensor(id='temperature-01', description=
'sensore di temperatura'), Sensor(id='umidita-01', description=
'sensore di umiditá'), Sensor(id='cleancode-01', description=
'sensore di bellezza del codice'), Sensor(id='luce-01', description=
'sensore di luce')]
def search_index_by_id(sensor_id: str):
for pos, sensor in enumerate(sensors):
if sensor.id == sensor_id:
return pos
return -1
@app.route('/<sensor_id>', methods=['GET'])
def sensor_details(sensor_id):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0:
return {'sensor': sensors[sensor_pos]}
else:
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
@app.route('/<sensor_id>/toggle', methods=['PUT'])
def set_sensor_value(sensor_id: str):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):
sensors[sensor_pos].toggle()
return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}
@app.route('/', methods=['GET'])
def hello_world():
for sensor in sensors:
sensor.update()
return {'sensors': sensors}
<|reserved_special_token_1|>
from flask import Flask
from sim.toggle import ToggleSensor
from sim.sensor import Sensor
app = Flask(__name__)
sensors = [
ToggleSensor(id="s-01", description="lampadina"),
ToggleSensor(id="s-02", description="lampadina"),
ToggleSensor(id="s-03", description="allarme atomico"),
ToggleSensor(id="s-04", description="porta aperta"),
Sensor(id="temperature-01", description="sensore di temperatura"),
Sensor(id="umidita-01", description="sensore di umiditá"),
Sensor(id="cleancode-01", description="sensore di bellezza del codice"),
Sensor(id="luce-01", description="sensore di luce"),
]
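# In-memory registry of simulated sensors; the ToggleSensor entries additionally support toggle().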
def search_index_by_id(sensor_id: str):
for pos, sensor in enumerate(sensors):
if sensor.id == sensor_id:
return pos
return -1
@app.route("/<sensor_id>", methods=["GET"])
def sensor_details(sensor_id):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0:
return {"sensor": sensors[sensor_pos]}
else:
return {"kind": "error", "payload": f"Sensor {sensor_id} not found"}
@app.route("/<sensor_id>/toggle", methods=["PUT"])
def set_sensor_value(sensor_id: str):
sensor_pos = search_index_by_id(sensor_id)
if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):
sensors[sensor_pos].toggle()
return {"kind": "error", "payload": f"Sensor {sensor_id} not found"}
@app.route("/", methods=["GET"])
def hello_world():
for sensor in sensors:
sensor.update()
return {"sensors": sensors}
|
flexible
|
{
"blob_id": "2843845848747c723d670cd3a5fcb7127153ac7e",
"index": 264,
"step-1": "<mask token>\n\n\n@app.route('/<sensor_id>', methods=['GET'])\ndef sensor_details(sensor_id):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0:\n return {'sensor': sensors[sensor_pos]}\n else:\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n<mask token>\n\n\n@app.route('/', methods=['GET'])\ndef hello_world():\n for sensor in sensors:\n sensor.update()\n return {'sensors': sensors}\n",
"step-2": "<mask token>\n\n\n@app.route('/<sensor_id>', methods=['GET'])\ndef sensor_details(sensor_id):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0:\n return {'sensor': sensors[sensor_pos]}\n else:\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n@app.route('/<sensor_id>/toggle', methods=['PUT'])\ndef set_sensor_value(sensor_id: str):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):\n sensors[sensor_pos].toggle()\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n@app.route('/', methods=['GET'])\ndef hello_world():\n for sensor in sensors:\n sensor.update()\n return {'sensors': sensors}\n",
"step-3": "<mask token>\n\n\ndef search_index_by_id(sensor_id: str):\n for pos, sensor in enumerate(sensors):\n if sensor.id == sensor_id:\n return pos\n return -1\n\n\n@app.route('/<sensor_id>', methods=['GET'])\ndef sensor_details(sensor_id):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0:\n return {'sensor': sensors[sensor_pos]}\n else:\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n@app.route('/<sensor_id>/toggle', methods=['PUT'])\ndef set_sensor_value(sensor_id: str):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):\n sensors[sensor_pos].toggle()\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n@app.route('/', methods=['GET'])\ndef hello_world():\n for sensor in sensors:\n sensor.update()\n return {'sensors': sensors}\n",
"step-4": "from flask import Flask\nfrom sim.toggle import ToggleSensor\nfrom sim.sensor import Sensor\napp = Flask(__name__)\nsensors = [ToggleSensor(id='s-01', description='lampadina'), ToggleSensor(\n id='s-02', description='lampadina'), ToggleSensor(id='s-03',\n description='allarme atomico'), ToggleSensor(id='s-04', description=\n 'porta aperta'), Sensor(id='temperature-01', description=\n 'sensore di temperatura'), Sensor(id='umidita-01', description=\n 'sensore di umiditá'), Sensor(id='cleancode-01', description=\n 'sensore di bellezza del codice'), Sensor(id='luce-01', description=\n 'sensore di luce')]\n\n\ndef search_index_by_id(sensor_id: str):\n for pos, sensor in enumerate(sensors):\n if sensor.id == sensor_id:\n return pos\n return -1\n\n\n@app.route('/<sensor_id>', methods=['GET'])\ndef sensor_details(sensor_id):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0:\n return {'sensor': sensors[sensor_pos]}\n else:\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n@app.route('/<sensor_id>/toggle', methods=['PUT'])\ndef set_sensor_value(sensor_id: str):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):\n sensors[sensor_pos].toggle()\n return {'kind': 'error', 'payload': f'Sensor {sensor_id} not found'}\n\n\n@app.route('/', methods=['GET'])\ndef hello_world():\n for sensor in sensors:\n sensor.update()\n return {'sensors': sensors}\n",
"step-5": "from flask import Flask\n\nfrom sim.toggle import ToggleSensor\nfrom sim.sensor import Sensor\n\napp = Flask(__name__)\n\nsensors = [\n ToggleSensor(id=\"s-01\", description=\"lampadina\"),\n ToggleSensor(id=\"s-02\", description=\"lampadina\"),\n ToggleSensor(id=\"s-03\", description=\"allarme atomico\"),\n ToggleSensor(id=\"s-04\", description=\"porta aperta\"),\n Sensor(id=\"temperature-01\", description=\"sensore di temperatura\"),\n Sensor(id=\"umidita-01\", description=\"sensore di umiditá\"),\n Sensor(id=\"cleancode-01\", description=\"sensore di bellezza del codice\"),\n Sensor(id=\"luce-01\", description=\"sensore di luce\"),\n]\n\n\ndef search_index_by_id(sensor_id: str):\n for pos, sensor in enumerate(sensors):\n if sensor.id == sensor_id:\n return pos\n\n return -1\n\n\n@app.route(\"/<sensor_id>\", methods=[\"GET\"])\ndef sensor_details(sensor_id):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0:\n return {\"sensor\": sensors[sensor_pos]}\n else:\n return {\"kind\": \"error\", \"payload\": f\"Sensor {sensor_id} not found\"}\n\n\n@app.route(\"/<sensor_id>/toggle\", methods=[\"PUT\"])\ndef set_sensor_value(sensor_id: str):\n sensor_pos = search_index_by_id(sensor_id)\n if sensor_pos >= 0 and isinstance(sensors[sensor_pos], ToggleSensor):\n sensors[sensor_pos].toggle()\n return {\"kind\": \"error\", \"payload\": f\"Sensor {sensor_id} not found\"}\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef hello_world():\n for sensor in sensors:\n sensor.update()\n return {\"sensors\": sensors}\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
from flask import Flask, render_template, redirect, request, session
app = Flask(__name__)
app.secret_key = 'ThisIsSecret' #this line is always needed when using the import 'session'
@app.route('/') #methods=['GET'] by default
def index():
return render_template('index.html')
@app.route('/ninja')
def ninja():
return render_template('ninja.html')
@app.route('/ninja/<username>') #great example of using a variable in an html and the image needed on that specific page
def show_user_profile(username):
    print(username)  # parenthesized for Python 3
return render_template('blue.html', username=username)
@app.errorhandler(404) #modifies your 404 url not found page to whatever you have on your html file
def page_not_found(e):
return render_template('404notFound.html'), 404
# @app.route('/ninja/blue')
# def blue():
# return render_template('blue.html')
# @app.route('/ninja/red')
# def red():
# return render_template('red.html')
# @app.route('/ninja/purple')
# def purple():
# return render_template('purple.html')
# @app.route('/ninja/orange')
# def orange():
# return render_template('orange.html')
app.run(debug=True)
|
normal
|
{
"blob_id": "001198459b038186ab784b6a9bed755924784866",
"index": 4687,
"step-1": "from flask import Flask, render_template, redirect, request, session\r\n\r\napp = Flask(__name__)\r\napp.secret_key = 'ThisIsSecret' #this line is always needed when using the import 'session'\r\n\r\n\r\n@app.route('/') #methods=['GET'] by default\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/ninja')\r\ndef ninja():\r\n return render_template('ninja.html')\r\n\r\n\r\n@app.route('/ninja/<username>') #great example of using a variable in an html and the image needed on that specific page\r\ndef show_user_profile(username):\r\n print username\r\n return render_template('blue.html', username=username)\r\n\r\n@app.errorhandler(404) #modifies your 404 url not found page to whatever you have on your html file\r\ndef page_not_found(e):\r\n return render_template('404notFound.html'), 404\r\n\r\n\r\n\r\n# @app.route('/ninja/blue')\r\n# def blue():\r\n# return render_template('blue.html')\r\n\r\n# @app.route('/ninja/red')\r\n# def red():\r\n# return render_template('red.html')\r\n\r\n# @app.route('/ninja/purple')\r\n# def purple():\r\n# return render_template('purple.html')\r\n\r\n# @app.route('/ninja/orange')\r\n# def orange():\r\n# return render_template('orange.html')\r\n\r\napp.run(debug=True)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('student_registrations.json', 'w') as f:
f.write(e)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s1 = ex1.load_course_registrations('data.txt')
s1 = map(asdict, s1)
e = json.dumps(list(s1))
with open('student_registrations.json', 'w') as f:
f.write(e)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from dataclasses import asdict
from json import dumps
from dataclasses import dataclass
from typing import List, Dict
import json
import ex1
s1 = ex1.load_course_registrations('data.txt')
s1 = map(asdict, s1)
e = json.dumps(list(s1))
with open('student_registrations.json', 'w') as f:
f.write(e)
<|reserved_special_token_1|>
'''Lab01 ex4
E/16/319 Rathnayake R.P.V.N'''
from dataclasses import asdict
from json import dumps
from dataclasses import dataclass
from typing import List, Dict
import json
import ex1  # import ex1 to get the load_course_registrations function

s1 = ex1.load_course_registrations("data.txt")  # load the list of Student objects into s1
s1 = map(asdict, s1)  # apply asdict() to each Student using map

e = json.dumps(list(s1))  # convert into a JSON string
# print(e)
with open("student_registrations.json", "w") as f:  # open the json file and write to it
    f.write(e)
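# Assumption: ex1.load_course_registrations returns a list of @dataclass
# Student instances, which is what makes the asdict()/json.dumps pipeline work.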
|
flexible
|
{
"blob_id": "8a5ade450485f9114fa91c00c7588535ccbaf0e6",
"index": 1923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('student_registrations.json', 'w') as f:\n f.write(e)\n",
"step-3": "<mask token>\ns1 = ex1.load_course_registrations('data.txt')\ns1 = map(asdict, s1)\ne = json.dumps(list(s1))\nwith open('student_registrations.json', 'w') as f:\n f.write(e)\n",
"step-4": "<mask token>\nfrom dataclasses import asdict\nfrom json import dumps\nfrom dataclasses import dataclass\nfrom typing import List, Dict\nimport json\nimport ex1\ns1 = ex1.load_course_registrations('data.txt')\ns1 = map(asdict, s1)\ne = json.dumps(list(s1))\nwith open('student_registrations.json', 'w') as f:\n f.write(e)\n",
"step-5": "'''Lab01 ex4\n\tE/16/319 Rathnayake R.P.V.N'''\nfrom dataclasses import asdict\nfrom json import dumps\nfrom dataclasses import dataclass\nfrom typing import List, Dict\nimport json\nimport ex1\t\t#import the ex1 to get the lord_course_registraion function\n\n\ns1=ex1.load_course_registrations(\"data.txt\")\t#lord the list of Student object in to the s1\ns1=(map(asdict,s1))\t\t\t\t\t\t\t\t#aply asdict() to s1 my useng the map function\n\ne=json.dumps(list(s1))\t\t\t\t\t\t\t#convert into jsom=n string\n#print(e)\nwith open(\"student_registrations.json\",\"w\") as f:\t\t#open json file and write on it\n\tf.write(e)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(result)
<|reserved_special_token_1|>
a = 1 + 2 * 3
a = 1 or 2 and 3
result = 1 < 2 < 3
result = 10 < 20 > 15
print(result)
<|reserved_special_token_1|>
# Operator precedence
# As in mathematics, Python operators have precedence: multiplication and
# division are evaluated before addition and subtraction, for example.
# Precedence can be looked up in the operator precedence table;
# the lower an operator sits in that table, the higher its precedence,
# and higher-precedence operators are evaluated first.
# Operators of equal precedence are evaluated left to right.
# It is enough to know that the table exists; do not try to memorize it.
# In real code, when precedence is unclear, use parentheses to make the
# evaluation order explicit.
a = 1 + 2 * 3

# Which binds tighter: and or or?
# If or had the higher precedence (or the two were equal), the or would be
# evaluated first and the result would be 3.
# Since and actually has the higher precedence, the and is evaluated first,
# so the result is 1.
a = 1 or 2 and 3

# print(a)

# Logical operators (supplement)
# Comparison expressions can be chained
result = 1 < 2 < 3  # equivalent to 1 < 2 and 2 < 3
result = 10 < 20 > 15
print(result)
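# prints True: 10 < 20 > 15 chains to (10 < 20) and (20 > 15)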
|
flexible
|
{
"blob_id": "25550cbaf6e0e5bdbbe3852bb8cdc05ac300d315",
"index": 8872,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(result)\n",
"step-3": "a = 1 + 2 * 3\na = 1 or 2 and 3\nresult = 1 < 2 < 3\nresult = 10 < 20 > 15\nprint(result)\n",
"step-4": "# 运算符的优先级\n# 和数学中一样,在Python运算也有优先级,比如先乘除 后加减\n# 运算符的优先级可以根据优先级的表格来查询,\n# 在表格中位置越靠下的运算符优先级越高,优先级越高的越优先计算\n# 如果优先级一样则自左向右计算\n# 关于优先级的表格,你知道有这么一个东西就够了,千万不要去记\n# 在开发中如果遇到优先级不清楚的,则可以通过小括号来改变运算顺序\na = 1 + 2 * 3\n\n# 一样 and高 or高\n# 如果or的优先级高,或者两个运算符的优先级一样高\n# 则需要先进行或运算,则运算结果是3\n# 如果and的优先级高,则应该先计算与运算\n# 则运算结果是1\na = 1 or 2 and 3\n\n# print(a)\n\n# 逻辑运算符(补充)\n# 逻辑运算符可以连着使用\nresult = 1 < 2 < 3 # 相当于 1 < 2 and 2 < 3\nresult = 10 < 20 > 15\n\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from config import Config
import numpy as np
from itertools import product
from sklearn.utils import shuffle
from sklearn.metrics import precision_recall_fscore_support
from keras import callbacks, regularizers
from keras.models import Sequential
from keras.layers import Dense, InputLayer
from keras import backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from src.classification_data_tools import limit_negative_samples
import pickle
from tensorflow import set_random_seed
import tensorflow as tf
cfg = Config()
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw,
batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')
f1 = 2 * ((precision * recall) / (precision + recall))
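    # Note: this recomputes F1 from the macro-averaged precision and recall;
    # it can differ slightly from the macro fscore already returned above.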
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')
f1 = 2 * ((precision * recall) / (precision + recall))
return precision, recall, f1
#estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1)
#kfold = StratifiedKFold(n_splits=10, shuffle=True)
#results = cross_val_score(estimator, test_features, test_targets, cv=kfold)
#print("Results: %.2f%% (%.2f%%)" % (results.mean() * 100, results.std() * 100))
training_X, training_y, test_X, test_Y = FetchData(cfg)
training_features = np.array(training_X)
training_targets = np.array(training_y)
test_features = np.array(test_X)
test_targets = np.array(test_Y)
input_shape = (len(training_features[0]),)
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat = i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight})
precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' + str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' + regularizer + ',' + activation_function + ',' + str(precision) + ',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
|
normal
|
{
"blob_id": "957e18b2536cda69ba1db571d0308d5e392fe488",
"index": 2166,
"step-1": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\n<mask token>\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n count_max = 0\n counter = 0\n architecture_list = []\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat=i))\n 
architecture_list.extend(prod)\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg\n .TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n print(str(counter) + '/' + str(count_max))\n model = BuildModel(cfg, input_shape, True, list(\n architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model,\n training_features, training_targets, {(0): 1.0,\n (1): class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg,\n model_trained, test_features, test_targets)\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +\n ',' + str(la4) + ',' + str(la5) + ',' + str(\n class_weight) + ',' + regularizer + ',' +\n activation_function + ',' + str(precision) +\n ',' + str(recall) + ',' + str(f1) + '\\n')\n counter += 1\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(\n best_recall) + ', f1: ' + str(best_f1))\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg\n .CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n",
"step-4": "<mask token>\ncfg = Config()\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\ntraining_X, training_y, test_X, test_Y = FetchData(cfg)\ntraining_features = np.array(training_X)\ntraining_targets = np.array(training_y)\ntest_features = np.array(test_X)\ntest_targets = np.array(test_Y)\ninput_shape = len(training_features[0]),\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n 
best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n count_max = 0\n counter = 0\n architecture_list = []\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat=i))\n architecture_list.extend(prod)\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg\n .TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n print(str(counter) + '/' + str(count_max))\n model = BuildModel(cfg, input_shape, True, list(\n architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model,\n training_features, training_targets, {(0): 1.0,\n (1): class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg,\n model_trained, test_features, test_targets)\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +\n ',' + str(la4) + ',' + str(la5) + ',' + str(\n class_weight) + ',' + regularizer + ',' +\n activation_function + ',' + str(precision) +\n ',' + str(recall) + ',' + str(f1) + '\\n')\n counter += 1\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(\n best_recall) + ', f1: ' + str(best_f1))\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg\n .CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n",
"step-5": "from config import Config\nimport numpy as np\nfrom itertools import product\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom keras import callbacks, regularizers\nfrom keras.models import Sequential\nfrom keras.layers import Dense, InputLayer\nfrom keras import backend as K\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nfrom src.classification_data_tools import limit_negative_samples\nimport pickle\nfrom tensorflow import set_random_seed\nimport tensorflow as tf\n\ncfg = Config()\n\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO)\n\n return training_features, training_targets, test_features, test_targets\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n\n model = Sequential()\n\n model.add(InputLayer(input_shape))\n\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n\n return model\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw,\n batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n\n return model\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n\n precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')\n f1 = 2 * ((precision * recall) / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n\n for prediction in predictions:\n if prediction[0] < 0.5:\n 
prediction[0] = 0\n else:\n prediction[0] = 1\n\n precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')\n f1 = 2 * ((precision * recall) / (precision + recall))\n return precision, recall, f1\n\n #estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1)\n #kfold = StratifiedKFold(n_splits=10, shuffle=True)\n #results = cross_val_score(estimator, test_features, test_targets, cv=kfold)\n #print(\"Results: %.2f%% (%.2f%%)\" % (results.mean() * 100, results.std() * 100))\n\n\ntraining_X, training_y, test_X, test_Y = FetchData(cfg)\n\ntraining_features = np.array(training_X)\ntraining_targets = np.array(training_y)\ntest_features = np.array(test_X)\ntest_targets = np.array(test_Y)\n\ninput_shape = (len(training_features[0]),)\n\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n\n count_max = 0\n counter = 0\n\n architecture_list = []\n\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat = i))\n architecture_list.extend(prod)\n\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n\n print(str(counter) + '/' + str(count_max))\n\n model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets)\n\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n\n\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' + str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' + regularizer + ',' + activation_function + ',' + str(precision) + ',' + str(recall) + ',' + str(f1) + '\\n')\n\n counter += 1\n\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1))\n\n\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n\n\n\n\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
def most_expensive_item(products):
return max(products.items(), key=lambda p: p[1])[0]
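# Example: most_expensive_item({'pen': 2, 'laptop': 999}) -> 'laptop' (key of the max value)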
|
normal
|
{
"blob_id": "f1e335d0187aeb78d857bc523eb33221fd2e7e6d",
"index": 7148,
"step-1": "<mask token>\n",
"step-2": "def most_expensive_item(products):\n return max(products.items(), key=lambda p: p[1])[0]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 17:38:50 2019
@author: User
"""
import numpy as np
import pandas as pd
dataset = pd.read_csv('University_data.csv')
print(dataset.info())
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, -1:].values
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
features[:, 0] = labelencoder.fit_transform(features[:, 0])
from sklearn.preprocessing import OneHotEncoder
onehotencoder = OneHotEncoder(categorical_features = [0])
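# Note: categorical_features is the pre-0.22 scikit-learn API; newer versions
# would use a ColumnTransformer wrapping OneHotEncoder instead.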
features = onehotencoder.fit_transform(features).toarray()
features = features[:, 1:]
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(features, labels)
x = ["Cabrini",337,1.5,2.3,9.0,0]
x = np.array(x).reshape(1,-1)
x[:,0] = labelencoder.transform(x[:,0])
x = onehotencoder.transform(x).toarray()
x = x[:,1:]
regressor.predict(x)
|
normal
|
{
"blob_id": "94e8f0532da76c803b23fe2217b07dc8cf285710",
"index": 950,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dataset.info())\n<mask token>\nregressor.fit(features, labels)\n<mask token>\nregressor.predict(x)\n",
"step-3": "<mask token>\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\n<mask token>\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\n<mask token>\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n<mask token>\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 17:38:50 2019\n\n@author: User\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv') \nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values \nlabels = dataset.iloc[:, -1:].values \nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features = [0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n\n\nfrom sklearn.linear_model import LinearRegression \nregressor = LinearRegression() \nregressor.fit(features, labels)\n\n\n\nx = [\"Cabrini\",337,1.5,2.3,9.0,0]\nx = np.array(x).reshape(1,-1)\nx[:,0] = labelencoder.transform(x[:,0])\nx = onehotencoder.transform(x).toarray()\nx = x[:,1:]\nregressor.predict(x)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Swking
@File : ZDT.py
@Date : 2018/12/28
@Desc :
"""
import numpy as np
class ZDT1:
def __init__(self):
self.dimension = 30
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
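
    # Reference ZDT1 definition: f1(x) = x1,
    # g(x) = 1 + 9/(n-1) * sum_{i=2..n} x_i,
    # f2(x) = g * (1 - sqrt(f1/g)), with each x_i in [0, 1].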
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])  # sum over x_2..x_n (X[1:-1] would drop the last variable)
Y[1] = g * (1 - ((Y[0] / g)**0.5))
return Y
class ZDT2:
def __init__(self):
self.dimension = 30
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])  # sum over x_2..x_n
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
class ZDT3:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
        g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])  # sum over x_2..x_n
Y[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))
return Y
class ZDT4:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension) - 5
self.min[0] = 0
self.max = np.zeros(self.dimension) + 5
self.max[0] = 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
        g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:], 2) - 10 * np.cos(4 * np.pi * X[1:]))  # Rastrigin-style g over x_2..x_n
Y[1] = g * (1 - (np.sqrt(Y[0] / g)))
return Y
class ZDT6:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)
		g = 1 + 9 * (np.sum(X[1:] / (self.dimension - 1)) ** 0.25)
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
if __name__ == '__main__':
zdt = ZDT1()
print(zdt.Func(np.ones(zdt.dimension)))
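	# --- Illustration (added; not in the original blob): for ZDT1 the
	# Pareto-optimal set is x_1 in [0, 1] with x_2..x_n = 0, so g = 1 and
	# f2 = 1 - sqrt(f1). A quick consistency check of Func() against that:
	zdt1 = ZDT1()
	X = np.zeros(zdt1.dimension)
	for x1 in np.linspace(0.0, 1.0, 5):
		X[0] = x1
		f1, f2 = zdt1.Func(X)
		assert abs(f2 - (1 - np.sqrt(f1))) < 1e-9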
|
normal
|
{
"blob_id": "8ca16947054b681a5f43d8b8029191d031d3a218",
"index": 8352,
"step-1": "<mask token>\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ZDT1:\n <mask token>\n <mask token>\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ZDT1:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 0.5)\n return Y\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nif __name__ == '__main__':\n zdt = ZDT1()\n print(zdt.Func(np.ones(zdt.dimension)))\n",
"step-4": "<mask token>\nimport numpy as np\n\n\nclass ZDT1:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 0.5)\n return Y\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nif __name__ == '__main__':\n zdt = ZDT1()\n print(zdt.Func(np.ones(zdt.dimension)))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Swking\n@File : ZDT.py\n@Date : 2018/12/28\n@Desc : \n\"\"\"\nimport numpy as np\nclass ZDT1:\n\tdef __init__(self):\n\t\tself.dimension = 30\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - ((Y[0] / g)**0.5))\n\t\treturn Y\n\nclass ZDT2:\n\tdef __init__(self):\n\t\tself.dimension = 30\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - (Y[0] / g) ** 2)\n\t\treturn Y\n\n\nclass ZDT3:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))\n\t\treturn Y\n\n\nclass ZDT4:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension) - 5\n\t\tself.min[0] = 0\n\t\tself.max = np.zeros(self.dimension) + 5\n\t\tself.max[0] = 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - 10 * np.cos(4 * np.pi * X[1:-1]))\n\t\tY[1] = g * (1 - (np.sqrt(Y[0] / g)))\n\t\treturn Y\n\n\nclass ZDT6:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)\n\t\tg = 1 + 9 * (np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25)\n\t\tY[1] = g * (1 - (Y[0] / g) ** 2)\n\t\treturn Y\n\nif __name__ == '__main__':\n\tzdt = ZDT1()\n\tprint(zdt.Func(np.ones(zdt.dimension)))",
"step-ids": [
12,
13,
16,
17,
18
]
}
|
[
12,
13,
16,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
db.集合.update()
"""
"""
实例 被替换了
> db.test1000.update({'name':'dapeng'},{'name':'大鹏'})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test1000.find()
{ "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" }
{ "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 }
{ "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "kongming", "age" : 12 }
{ "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 }
{ "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 }
>
"""
"""
实例2 利用$set:只修改匹配到的值
> db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}})
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
> db.test1000.find()
{ "_id" : ObjectId("5c35549d7ad0cf935d3c150d"), "name" : "大鹏" }
{ "_id" : ObjectId("5c3554f37ad0cf935d3c150e"), "nInserted" : 1 }
{ "_id" : ObjectId("5c3555417ad0cf935d3c150f"), "name" : "空明被修改", "age" : 12 }
{ "_id" : ObjectId("5c3555457ad0cf935d3c1510"), "name" : "kongming1", "age" : 12 }
{ "_id" : ObjectId("5c3555557ad0cf935d3c1511"), "name" : "kongming1", "age" : 12 }
>
"""
"""
实例3 修改多条
db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true})
"""
|
flexible
|
{
"blob_id": "7d8c2aa5674704d4443034c29bbdc715da9fd567",
"index": 5022,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\ndb.集合.update()\n\n\"\"\"\n\"\"\"\n实例 被替换了\n> db.test1000.update({'name':'dapeng'},{'name':'大鹏'})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\n> db.test1000.find()\n{ \"_id\" : ObjectId(\"5c35549d7ad0cf935d3c150d\"), \"name\" : \"大鹏\" }\n{ \"_id\" : ObjectId(\"5c3554f37ad0cf935d3c150e\"), \"nInserted\" : 1 }\n{ \"_id\" : ObjectId(\"5c3555417ad0cf935d3c150f\"), \"name\" : \"kongming\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555457ad0cf935d3c1510\"), \"name\" : \"kongming1\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555557ad0cf935d3c1511\"), \"name\" : \"kongming1\", \"age\" : 12 }\n> \n\"\"\"\n\n\"\"\"\n实例2 利用$set:只修改匹配到的值\n> db.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}})\nWriteResult({ \"nMatched\" : 1, \"nUpserted\" : 0, \"nModified\" : 1 })\n> db.test1000.find()\n{ \"_id\" : ObjectId(\"5c35549d7ad0cf935d3c150d\"), \"name\" : \"大鹏\" }\n{ \"_id\" : ObjectId(\"5c3554f37ad0cf935d3c150e\"), \"nInserted\" : 1 }\n{ \"_id\" : ObjectId(\"5c3555417ad0cf935d3c150f\"), \"name\" : \"空明被修改\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555457ad0cf935d3c1510\"), \"name\" : \"kongming1\", \"age\" : 12 }\n{ \"_id\" : ObjectId(\"5c3555557ad0cf935d3c1511\"), \"name\" : \"kongming1\", \"age\" : 12 }\n> \n\n\"\"\"\n\"\"\"\n实例3 修改多条\ndb.test1000.update({'name':'kongming'},{$set:{'name':'空明被修改'}},{multi:true})\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class SfCrawlSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SfCrawlSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def start_requests(self):
for page in range(1, 100):
urls = (
'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'
.format(page))
yield Request(url=urls, headers=self.header, callback=self.parse)
def parse(self, response):
datas = json.loads(response.text)['data']
if datas and len(datas) > 0:
for data in datas:
name = data['user'][2]
user_url = 'https://segmentfault.com' + data['user'][3]
id = data['user_id']
title = data['title']
excerpt = data['excerpt']
date = data['createdDate']
views = data['viewsWord']
votes = data['votes']
a_url = str('https://segmentfault.com' + data['url'])
item = SfItem(name=name, user_url=user_url, id=id, title=
title, excerpt=excerpt, date=date, views=views, votes=votes
)
yield Request(url=a_url, headers=self.header, callback=self
.parse_content, meta={'keys': item})
def parse_content(self, response):
item = response.meta['keys']
sel = parsel.Selector(response.text)
item['blog_name'] = sel.xpath(
'//div[@class="article__authormeta"]/a[2]/text()').extract_first()
item['blog_url'] = sel.xpath(
'//div[@class="article__authormeta"]/a[2]/@href').extract_first()
        item['content'] = sel.xpath('//div[@class="row"]/text()').extract_first()
yield item
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SfCrawlSpider(scrapy.Spider):
name = 'sf_crawl'
allowed_domains = ['segmentfault.com']
header = {'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
, 'referer': 'https://segmentfault.com/', 'content-type':
'application/json; charset=UTF-8'}
def start_requests(self):
for page in range(1, 100):
urls = (
'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'
.format(page))
yield Request(url=urls, headers=self.header, callback=self.parse)
def parse(self, response):
datas = json.loads(response.text)['data']
if datas and len(datas) > 0:
for data in datas:
name = data['user'][2]
user_url = 'https://segmentfault.com' + data['user'][3]
id = data['user_id']
title = data['title']
excerpt = data['excerpt']
date = data['createdDate']
views = data['viewsWord']
votes = data['votes']
a_url = str('https://segmentfault.com' + data['url'])
item = SfItem(name=name, user_url=user_url, id=id, title=
title, excerpt=excerpt, date=date, views=views, votes=votes
)
yield Request(url=a_url, headers=self.header, callback=self
.parse_content, meta={'keys': item})
def parse_content(self, response):
item = response.meta['keys']
sel = parsel.Selector(response.text)
item['blog_name'] = sel.xpath(
'//div[@class="article__authormeta"]/a[2]/text()').extract_first()
item['blog_url'] = sel.xpath(
'//div[@class="article__authormeta"]/a[2]/@href').extract_first()
        item['content'] = sel.xpath('//div[@class="row"]/text()').extract_first()
yield item
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
import parsel
import scrapy
from scrapy import Request
from SF.items import SfItem
class SfCrawlSpider(scrapy.Spider):
name = 'sf_crawl'
allowed_domains = ['segmentfault.com']
header = {'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
, 'referer': 'https://segmentfault.com/', 'content-type':
'application/json; charset=UTF-8'}
def start_requests(self):
for page in range(1, 100):
urls = (
'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'
.format(page))
yield Request(url=urls, headers=self.header, callback=self.parse)
def parse(self, response):
datas = json.loads(response.text)['data']
if datas and len(datas) > 0:
for data in datas:
name = data['user'][2]
user_url = 'https://segmentfault.com' + data['user'][3]
id = data['user_id']
title = data['title']
excerpt = data['excerpt']
date = data['createdDate']
views = data['viewsWord']
votes = data['votes']
a_url = str('https://segmentfault.com' + data['url'])
item = SfItem(name=name, user_url=user_url, id=id, title=
title, excerpt=excerpt, date=date, views=views, votes=votes
)
yield Request(url=a_url, headers=self.header, callback=self
.parse_content, meta={'keys': item})
def parse_content(self, response):
item = response.meta['keys']
sel = parsel.Selector(response.text)
item['blog_name'] = sel.xpath(
'//div[@class="article__authormeta"]/a[2]/text()').extract_first()
item['blog_url'] = sel.xpath(
'//div[@class="article__authormeta"]/a[2]/@href').extract_first()
        item['content'] = sel.xpath('//div[@class="row"]/text()').extract_first()
yield item
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
项目:爬取思否网站首页推荐文章
作者:cho
时间:2019.9.23
"""
import json
import parsel
import scrapy
from scrapy import Request
from SF.items import SfItem
class SfCrawlSpider(scrapy.Spider):
name = 'sf_crawl'
allowed_domains = ['segmentfault.com']
header = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',
'referer': 'https://segmentfault.com/',
'content-type': 'application/json; charset=UTF-8',
}
def start_requests(self):
for page in range(1,100):
urls = 'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'.format(page)
yield Request(url=urls,headers=self.header,callback=self.parse)
def parse(self, response):
datas = json.loads(response.text)["data"]
if datas and len(datas)>0:
for data in datas:
name = data['user'][2]
user_url = 'https://segmentfault.com'+ data['user'][3]
id = data['user_id']
title = data['title']
excerpt = data['excerpt']
date = data['createdDate']
views = data['viewsWord']
votes = data['votes']
a_url = str('https://segmentfault.com'+ data['url'])
item = SfItem(name=name,user_url=user_url,id=id,title=title,excerpt=excerpt,date=date,views=views,votes=votes)
yield Request(url=a_url,headers=self.header,callback=self.parse_content,meta={'keys':item})
def parse_content(self,response):
item = response.meta['keys']
sel = parsel.Selector(response.text)
item['blog_name'] = sel.xpath('//div[@class="article__authormeta"]/a[2]/text()').extract_first()
item['blog_url'] = sel.xpath('//div[@class="article__authormeta"]/a[2]/@href').extract_first()
        item['content'] = sel.xpath('//div[@class="row"]/text()').extract_first()  # '//' added: a bare 'div' relative to the document root never matches
yield item
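# --- Usage note (added): assuming this spider lives in a standard Scrapy
# project named SF (as `from SF.items import SfItem` suggests), it can be run
# and its items exported with Scrapy's built-in feed exporter:
#
#     scrapy crawl sf_crawl -o segmentfault.json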
|
flexible
|
{
"blob_id": "7ed6d475bfe36fdd0b6cd2f0902a0bccb22f7f60",
"index": 6082,
"step-1": "<mask token>\n\n\nclass SfCrawlSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SfCrawlSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n for page in range(1, 100):\n urls = (\n 'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'\n .format(page))\n yield Request(url=urls, headers=self.header, callback=self.parse)\n\n def parse(self, response):\n datas = json.loads(response.text)['data']\n if datas and len(datas) > 0:\n for data in datas:\n name = data['user'][2]\n user_url = 'https://segmentfault.com' + data['user'][3]\n id = data['user_id']\n title = data['title']\n excerpt = data['excerpt']\n date = data['createdDate']\n views = data['viewsWord']\n votes = data['votes']\n a_url = str('https://segmentfault.com' + data['url'])\n item = SfItem(name=name, user_url=user_url, id=id, title=\n title, excerpt=excerpt, date=date, views=views, votes=votes\n )\n yield Request(url=a_url, headers=self.header, callback=self\n .parse_content, meta={'keys': item})\n\n def parse_content(self, response):\n item = response.meta['keys']\n sel = parsel.Selector(response.text)\n item['blog_name'] = sel.xpath(\n '//div[@class=\"article__authormeta\"]/a[2]/text()').extract_first()\n item['blog_url'] = sel.xpath(\n '//div[@class=\"article__authormeta\"]/a[2]/@href').extract_first()\n item['content'] = sel.xpath('div[@class=\"row\"]/text()').extract_first()\n yield item\n",
"step-3": "<mask token>\n\n\nclass SfCrawlSpider(scrapy.Spider):\n name = 'sf_crawl'\n allowed_domains = ['segmentfault.com']\n header = {'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n , 'referer': 'https://segmentfault.com/', 'content-type':\n 'application/json; charset=UTF-8'}\n\n def start_requests(self):\n for page in range(1, 100):\n urls = (\n 'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'\n .format(page))\n yield Request(url=urls, headers=self.header, callback=self.parse)\n\n def parse(self, response):\n datas = json.loads(response.text)['data']\n if datas and len(datas) > 0:\n for data in datas:\n name = data['user'][2]\n user_url = 'https://segmentfault.com' + data['user'][3]\n id = data['user_id']\n title = data['title']\n excerpt = data['excerpt']\n date = data['createdDate']\n views = data['viewsWord']\n votes = data['votes']\n a_url = str('https://segmentfault.com' + data['url'])\n item = SfItem(name=name, user_url=user_url, id=id, title=\n title, excerpt=excerpt, date=date, views=views, votes=votes\n )\n yield Request(url=a_url, headers=self.header, callback=self\n .parse_content, meta={'keys': item})\n\n def parse_content(self, response):\n item = response.meta['keys']\n sel = parsel.Selector(response.text)\n item['blog_name'] = sel.xpath(\n '//div[@class=\"article__authormeta\"]/a[2]/text()').extract_first()\n item['blog_url'] = sel.xpath(\n '//div[@class=\"article__authormeta\"]/a[2]/@href').extract_first()\n item['content'] = sel.xpath('div[@class=\"row\"]/text()').extract_first()\n yield item\n",
"step-4": "<mask token>\nimport json\nimport parsel\nimport scrapy\nfrom scrapy import Request\nfrom SF.items import SfItem\n\n\nclass SfCrawlSpider(scrapy.Spider):\n name = 'sf_crawl'\n allowed_domains = ['segmentfault.com']\n header = {'user-agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n , 'referer': 'https://segmentfault.com/', 'content-type':\n 'application/json; charset=UTF-8'}\n\n def start_requests(self):\n for page in range(1, 100):\n urls = (\n 'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'\n .format(page))\n yield Request(url=urls, headers=self.header, callback=self.parse)\n\n def parse(self, response):\n datas = json.loads(response.text)['data']\n if datas and len(datas) > 0:\n for data in datas:\n name = data['user'][2]\n user_url = 'https://segmentfault.com' + data['user'][3]\n id = data['user_id']\n title = data['title']\n excerpt = data['excerpt']\n date = data['createdDate']\n views = data['viewsWord']\n votes = data['votes']\n a_url = str('https://segmentfault.com' + data['url'])\n item = SfItem(name=name, user_url=user_url, id=id, title=\n title, excerpt=excerpt, date=date, views=views, votes=votes\n )\n yield Request(url=a_url, headers=self.header, callback=self\n .parse_content, meta={'keys': item})\n\n def parse_content(self, response):\n item = response.meta['keys']\n sel = parsel.Selector(response.text)\n item['blog_name'] = sel.xpath(\n '//div[@class=\"article__authormeta\"]/a[2]/text()').extract_first()\n item['blog_url'] = sel.xpath(\n '//div[@class=\"article__authormeta\"]/a[2]/@href').extract_first()\n item['content'] = sel.xpath('div[@class=\"row\"]/text()').extract_first()\n yield item\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n项目:爬取思否网站首页推荐文章\n作者:cho\n时间:2019.9.23\n\"\"\"\nimport json\n\nimport parsel\nimport scrapy\nfrom scrapy import Request\n\nfrom SF.items import SfItem\n\n\nclass SfCrawlSpider(scrapy.Spider):\n name = 'sf_crawl'\n allowed_domains = ['segmentfault.com']\n header = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',\n 'referer': 'https://segmentfault.com/',\n 'content-type': 'application/json; charset=UTF-8',\n }\n\n def start_requests(self):\n for page in range(1,100):\n urls = 'https://segmentfault.com/api/timelines/recommend?page={}&_=4f2739f6f7dc1221704e01a1dfb7b8c7'.format(page)\n yield Request(url=urls,headers=self.header,callback=self.parse)\n\n def parse(self, response):\n\n datas = json.loads(response.text)[\"data\"]\n\n if datas and len(datas)>0:\n for data in datas:\n name = data['user'][2]\n user_url = 'https://segmentfault.com'+ data['user'][3]\n id = data['user_id']\n title = data['title']\n excerpt = data['excerpt']\n date = data['createdDate']\n views = data['viewsWord']\n votes = data['votes']\n a_url = str('https://segmentfault.com'+ data['url'])\n item = SfItem(name=name,user_url=user_url,id=id,title=title,excerpt=excerpt,date=date,views=views,votes=votes)\n yield Request(url=a_url,headers=self.header,callback=self.parse_content,meta={'keys':item})\n\n def parse_content(self,response):\n item = response.meta['keys']\n sel = parsel.Selector(response.text)\n item['blog_name'] = sel.xpath('//div[@class=\"article__authormeta\"]/a[2]/text()').extract_first()\n item['blog_url'] = sel.xpath('//div[@class=\"article__authormeta\"]/a[2]/@href').extract_first()\n item['content'] = sel.xpath('div[@class=\"row\"]/text()').extract_first()\n yield item\n\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
from pyloom import *
import random
import string
alphabet = string.ascii_letters
def random_string(N):
return ''.join([random.choice(alphabet) for _ in range(N)])
class TestBloomFilter(object):
def test_setup(self):
bf = BloomFilter(1000)
assert 10 == bf._num_hashes
assert 14380 == bf._num_bits
assert 14380 == len(bf._bitarray)
# and initially all bits are False
assert 0 == bf._bitarray.count()
# test again with a different false positive rate
bf = BloomFilter(1000, error=0.01)
assert 7 == bf._num_hashes
assert 9583 == bf._num_bits
assert 9583 == len(bf._bitarray)
# and initially all bits are False
assert 0 == bf._bitarray.count()
def test_add_contains(self):
bf = BloomFilter(1000, error=0.01)
keys1 = [random_string(10) for _ in range(1000)]
keys2 = [random_string(10) for _ in range(1000)]
for k in keys1:
bf.add(k)
assert k in bf
class TestScalableBloomFilter(object):
def test_scaling(self):
S, N, E = 1000, 10000, 0.01
# create a bloom filter with initial capacity of S
sbf = ScalableBloomFilter(S, E, 2)
keys1 = {random_string(10) for _ in range(N)}
keys2 = {random_string(10) for _ in range(N)}
for k in keys1:
sbf.add(k)
assert k in sbf
error = 0
total = 0
for k in keys2:
if k in keys1:
continue
total += 1
if k in sbf:
error += 1
error_rate = error / total
assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (error_rate, E)
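# --- Illustration (added): the textbook Bloom-filter sizing formulas are
# m = -n*ln(e)/(ln 2)^2 bits and k = (m/n)*ln 2 hash functions; pyloom's
# internal rounding may differ from these by a few bits, so treat this as a
# sanity check rather than the library's exact algorithm.
import math


def textbook_sizing(n, error):
    m = -n * math.log(error) / (math.log(2) ** 2)  # required number of bits
    k = (m / n) * math.log(2)                      # optimal number of hashes
    return math.ceil(m), round(k)


# textbook_sizing(1000, 0.01) -> (9586, 7): 7 hashes matches the assertion in
# test_setup, and 9586 bits is within a few bits of the 9583 pyloom reports.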
|
normal
|
{
"blob_id": "24e486edc6f80e0b7d58b5df898e6d34f53111c8",
"index": 4389,
"step-1": "<mask token>\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-2": "<mask token>\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-3": "<mask token>\nalphabet = string.ascii_letters\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-4": "from pyloom import *\nimport random\nimport string\nalphabet = string.ascii_letters\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n for k in keys1:\n bf.add(k)\n assert k in bf\n\n\nclass TestScalableBloomFilter(object):\n\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n total += 1\n if k in sbf:\n error += 1\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (\n error_rate, E)\n",
"step-5": "from pyloom import *\n\nimport random\nimport string\n\nalphabet = string.ascii_letters\n\n\ndef random_string(N):\n return ''.join([random.choice(alphabet) for _ in range(N)])\n\n\nclass TestBloomFilter(object):\n def test_setup(self):\n bf = BloomFilter(1000)\n assert 10 == bf._num_hashes\n assert 14380 == bf._num_bits\n assert 14380 == len(bf._bitarray)\n\n # and initially all bits are False\n assert 0 == bf._bitarray.count()\n\n # test again with a different false positive rate\n bf = BloomFilter(1000, error=0.01)\n assert 7 == bf._num_hashes\n assert 9583 == bf._num_bits\n assert 9583 == len(bf._bitarray)\n\n # and initially all bits are False\n assert 0 == bf._bitarray.count()\n\n def test_add_contains(self):\n bf = BloomFilter(1000, error=0.01)\n keys1 = [random_string(10) for _ in range(1000)]\n keys2 = [random_string(10) for _ in range(1000)]\n\n for k in keys1:\n bf.add(k)\n assert k in bf\nclass TestScalableBloomFilter(object):\n def test_scaling(self):\n S, N, E = 1000, 10000, 0.01\n\n # create a bloom filter with initial capacity of S\n sbf = ScalableBloomFilter(S, E, 2)\n keys1 = {random_string(10) for _ in range(N)}\n keys2 = {random_string(10) for _ in range(N)}\n\n for k in keys1:\n sbf.add(k)\n assert k in sbf\n\n error = 0\n total = 0\n for k in keys2:\n if k in keys1:\n continue\n\n total += 1\n if k in sbf:\n error += 1\n\n error_rate = error / total\n assert error_rate <= 2 * 0.01, 'Error rate is %.3f when it should be %.3f' % (error_rate, E)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
saveResult(
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here'
, results)
<|reserved_special_token_0|>
plt.plot(epoch_count, training_loss, 'r--')
plt.legend(['Training Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('U-Net Training Loss Function')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
data_gen_args = dict(horizontal_flip=True, vertical_flip=True)
imageTargetSize = 256, 256
trainPath = (
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Training'
)
trainImagePath = 'Selected Images Training'
trainLabelPath = 'Selected Images Label Binarized Training'
trainGene = trainGenerator(batch_size=1, train_path=trainPath,
trainImage_folder=trainImagePath, trainLabel_folder=trainLabelPath,
aug_dict=data_gen_args, save_to_dir=None, target_size=imageTargetSize,
trainImage_color_mode='grayscale', trainLabel_color_mode='grayscale',
trainImage_save_prefix='Image', trainLabel_save_prefix='Label', seed=1,
flag_multi_class=False, num_class=2)
<|reserved_special_token_0|>
model = unet()
model_checkpoint = ModelCheckpoint('unet_testing.hdf5', monitor='loss',
verbose=1, save_best_only=True)
trainHistory = model.fit_generator(trainGene, steps_per_epoch=100, epochs=7,
callbacks=[model_checkpoint])
testImagePath = (
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Test/Selected Images Test'
)
testGene = testGenerator(test_path=testImagePath, target_size=
imageTargetSize, flag_multi_class=False, as_gray=True)
results = model.predict_generator(testGene, len(os.listdir(testImagePath)),
verbose=1)
saveResult(
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here'
, results)
training_loss = trainHistory.history['loss']
epoch_count = range(1, len(training_loss) + 1)
plt.plot(epoch_count, training_loss, 'r--')
plt.legend(['Training Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('U-Net Training Loss Function')
plt.show()
<|reserved_special_token_1|>
from model import *
from data import *
import os
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
data_gen_args = dict(horizontal_flip=True, vertical_flip=True)
imageTargetSize = 256, 256
trainPath = (
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Training'
)
trainImagePath = 'Selected Images Training'
trainLabelPath = 'Selected Images Label Binarized Training'
trainGene = trainGenerator(batch_size=1, train_path=trainPath,
trainImage_folder=trainImagePath, trainLabel_folder=trainLabelPath,
aug_dict=data_gen_args, save_to_dir=None, target_size=imageTargetSize,
trainImage_color_mode='grayscale', trainLabel_color_mode='grayscale',
trainImage_save_prefix='Image', trainLabel_save_prefix='Label', seed=1,
flag_multi_class=False, num_class=2)
<|reserved_special_token_0|>
model = unet()
model_checkpoint = ModelCheckpoint('unet_testing.hdf5', monitor='loss',
verbose=1, save_best_only=True)
trainHistory = model.fit_generator(trainGene, steps_per_epoch=100, epochs=7,
callbacks=[model_checkpoint])
testImagePath = (
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Test/Selected Images Test'
)
testGene = testGenerator(test_path=testImagePath, target_size=
imageTargetSize, flag_multi_class=False, as_gray=True)
results = model.predict_generator(testGene, len(os.listdir(testImagePath)),
verbose=1)
saveResult(
'/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here'
, results)
training_loss = trainHistory.history['loss']
epoch_count = range(1, len(training_loss) + 1)
plt.plot(epoch_count, training_loss, 'r--')
plt.legend(['Training Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('U-Net Training Loss Function')
plt.show()
<|reserved_special_token_1|>
from model import *
from data import *
import os
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
data_gen_args = dict(horizontal_flip = True,
vertical_flip = True)
imageTargetSize = (256, 256)
trainPath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Training'
trainImagePath = 'Selected Images Training'
trainLabelPath = 'Selected Images Label Binarized Training'
#augTrainPath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/train/aug'
#validationPath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/validation'
#validationImagePath = 'Selected Images Resized Validation'
#validationLabelPath = 'Selected Images Label Resized Binarized Validation'
trainGene = trainGenerator(batch_size = 1,
train_path = trainPath,
trainImage_folder = trainImagePath,
trainLabel_folder = trainLabelPath,
aug_dict = data_gen_args,
save_to_dir = None,
target_size = imageTargetSize,
trainImage_color_mode = 'grayscale',
trainLabel_color_mode = 'grayscale',
trainImage_save_prefix = 'Image',
trainLabel_save_prefix = 'Label',
seed = 1,
flag_multi_class = False,
num_class = 2)
""""
validationGene = validationGenerator(validation_path = validationPath,
validationImage_path = validationImagePath,
validationLabel_path = validationLabelPath,
target_size = imageTargetSize,
flag_multi_class = False,
as_gray = True)
"""
model = unet()
model_checkpoint = ModelCheckpoint('unet_testing.hdf5', monitor='loss',verbose=1, save_best_only=True)
trainHistory = model.fit_generator(trainGene,
steps_per_epoch=100,
epochs=7,
callbacks = [model_checkpoint]
)
testImagePath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Test/Selected Images Test'
testGene = testGenerator(test_path = testImagePath,
target_size = imageTargetSize,
flag_multi_class = False,
as_gray = True)
results = model.predict_generator(testGene, len(os.listdir(testImagePath)), verbose = 1)
saveResult("/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here", results)
training_loss = trainHistory.history['loss']
#test_loss = trainHistory.history['val_loss']
epoch_count = range(1, len(training_loss)+1)
plt.plot(epoch_count, training_loss, 'r--')
#plt.plot(epoch_count, test_loss, 'b-')
plt.legend(['Training Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('U-Net Training Loss Function')
plt.show()
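# --- Illustration (added): sklearn's confusion_matrix is imported above but
# never used. A minimal, hedged sketch of how it could be applied, assuming a
# hypothetical array `testLabels` holding the binarized ground-truth masks for
# the test set, shaped like `results`:
#
#     binary_pred = (results > 0.5).astype(int)
#     cm = confusion_matrix(testLabels.ravel(), binary_pred.ravel())
#     print(cm)  # rows are true classes, columns are predicted classes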
|
flexible
|
{
"blob_id": "ba379ed90bccd05d058f69f33a960779f8b8bcd5",
"index": 5632,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsaveResult(\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here'\n , results)\n<mask token>\nplt.plot(epoch_count, training_loss, 'r--')\nplt.legend(['Training Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.title('U-Net Training Loss Function')\nplt.show()\n",
"step-3": "<mask token>\ndata_gen_args = dict(horizontal_flip=True, vertical_flip=True)\nimageTargetSize = 256, 256\ntrainPath = (\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Training'\n )\ntrainImagePath = 'Selected Images Training'\ntrainLabelPath = 'Selected Images Label Binarized Training'\ntrainGene = trainGenerator(batch_size=1, train_path=trainPath,\n trainImage_folder=trainImagePath, trainLabel_folder=trainLabelPath,\n aug_dict=data_gen_args, save_to_dir=None, target_size=imageTargetSize,\n trainImage_color_mode='grayscale', trainLabel_color_mode='grayscale',\n trainImage_save_prefix='Image', trainLabel_save_prefix='Label', seed=1,\n flag_multi_class=False, num_class=2)\n<mask token>\nmodel = unet()\nmodel_checkpoint = ModelCheckpoint('unet_testing.hdf5', monitor='loss',\n verbose=1, save_best_only=True)\ntrainHistory = model.fit_generator(trainGene, steps_per_epoch=100, epochs=7,\n callbacks=[model_checkpoint])\ntestImagePath = (\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Test/Selected Images Test'\n )\ntestGene = testGenerator(test_path=testImagePath, target_size=\n imageTargetSize, flag_multi_class=False, as_gray=True)\nresults = model.predict_generator(testGene, len(os.listdir(testImagePath)),\n verbose=1)\nsaveResult(\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here'\n , results)\ntraining_loss = trainHistory.history['loss']\nepoch_count = range(1, len(training_loss) + 1)\nplt.plot(epoch_count, training_loss, 'r--')\nplt.legend(['Training Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.title('U-Net Training Loss Function')\nplt.show()\n",
"step-4": "from model import *\nfrom data import *\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\ndata_gen_args = dict(horizontal_flip=True, vertical_flip=True)\nimageTargetSize = 256, 256\ntrainPath = (\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Training'\n )\ntrainImagePath = 'Selected Images Training'\ntrainLabelPath = 'Selected Images Label Binarized Training'\ntrainGene = trainGenerator(batch_size=1, train_path=trainPath,\n trainImage_folder=trainImagePath, trainLabel_folder=trainLabelPath,\n aug_dict=data_gen_args, save_to_dir=None, target_size=imageTargetSize,\n trainImage_color_mode='grayscale', trainLabel_color_mode='grayscale',\n trainImage_save_prefix='Image', trainLabel_save_prefix='Label', seed=1,\n flag_multi_class=False, num_class=2)\n<mask token>\nmodel = unet()\nmodel_checkpoint = ModelCheckpoint('unet_testing.hdf5', monitor='loss',\n verbose=1, save_best_only=True)\ntrainHistory = model.fit_generator(trainGene, steps_per_epoch=100, epochs=7,\n callbacks=[model_checkpoint])\ntestImagePath = (\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Test/Selected Images Test'\n )\ntestGene = testGenerator(test_path=testImagePath, target_size=\n imageTargetSize, flag_multi_class=False, as_gray=True)\nresults = model.predict_generator(testGene, len(os.listdir(testImagePath)),\n verbose=1)\nsaveResult(\n '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here'\n , results)\ntraining_loss = trainHistory.history['loss']\nepoch_count = range(1, len(training_loss) + 1)\nplt.plot(epoch_count, training_loss, 'r--')\nplt.legend(['Training Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.title('U-Net Training Loss Function')\nplt.show()\n",
"step-5": "from model import *\nfrom data import *\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\n\ndata_gen_args = dict(horizontal_flip = True,\n vertical_flip = True)\n\n\nimageTargetSize = (256, 256)\n\n\ntrainPath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Training'\ntrainImagePath = 'Selected Images Training'\ntrainLabelPath = 'Selected Images Label Binarized Training'\n#augTrainPath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/train/aug'\n\n#validationPath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/validation'\n#validationImagePath = 'Selected Images Resized Validation'\n#validationLabelPath = 'Selected Images Label Resized Binarized Validation'\n\n\ntrainGene = trainGenerator(batch_size = 1,\n train_path = trainPath,\n trainImage_folder = trainImagePath,\n trainLabel_folder = trainLabelPath,\n aug_dict = data_gen_args,\n save_to_dir = None,\n target_size = imageTargetSize,\n trainImage_color_mode = 'grayscale',\n trainLabel_color_mode = 'grayscale',\n trainImage_save_prefix = 'Image',\n trainLabel_save_prefix = 'Label',\n seed = 1,\n flag_multi_class = False,\n num_class = 2)\n\n\"\"\"\"\nvalidationGene = validationGenerator(validation_path = validationPath,\n validationImage_path = validationImagePath,\n validationLabel_path = validationLabelPath,\n target_size = imageTargetSize,\n flag_multi_class = False,\n as_gray = True)\n\"\"\"\n\nmodel = unet()\nmodel_checkpoint = ModelCheckpoint('unet_testing.hdf5', monitor='loss',verbose=1, save_best_only=True)\ntrainHistory = model.fit_generator(trainGene,\n steps_per_epoch=100,\n epochs=7,\n callbacks = [model_checkpoint]\n )\n\n\ntestImagePath = '/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/Test/Selected Images Test'\ntestGene = testGenerator(test_path = testImagePath,\n target_size = imageTargetSize,\n flag_multi_class = False,\n as_gray = True)\n\n\nresults = model.predict_generator(testGene, len(os.listdir(testImagePath)), verbose = 1)\nsaveResult(\"/work/scratch/zhangbin/EmbryoTracking_ClaireBinZhang/MotilityAnalysis/20160317 10 dpf 60 fps 15 min (2)/here\", results)\n\n\ntraining_loss = trainHistory.history['loss']\n#test_loss = history.history['val_loss']\n\nepoch_count = range(1, len(training_loss)+1)\n\n\nplt.plot(epoch_count, training_loss, 'r--')\n#plt.plot(epoch_count, test_loss, 'b-')\nplt.legend(['Training Loss'])\nplt.xlabel('Epoch')\nplt.ylabel('Loss')\nplt.title('U-Net Training Loss Function')\nplt.show();\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main():
random_graph = create_random_graph(5, 10)
print('THE GRAPH:')
for vertex in random_graph.parse_vertices():
for edge in random_graph.parse_outbound_edges(vertex):
print(edge)
print('\n')
next_vertex = find_accessible_vertices_backwards(random_graph, 1)
print(next_vertex.keys())
print('\n')
path = find_minimum_length_path(random_graph, 1, 4)
print(path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_minimum_length_path(graph, start_vertex, end_vertex):
next_vertex = find_accessible_vertices_backwards(graph, end_vertex)
if start_vertex not in next_vertex.keys():
raise ValueError('There is no path from ' + str(start_vertex) +
' to ' + str(end_vertex))
path = [start_vertex]
last_vertex = start_vertex
reached_end = False
while not reached_end:
path.append(next_vertex[last_vertex])
last_vertex = next_vertex[last_vertex]
if path[-1] == end_vertex:
reached_end = True
return path
def main():
random_graph = create_random_graph(5, 10)
print('THE GRAPH:')
for vertex in random_graph.parse_vertices():
for edge in random_graph.parse_outbound_edges(vertex):
print(edge)
print('\n')
next_vertex = find_accessible_vertices_backwards(random_graph, 1)
print(next_vertex.keys())
print('\n')
path = find_minimum_length_path(random_graph, 1, 4)
print(path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def find_accessible_vertices_backwards(graph, end_vertex):
if end_vertex not in graph.parse_vertices():
raise ValueError('The end vertex is not in the graph.')
visited = []
queue = []
next_vertex = {}
distance_to_end = {}
queue.append(end_vertex)
visited.append(end_vertex)
distance_to_end[end_vertex] = 0
while len(queue) > 0:
y = queue[0]
queue = queue[1:]
for edge in graph.parse_inbound_edges(y):
if edge.source_id not in visited:
visited.append(edge.source_id)
queue.append(edge.source_id)
distance_to_end[edge.source_id] = distance_to_end[y] + 1
next_vertex[edge.source_id] = y
return next_vertex
def find_minimum_length_path(graph, start_vertex, end_vertex):
next_vertex = find_accessible_vertices_backwards(graph, end_vertex)
if start_vertex not in next_vertex.keys():
raise ValueError('There is no path from ' + str(start_vertex) +
' to ' + str(end_vertex))
path = [start_vertex]
last_vertex = start_vertex
reached_end = False
while not reached_end:
path.append(next_vertex[last_vertex])
last_vertex = next_vertex[last_vertex]
if path[-1] == end_vertex:
reached_end = True
return path
def main():
random_graph = create_random_graph(5, 10)
print('THE GRAPH:')
for vertex in random_graph.parse_vertices():
for edge in random_graph.parse_outbound_edges(vertex):
print(edge)
print('\n')
next_vertex = find_accessible_vertices_backwards(random_graph, 1)
print(next_vertex.keys())
print('\n')
path = find_minimum_length_path(random_graph, 1, 4)
print(path)
main()
<|reserved_special_token_1|>
from Graph import create_random_graph
def find_accessible_vertices_backwards(graph, end_vertex):
if end_vertex not in graph.parse_vertices():
raise ValueError('The end vertex is not in the graph.')
visited = []
queue = []
next_vertex = {}
distance_to_end = {}
queue.append(end_vertex)
visited.append(end_vertex)
distance_to_end[end_vertex] = 0
while len(queue) > 0:
y = queue[0]
queue = queue[1:]
for edge in graph.parse_inbound_edges(y):
if edge.source_id not in visited:
visited.append(edge.source_id)
queue.append(edge.source_id)
distance_to_end[edge.source_id] = distance_to_end[y] + 1
next_vertex[edge.source_id] = y
return next_vertex
def find_minimum_length_path(graph, start_vertex, end_vertex):
next_vertex = find_accessible_vertices_backwards(graph, end_vertex)
if start_vertex not in next_vertex.keys():
raise ValueError('There is no path from ' + str(start_vertex) +
' to ' + str(end_vertex))
path = [start_vertex]
last_vertex = start_vertex
reached_end = False
while not reached_end:
path.append(next_vertex[last_vertex])
last_vertex = next_vertex[last_vertex]
if path[-1] == end_vertex:
reached_end = True
return path
def main():
random_graph = create_random_graph(5, 10)
print('THE GRAPH:')
for vertex in random_graph.parse_vertices():
for edge in random_graph.parse_outbound_edges(vertex):
print(edge)
print('\n')
next_vertex = find_accessible_vertices_backwards(random_graph, 1)
print(next_vertex.keys())
print('\n')
path = find_minimum_length_path(random_graph, 1, 4)
print(path)
main()
<|reserved_special_token_1|>
from Graph import create_random_graph
def find_accessible_vertices_backwards(graph, end_vertex):
if end_vertex not in graph.parse_vertices():
raise ValueError("The end vertex is not in the graph.")
visited = []
queue = []
next_vertex = {}
distance_to_end = {}
queue.append(end_vertex)
visited.append(end_vertex)
distance_to_end[end_vertex] = 0
while len(queue) > 0:
y = queue[0]
queue = queue[1:]
for edge in graph.parse_inbound_edges(y):
if edge.source_id not in visited:
visited.append(edge.source_id)
queue.append(edge.source_id)
distance_to_end[edge.source_id] = distance_to_end[y] + 1
next_vertex[edge.source_id] = y
return next_vertex
def find_minimum_length_path(graph, start_vertex, end_vertex):
next_vertex = find_accessible_vertices_backwards(graph, end_vertex)
if start_vertex not in next_vertex.keys():
raise ValueError("There is no path from " + str(start_vertex) + " to " + str(end_vertex))
path = [start_vertex]
last_vertex = start_vertex
reached_end = False
while not reached_end:
path.append(next_vertex[last_vertex])
last_vertex = next_vertex[last_vertex]
if path[-1] == end_vertex:
reached_end = True
return path
def main():
random_graph = create_random_graph(5, 10)
print("THE GRAPH:")
for vertex in random_graph.parse_vertices():
for edge in random_graph.parse_outbound_edges(vertex):
print(edge)
print("\n")
next_vertex = find_accessible_vertices_backwards(random_graph, 1)
print(next_vertex.keys())
print("\n")
path = find_minimum_length_path(random_graph, 1, 4)
print(path)
main()
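# --- Usage note (added): find_accessible_vertices_backwards() also builds a
# distance_to_end dict that is currently discarded; returning it as well would
# expose path lengths without walking next_vertex. As it stands, the shortest
# distance can be recovered from the returned path:
#
#     path = find_minimum_length_path(graph, start, end)
#     distance = len(path) - 1  # number of edges on a minimum-length path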
|
flexible
|
{
"blob_id": "f882589729d74a910d20856d4dc02546fe316e0d",
"index": 2994,
"step-1": "<mask token>\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_accessible_vertices_backwards(graph, end_vertex):\n if end_vertex not in graph.parse_vertices():\n raise ValueError('The end vertex is not in the graph.')\n visited = []\n queue = []\n next_vertex = {}\n distance_to_end = {}\n queue.append(end_vertex)\n visited.append(end_vertex)\n distance_to_end[end_vertex] = 0\n while len(queue) > 0:\n y = queue[0]\n queue = queue[1:]\n for edge in graph.parse_inbound_edges(y):\n if edge.source_id not in visited:\n visited.append(edge.source_id)\n queue.append(edge.source_id)\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\n next_vertex[edge.source_id] = y\n return next_vertex\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\nmain()\n",
"step-4": "from Graph import create_random_graph\n\n\ndef find_accessible_vertices_backwards(graph, end_vertex):\n if end_vertex not in graph.parse_vertices():\n raise ValueError('The end vertex is not in the graph.')\n visited = []\n queue = []\n next_vertex = {}\n distance_to_end = {}\n queue.append(end_vertex)\n visited.append(end_vertex)\n distance_to_end[end_vertex] = 0\n while len(queue) > 0:\n y = queue[0]\n queue = queue[1:]\n for edge in graph.parse_inbound_edges(y):\n if edge.source_id not in visited:\n visited.append(edge.source_id)\n queue.append(edge.source_id)\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\n next_vertex[edge.source_id] = y\n return next_vertex\n\n\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\n if start_vertex not in next_vertex.keys():\n raise ValueError('There is no path from ' + str(start_vertex) +\n ' to ' + str(end_vertex))\n path = [start_vertex]\n last_vertex = start_vertex\n reached_end = False\n while not reached_end:\n path.append(next_vertex[last_vertex])\n last_vertex = next_vertex[last_vertex]\n if path[-1] == end_vertex:\n reached_end = True\n return path\n\n\ndef main():\n random_graph = create_random_graph(5, 10)\n print('THE GRAPH:')\n for vertex in random_graph.parse_vertices():\n for edge in random_graph.parse_outbound_edges(vertex):\n print(edge)\n print('\\n')\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\n print(next_vertex.keys())\n print('\\n')\n path = find_minimum_length_path(random_graph, 1, 4)\n print(path)\n\n\nmain()\n",
"step-5": "from Graph import create_random_graph\r\n\r\n\r\ndef find_accessible_vertices_backwards(graph, end_vertex):\r\n if end_vertex not in graph.parse_vertices():\r\n raise ValueError(\"The end vertex is not in the graph.\")\r\n\r\n visited = []\r\n queue = []\r\n next_vertex = {}\r\n distance_to_end = {}\r\n\r\n queue.append(end_vertex)\r\n visited.append(end_vertex)\r\n distance_to_end[end_vertex] = 0\r\n while len(queue) > 0:\r\n y = queue[0]\r\n queue = queue[1:]\r\n for edge in graph.parse_inbound_edges(y):\r\n if edge.source_id not in visited:\r\n visited.append(edge.source_id)\r\n queue.append(edge.source_id)\r\n distance_to_end[edge.source_id] = distance_to_end[y] + 1\r\n next_vertex[edge.source_id] = y\r\n\r\n return next_vertex\r\n\r\n\r\ndef find_minimum_length_path(graph, start_vertex, end_vertex):\r\n next_vertex = find_accessible_vertices_backwards(graph, end_vertex)\r\n\r\n if start_vertex not in next_vertex.keys():\r\n raise ValueError(\"There is no path from \" + str(start_vertex) + \" to \" + str(end_vertex))\r\n\r\n path = [start_vertex]\r\n last_vertex = start_vertex\r\n reached_end = False\r\n while not reached_end:\r\n path.append(next_vertex[last_vertex])\r\n last_vertex = next_vertex[last_vertex]\r\n if path[-1] == end_vertex:\r\n reached_end = True\r\n\r\n return path\r\n\r\n\r\ndef main():\r\n random_graph = create_random_graph(5, 10)\r\n\r\n print(\"THE GRAPH:\")\r\n for vertex in random_graph.parse_vertices():\r\n for edge in random_graph.parse_outbound_edges(vertex):\r\n print(edge)\r\n\r\n print(\"\\n\")\r\n next_vertex = find_accessible_vertices_backwards(random_graph, 1)\r\n print(next_vertex.keys())\r\n print(\"\\n\")\r\n\r\n path = find_minimum_length_path(random_graph, 1, 4)\r\n print(path)\r\n\r\n\r\nmain()",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
from . import FixtureTest
class GatesLineGeometry(FixtureTest):
def test_linear_gate(self):
# Add barrier:gates with line geometries in landuse
        # Line barrier:gate feature
self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])
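        # expect the way to appear in the landuse layer of zoom-16 tile (10482, 25335)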
self.assert_has_feature(
16, 10482, 25335, 'landuse',
{'id': 391260223, 'kind': 'gate'})
|
normal
|
{
"blob_id": "2192e328bdfa454ff1d1f66a05fb6a322c48b244",
"index": 2847,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GatesLineGeometry(FixtureTest):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GatesLineGeometry(FixtureTest):\n\n def test_linear_gate(self):\n self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])\n self.assert_has_feature(16, 10482, 25335, 'landuse', {'id': \n 391260223, 'kind': 'gate'})\n",
"step-4": "from . import FixtureTest\n\n\nclass GatesLineGeometry(FixtureTest):\n\n def test_linear_gate(self):\n self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])\n self.assert_has_feature(16, 10482, 25335, 'landuse', {'id': \n 391260223, 'kind': 'gate'})\n",
"step-5": "from . import FixtureTest\n\n\nclass GatesLineGeometry(FixtureTest):\n def test_linear_gate(self):\n # Add barrier:gates with line geometries in landuse\n # Line barrier:ghate feature\n self.load_fixtures(['http://www.openstreetmap.org/way/391260223'])\n\n self.assert_has_feature(\n 16, 10482, 25335, 'landuse',\n {'id': 391260223, 'kind': 'gate'})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
from scapy.all import *
from optparse import OptionParser
import socket
import struct
import random
class MagicARP:
def __init__(self, iface):
self.iface = iface
self.macrecs = {}
def magic_arp(self, pkt):
# only look for queries
if ARP in pkt and pkt[ARP].op == 1:
# Get a random MAC address and remember it
mac = get_random_mac()
self.macrecs.setdefault(pkt[ARP].pdst, mac) # The 'setdefault' method will set the value only if it hasn't already been set
# create a response packet
# This is all done with scapy functions/objects
print "ARP: Resolved %s to %s" % (pkt[ARP].pdst, self.macrecs[pkt[ARP].pdst])
sendp( Ether(src=self.macrecs[pkt[ARP].pdst], dst = pkt[Ether].src, type = 2054) /
ARP(hwtype = 1, ptype=0x800, hwlen=6, plen=4, op=2,
hwsrc=self.macrecs[pkt[ARP].pdst],
hwdst=pkt[Ether].src,
psrc=pkt[ARP].pdst,
pdst=pkt[ARP].psrc),
iface = self.iface)
def get_random_mac():
"""Generate a random MAC address"""
# use the Dlink range
mac = "00:05:5D"
for i in range(0,3):
mac += ":%s" % hex(random.randrange(0,256))[2:]
return mac
def main():
clparser = OptionParser()
clparser.add_option("-i", "--interface", help="Interface to listen and send pkts on", action="store", type="string", dest="iface")
(options, args) = clparser.parse_args()
# instantiate new class
new_magic = MagicARP(options.iface)
	# set up a sniffer with a callback to 'magic_arp'; filter for ARP traffic only
sniff(prn=new_magic.magic_arp, filter="arp", store=0, iface=options.iface)
if __name__ == "__main__":
main()
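
# Example invocation (interface name and filename are illustrative; requires
# scapy and root privileges to sniff and send raw frames):
#   sudo python magic_arp.py -i eth0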
|
normal
|
{
"blob_id": "c58bfa620df9f1b1f31c83a76d0d8a4576cbd535",
"index": 7795,
"step-1": "#!/usr/bin/env python\n\nfrom scapy.all import *\nfrom optparse import OptionParser\nimport socket\nimport struct\n\n\n\n\t\nclass MagicARP:\n\t\n\tdef __init__(self, iface):\n\t\t\n\t\tself.iface = iface\n\t\tself.macrecs = {}\n\t\t\n\t\t\n\tdef magic_arp(self, pkt):\n\t\t# only look for queries\n\t\tif ARP in pkt and pkt[ARP].op == 1:\n\t\t\t\n\t\t\t# Get a random MAC address and remember it\n\t\t\tmac = get_random_mac()\t\t\t\n\t\t\tself.macrecs.setdefault(pkt[ARP].pdst, mac) # The 'setdefault' method will set the value only if it hasn't already been set\n\t\t\t\n\t\t\t# create a response packet\n\t\t\t# This is all done with scapy functions/objects\n\n\t\t\tprint \"ARP: Resolved %s to %s\" % (pkt[ARP].pdst, self.macrecs[pkt[ARP].pdst])\n\t\t\tsendp( Ether(src=self.macrecs[pkt[ARP].pdst], dst = pkt[Ether].src, type = 2054) /\n\t\t\t\tARP(hwtype = 1, ptype=0x800, hwlen=6, plen=4, op=2, \n\t\t\t\t\thwsrc=self.macrecs[pkt[ARP].pdst], \n\t\t\t\t\thwdst=pkt[Ether].src, \n\t\t\t\t\tpsrc=pkt[ARP].pdst,\n\t\t\t\t\tpdst=pkt[ARP].psrc), \n\t\t\t\tiface = self.iface)\n\t\t\t\t\ndef get_random_mac():\n\t\"\"\"Generate a random MAC address\"\"\"\n\t\n\t# use the Dlink range\n\tmac = \"00:05:5D\"\n\t\n\tfor i in range(0,3):\n\t\tmac += \":%s\" % hex(random.randrange(0,256))[2:]\n\t\t\n\t\t\n\treturn mac\n\ndef main():\n\t\n\tclparser = OptionParser()\n\n\tclparser.add_option(\"-i\", \"--interface\", help=\"Interface to listen and send pkts on\", action=\"store\", type=\"string\", dest=\"iface\")\n\t\n\n\t(options, args) = clparser.parse_args()\n\t\n\t# instantiate new class\n\t\n\tnew_magic = MagicARP(options.iface)\n\t\n\t\t\n\t# set up a sniffer with a callback function to 'magic_dns'. Filter for only stuff going to port 53\n\tsniff(prn=new_magic.magic_arp, filter=\"arp\", store=0, iface=options.iface)\n\t\t\n\nif __name__ == \"__main__\":\n\tmain()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(rest_endpoint)
<|reserved_special_token_0|>
print(run_id)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ws = Workspace.from_config()
step1 = PythonScriptStep(name='prepare data', source_directory='scripts',
script_name='data_prep.py', compute_target='aml-cluster')
step2 = PythonScriptStep(name='train model', source_directory='scripts',
script_name='train_model.py', compute_target='aml-cluster')
train_pipeline = Pipeline(workspace=ws, steps=[step1, step2])
experiment = Experiment(workspace=ws, name='training-pipeline')
pipeline_run = experiment.submit(train_pipeline)
published_pipeline = pipeline_run.publish(name='training_pipeline',
description='Model training pipeline', version='1.0')
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
response = requests.post(rest_endpoint, headers=some_auth_header, json={
'ExperimentName': 'run_training_pipeline'})
run_id = response.json()['Id']
print(run_id)
<|reserved_special_token_1|>
from azureml.core import Workspace
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
from azureml.pipeline.steps import PythonScriptStep
import requests
ws = Workspace.from_config()
step1 = PythonScriptStep(name='prepare data', source_directory='scripts',
script_name='data_prep.py', compute_target='aml-cluster')
step2 = PythonScriptStep(name='train model', source_directory='scripts',
script_name='train_model.py', compute_target='aml-cluster')
train_pipeline = Pipeline(workspace=ws, steps=[step1, step2])
experiment = Experiment(workspace=ws, name='training-pipeline')
pipeline_run = experiment.submit(train_pipeline)
published_pipeline = pipeline_run.publish(name='training_pipeline',
description='Model training pipeline', version='1.0')
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
response = requests.post(rest_endpoint, headers=some_auth_header, json={
'ExperimentName': 'run_training_pipeline'})
run_id = response.json()['Id']
print(run_id)
<|reserved_special_token_1|>
from azureml.core import Workspace
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
from azureml.pipeline.steps import PythonScriptStep
import requests
ws = Workspace.from_config()
# Step to run a Python script
step1 = PythonScriptStep(
name = "prepare data",
source_directory = "scripts",
script_name = "data_prep.py",
compute_target = "aml-cluster"
)
# Step to train a model
step2 = PythonScriptStep(
name = "train model",
source_directory = "scripts",
script_name = "train_model.py",
compute_target = "aml-cluster"
)
# Construct the pipeline
train_pipeline = Pipeline(workspace = ws, steps = [step1, step2])
# Create an experiment and run the pipeline with it
experiment = Experiment(workspace = ws, name = "training-pipeline")
pipeline_run = experiment.submit(train_pipeline)
# To run all pipeline steps without cached results
# pipeline_run = experiment.submit(train_pipeline, regenerate_outputs=True)
# Publish the pipeline run
published_pipeline = pipeline_run.publish(
name="training_pipeline",
description="Model training pipeline",
version="1.0"
)
# Get the endpoint for the published pipeline
rest_endpoint = published_pipeline.endpoint
print(rest_endpoint)
# Consume the pipeline through REST request
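# (some_auth_header is assumed to be an AAD bearer-token header, e.g. from
#  azureml.core.authentication.InteractiveLoginAuthentication().get_authentication_header())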
response = requests.post(
rest_endpoint,
headers=some_auth_header,
json={"ExperimentName": "run_training_pipeline"})
run_id = response.json()["Id"]
print(run_id)
|
flexible
|
{
"blob_id": "4a7f8221208e8252c7f5c0adff2949f0e552def1",
"index": 775,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(rest_endpoint)\n<mask token>\nprint(run_id)\n",
"step-3": "<mask token>\nws = Workspace.from_config()\nstep1 = PythonScriptStep(name='prepare data', source_directory='scripts',\n script_name='data_prep.py', compute_target='aml-cluster')\nstep2 = PythonScriptStep(name='train model', source_directory='scripts',\n script_name='train_model.py', compute_target='aml-cluster')\ntrain_pipeline = Pipeline(workspace=ws, steps=[step1, step2])\nexperiment = Experiment(workspace=ws, name='training-pipeline')\npipeline_run = experiment.submit(train_pipeline)\npublished_pipeline = pipeline_run.publish(name='training_pipeline',\n description='Model training pipeline', version='1.0')\nrest_endpoint = published_pipeline.endpoint\nprint(rest_endpoint)\nresponse = requests.post(rest_endpoint, headers=some_auth_header, json={\n 'ExperimentName': 'run_training_pipeline'})\nrun_id = response.json()['Id']\nprint(run_id)\n",
"step-4": "from azureml.core import Workspace\nfrom azureml.pipeline.core import Pipeline\nfrom azureml.core import Experiment\nfrom azureml.pipeline.steps import PythonScriptStep\nimport requests\nws = Workspace.from_config()\nstep1 = PythonScriptStep(name='prepare data', source_directory='scripts',\n script_name='data_prep.py', compute_target='aml-cluster')\nstep2 = PythonScriptStep(name='train model', source_directory='scripts',\n script_name='train_model.py', compute_target='aml-cluster')\ntrain_pipeline = Pipeline(workspace=ws, steps=[step1, step2])\nexperiment = Experiment(workspace=ws, name='training-pipeline')\npipeline_run = experiment.submit(train_pipeline)\npublished_pipeline = pipeline_run.publish(name='training_pipeline',\n description='Model training pipeline', version='1.0')\nrest_endpoint = published_pipeline.endpoint\nprint(rest_endpoint)\nresponse = requests.post(rest_endpoint, headers=some_auth_header, json={\n 'ExperimentName': 'run_training_pipeline'})\nrun_id = response.json()['Id']\nprint(run_id)\n",
"step-5": "from azureml.core import Workspace\r\nfrom azureml.pipeline.core import Pipeline\r\nfrom azureml.core import Experiment\r\nfrom azureml.pipeline.steps import PythonScriptStep\r\nimport requests\r\n\r\nws = Workspace.from_config()\r\n\r\n# Step to run a Python script\r\nstep1 = PythonScriptStep(\r\n name = \"prepare data\",\r\n source_directory = \"scripts\",\r\n script_name = \"data_prep.py\",\r\n compute_target = \"aml-cluster\"\r\n)\r\n\r\n# Step to train a model\r\nstep2 = PythonScriptStep(\r\n name = \"train model\",\r\n source_directory = \"scripts\",\r\n script_name = \"train_model.py\",\r\n compute_target = \"aml-cluster\"\r\n)\r\n\r\n# Construct the pipeline\r\ntrain_pipeline = Pipeline(workspace = ws, steps = [step1, step2])\r\n\r\n# Create an experiment and run the pipeline with it\r\nexperiment = Experiment(workspace = ws, name = \"training-pipeline\")\r\npipeline_run = experiment.submit(train_pipeline)\r\n# To run all pipeline steps without cached results\r\n# pipeline_run = experiment.submit(train_pipeline, regenerate_outputs=True)\r\n\r\n# Publish the pipeline run\r\npublished_pipeline = pipeline_run.publish(\r\n name=\"training_pipeline\",\r\n description=\"Model training pipeline\",\r\n version=\"1.0\"\r\n)\r\n\r\n# Get the endpoint for the published pipeline\r\nrest_endpoint = published_pipeline.endpoint\r\nprint(rest_endpoint)\r\n# Consume the pipeline through REST request\r\nresponse = requests.post(\r\n rest_endpoint,\r\n headers=some_auth_header,\r\n json={\"ExperimentName\": \"run_training_pipeline\"})\r\nrun_id = response.json()[\"Id\"]\r\nprint(run_id)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 17:28:48 2018
@author: otalabay
"""
LOCAL_INFO = 1
LSM = 2
TLS = 3
TLS_STOP = 4
DANGER = 5
STOP_DANGER = 6
PM = 7
PM_STOP = 8
|
normal
|
{
"blob_id": "db341c3686c53f1cd9fe98c532f17e872952cbba",
"index": 6733,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nLOCAL_INFO = 1\nLSM = 2\nTLS = 3\nTLS_STOP = 4\nDANGER = 5\nSTOP_DANGER = 6\nPM = 7\nPM_STOP = 8\n",
"step-3": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 15 17:28:48 2018\r\n\r\n@author: otalabay\r\n\"\"\"\r\n\r\nLOCAL_INFO = 1\r\nLSM = 2\r\nTLS = 3\r\nTLS_STOP = 4\r\nDANGER = 5\r\nSTOP_DANGER = 6\r\nPM = 7\r\nPM_STOP = 8",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from mcse.core.driver import BaseDriver_
class DimerGridSearch(BaseDriver_):
"""
Generates all dimer structures that should be considered for a grid search
    to find the best dimer arrangements. Grid search is performed over all
x,y,z positions for the COM and all orientations of the molecule. Only
dimers with physically relevant intermolecular distances are kept for the
user by providing maximum and minimum scaled vdW distances as max_sr and
    min_sr. Grid search can be performed using a single unique molecule or
two distinct molecules as input.
This method is parallelized using MPI. The user may launch as many MPI ranks
as they would like in order to reduce the computational burden for each
    rank and speed up the time-to-solution.
Arguments
---------
min_sr: float
Minimum specific radius to use for dimer distance checks.
max_sr: float
        Maximum specific radius multiplier allowed for the minimum distance
        between the two molecules, thereby removing dimers formed from molecules
        that are far apart.
box: float,list
        Box size to search over for x,y,z positions. It is assumed that the first
        molecule of the dimer will be placed at 0,0,0. If the box size is a
float, a box will be placed at 0,0,0 and will extend by this value in
all directions. If a list is provided, the box will only extend by these
lengths in the x,y,z directions respectively, and due to symmetry, in
the -x,-y,-z directions. Default behavior is that the box size will
automatically be detected based on the size of the input molecules.
grid_spacing: float
Grid spacing to use for x,y,z position spacing
angle_spacing: float
Spacing of orientation angles to use for every x,y,z position. Assumed
to be in degrees.
cutoff: float
        Distance between COMs beyond which dimers are excluded from the grid search.
tol: float
Tolerance used for the rmsd comparison. If the difference between the
structures is less than tol, then they are considered duplicates.
vdw: list
List of all vdw radii for all elements in periodic table
bonds_kw: dict
Keyword arguments for Structure.get_bonds method. This is used
for recognizing molecular connectivity.
inter_list: list
List of tuples of elements that should be considered for the distance
calculations. For example ("Li", "O"). Then, if the distance between the
Li in one molecule and the O in another molecule is outside the min_sr
to max_sr range then the dimer system will be removed. This is helpful
        to reduce the search space based on chemical intuition.
"""
def __init__(self,
folder="",
min_sr=0.75,
max_sr=1.30,
box=-1,
grid_spacing=2.5,
angle_spacing=30,
inter_list=[],
tol=0.1,
vdw=[],
bonds_kw={},
comm=None):
raise Exception()
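

# Hedged sketch of the docstring's distance filter (my reading: the "specific
# radius" multipliers min_sr/max_sr scale the sum of the two atoms' vdW radii;
# this helper is illustrative and not part of the class above):
def _dimer_within_sr_window(d_min, vdw_a, vdw_b, min_sr=0.75, max_sr=1.30):
    # d_min: minimum interatomic distance found between the two molecules
    contact = vdw_a + vdw_b  # reference contact distance from the vdW radii
    return min_sr * contact <= d_min <= max_sr * contact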
|
normal
|
{
"blob_id": "9db4bca3e907d70d9696f98506efb6d6042b5723",
"index": 6710,
"step-1": "<mask token>\n\n\nclass DimerGridSearch(BaseDriver_):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DimerGridSearch(BaseDriver_):\n <mask token>\n\n def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,\n grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],\n bonds_kw={}, comm=None):\n raise Exception()\n",
"step-3": "<mask token>\n\n\nclass DimerGridSearch(BaseDriver_):\n \"\"\"\n Generates all dimer structures that should be considered for a grid search\n to find the best dimer arangements. Grid search is performed over all \n x,y,z positions for the COM and all orientations of the molecule. Only\n dimers with physically relevant intermolecular distances are kept for the \n user by providing maximum and minimum scaled vdW distances as max_sr and \n min_sr. Grid search can be performed using a a single unqiue molecule or \n two distinct molecules as input. \n \n This method is parallelized using MPI. The user may launch as many MPI ranks\n as they would like in order to reduce the computational burden for each \n rank and speedup the time-to-solution. \n \n Arguments\n ---------\n min_sr: float\n Minimum specific radius to use for dimer distance checks.\n max_sr: float\n Maximum specific radius multiplier that is allowed to be the minimum \n distance between two dimers, thereby removing dimers formed from molecules\n that are far away. \n box: float,list\n Box size to search over for x,y,z positions. It's assumes that first \n molecule of the dimer will be placed at 0,0,0. If the box size is a \n float, a box will be placed at 0,0,0 and will extend by this value in\n all directions. If a list is provided, the box will only extend by these\n lengths in the x,y,z directions respectively, and due to symmetry, in \n the -x,-y,-z directions. Default behavior is that the box size will\n automatically be detected based on the size of the input molecules.\n grid_spacing: float\n Grid spacing to use for x,y,z position spacing\n angle_spacing: float\n Spacing of orientation angles to use for every x,y,z position. Assumed\n to be in degrees. \n cutoff: float\n Distance between COM to neglect from dimer grid search. \n tol: float\n Tolerance used for the rmsd comparison. If the difference between the\n structures is less than tol, then they are considered duplicates. \n vdw: list\n List of all vdw radii for all elements in periodic table\n bonds_kw: dict\n Keyword arguments for Structure.get_bonds method. This is used\n for recognizing molecular connectivity. \n inter_list: list\n List of tuples of elements that should be considered for the distance\n calculations. For example (\"Li\", \"O\"). Then, if the distance between the\n Li in one molecule and the O in another molecule is outside the min_sr\n to max_sr range then the dimer system will be removed. This is helpful\n to reduce the search space based on chemical intution.\n \n \"\"\"\n\n def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,\n grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],\n bonds_kw={}, comm=None):\n raise Exception()\n",
"step-4": "from mcse.core.driver import BaseDriver_\n\n\nclass DimerGridSearch(BaseDriver_):\n \"\"\"\n Generates all dimer structures that should be considered for a grid search\n to find the best dimer arangements. Grid search is performed over all \n x,y,z positions for the COM and all orientations of the molecule. Only\n dimers with physically relevant intermolecular distances are kept for the \n user by providing maximum and minimum scaled vdW distances as max_sr and \n min_sr. Grid search can be performed using a a single unqiue molecule or \n two distinct molecules as input. \n \n This method is parallelized using MPI. The user may launch as many MPI ranks\n as they would like in order to reduce the computational burden for each \n rank and speedup the time-to-solution. \n \n Arguments\n ---------\n min_sr: float\n Minimum specific radius to use for dimer distance checks.\n max_sr: float\n Maximum specific radius multiplier that is allowed to be the minimum \n distance between two dimers, thereby removing dimers formed from molecules\n that are far away. \n box: float,list\n Box size to search over for x,y,z positions. It's assumes that first \n molecule of the dimer will be placed at 0,0,0. If the box size is a \n float, a box will be placed at 0,0,0 and will extend by this value in\n all directions. If a list is provided, the box will only extend by these\n lengths in the x,y,z directions respectively, and due to symmetry, in \n the -x,-y,-z directions. Default behavior is that the box size will\n automatically be detected based on the size of the input molecules.\n grid_spacing: float\n Grid spacing to use for x,y,z position spacing\n angle_spacing: float\n Spacing of orientation angles to use for every x,y,z position. Assumed\n to be in degrees. \n cutoff: float\n Distance between COM to neglect from dimer grid search. \n tol: float\n Tolerance used for the rmsd comparison. If the difference between the\n structures is less than tol, then they are considered duplicates. \n vdw: list\n List of all vdw radii for all elements in periodic table\n bonds_kw: dict\n Keyword arguments for Structure.get_bonds method. This is used\n for recognizing molecular connectivity. \n inter_list: list\n List of tuples of elements that should be considered for the distance\n calculations. For example (\"Li\", \"O\"). Then, if the distance between the\n Li in one molecule and the O in another molecule is outside the min_sr\n to max_sr range then the dimer system will be removed. This is helpful\n to reduce the search space based on chemical intution.\n \n \"\"\"\n\n def __init__(self, folder='', min_sr=0.75, max_sr=1.3, box=-1,\n grid_spacing=2.5, angle_spacing=30, inter_list=[], tol=0.1, vdw=[],\n bonds_kw={}, comm=None):\n raise Exception()\n",
"step-5": "\n\nfrom mcse.core.driver import BaseDriver_\n\n\n\nclass DimerGridSearch(BaseDriver_):\n \"\"\"\n Generates all dimer structures that should be considered for a grid search\n to find the best dimer arangements. Grid search is performed over all \n x,y,z positions for the COM and all orientations of the molecule. Only\n dimers with physically relevant intermolecular distances are kept for the \n user by providing maximum and minimum scaled vdW distances as max_sr and \n min_sr. Grid search can be performed using a a single unqiue molecule or \n two distinct molecules as input. \n \n This method is parallelized using MPI. The user may launch as many MPI ranks\n as they would like in order to reduce the computational burden for each \n rank and speedup the time-to-solution. \n \n Arguments\n ---------\n min_sr: float\n Minimum specific radius to use for dimer distance checks.\n max_sr: float\n Maximum specific radius multiplier that is allowed to be the minimum \n distance between two dimers, thereby removing dimers formed from molecules\n that are far away. \n box: float,list\n Box size to search over for x,y,z positions. It's assumes that first \n molecule of the dimer will be placed at 0,0,0. If the box size is a \n float, a box will be placed at 0,0,0 and will extend by this value in\n all directions. If a list is provided, the box will only extend by these\n lengths in the x,y,z directions respectively, and due to symmetry, in \n the -x,-y,-z directions. Default behavior is that the box size will\n automatically be detected based on the size of the input molecules.\n grid_spacing: float\n Grid spacing to use for x,y,z position spacing\n angle_spacing: float\n Spacing of orientation angles to use for every x,y,z position. Assumed\n to be in degrees. \n cutoff: float\n Distance between COM to neglect from dimer grid search. \n tol: float\n Tolerance used for the rmsd comparison. If the difference between the\n structures is less than tol, then they are considered duplicates. \n vdw: list\n List of all vdw radii for all elements in periodic table\n bonds_kw: dict\n Keyword arguments for Structure.get_bonds method. This is used\n for recognizing molecular connectivity. \n inter_list: list\n List of tuples of elements that should be considered for the distance\n calculations. For example (\"Li\", \"O\"). Then, if the distance between the\n Li in one molecule and the O in another molecule is outside the min_sr\n to max_sr range then the dimer system will be removed. This is helpful\n to reduce the search space based on chemical intution.\n \n \"\"\"\n def __init__(self, \n folder=\"\",\n min_sr=0.75,\n max_sr=1.30,\n box=-1, \n grid_spacing=2.5, \n angle_spacing=30, \n inter_list=[],\n tol=0.1,\n vdw=[],\n bonds_kw={},\n comm=None):\n raise Exception()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class figure:
<|reserved_special_token_0|>
def new_num_directory(self, path):
n = 1
while True:
if not os.path.exists(path + '_' + str(n)):
os.mkdir(path + '_' + str(n))
break
else:
n += 1
return path + '_' + str(n) + '/'
def make_num_directory(self, name, num):
os.mkdir(self.dire + '/' + name + '_' + str(num))
return self.dire + '/' + name + '_' + str(num) + '/'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def savefig_rule_num(self, name):
x = list(range(self.span))
plt.figure(figsize=(len(x) / 10, 5.5))
        plt.plot(x[self.reference_steps + self.reveal_trend:], self.
            predfail_app_num, label='prediction fail app')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
cap_rule_num, label='captured rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
add_rule_num, label='add rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
lost_rule_num, label='lost rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
useless_rule_num, label='useless rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
merge_rule_num, label='merge rule')
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class figure:
def __init__(self, dire, dpi, span, data, CIM, learn_loss=None,
eval_loss=None, different_dir_app=True, reference_steps=0,
reveal_trend=1):
self.dire = self.new_num_directory(dire)
self.app_dire = [self.make_num_directory('app', i) for i in range(
data.app_num)]
self.trend_dire = [self.make_num_directory('trend', i) for i in
range(len(data.trend_rule.w))]
self.dpi = dpi
self.span = span
self.app = data.apps
self.trend_rule = data.trend_rule
self.prediction = CIM.prediction
self.prediction_e = CIM.prediction_est_rule
self.prediction_only_ci = CIM.prediction_only_ci
self.predfail_app_num = CIM.predfail_app_num
self.cap_rule_num = CIM.cap_rule_num
self.add_rule_num = CIM.add_rule_num
self.lost_rule_num = CIM.lost_rule_num
self.useless_rule_num = CIM.useless_rule_num
self.merge_rule_num = CIM.merge_rule_num
self.learn_loss = learn_loss
self.eval_loss = eval_loss
self.diff_dir = different_dir_app
self.reference_steps = reference_steps
self.reveal_trend = reveal_trend
def new_num_directory(self, path):
n = 1
while True:
if not os.path.exists(path + '_' + str(n)):
os.mkdir(path + '_' + str(n))
break
else:
n += 1
return path + '_' + str(n) + '/'
def make_num_directory(self, name, num):
os.mkdir(self.dire + '/' + name + '_' + str(num))
return self.dire + '/' + name + '_' + str(num) + '/'
def find_min_max(self, data_list, length, standarize_zero=True):
if standarize_zero:
min = 0
max = 0
else:
min = data_list[0][0]
max = data_list[0][0]
for data in data_list:
for j in range(length):
if j < len(data):
if data[j] < min:
min = data[j]
if data[j] > max:
max = data[j]
return min, max
def savefig_result(self, name):
x = list(range(self.span))
if self.diff_dir:
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
min, max = self.find_min_max([self.prediction[i], self.
prediction_e[i]], self.span)
plt.figure(figsize=(len(x) / 10, 5.5))
for j in range(len(self.trend_rule.w)):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1,
max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1
], facecolor=cycle_tr[j], alpha=0.2, label=
'Chosenrule:' + str(j))
for j in range(self.span):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1,
max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1
], facecolor=cycle_tr[self.app[i].trend_idx[j]],
alpha=0.2)
plt.plot(x, app.trend, label='trend', linestyle='dotted',
color='black')
plt.plot(x[self.reference_steps:], self.prediction[i],
label='LSTM pred', linestyle='dotted', color='blue')
plt.plot(x[self.reference_steps + self.reveal_trend:], self
.prediction_e[i], label='CIM pred', color='orange')
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.learn_loss[i], alpha=0.3, label='learn loss')
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.eval_loss[i], alpha=0.3, marker='X', label=
'eval loss')
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
plt.plot(x, self.app[i].trend, color=cycle_app[i], label=
'trend (app:' + str(i) + ')', linestyle='dotted')
plt.plot(x[self.reference_steps:], self.prediction[i],
color=cycle_app[i], label='pred (app:' + str(i) + ')')
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.learn_loss[i], color=cycle_app[i], alpha=
0.3, label='learn loss (app:' + str(i) + ')')
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.eval_loss[i], color=cycle_app[i], alpha=0.3,
marker='X', label='evalu loss (app:' + str(i) + ')')
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_ruleweight(self, name):
x = list(range(self.span))
if self.diff_dir:
if len(self.trend_rule.w[0]['value']) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]['value']) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.figure(figsize=(len(x) / 10, 5.5))
for j in range(len(self.trend_rule.w[i]['value'])):
plt.plot(x, self.trend_rule.w[i]['value'][j][:-1],
color=cycle_ft[j], label='feature:' + str(j))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.trend_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
if len(self.trend_rule.w[0]['value']) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]['value']) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
width = 0.8 / len(self.trend_rule.w[0]['value'])
for i in range(len(self.trend_rule.w)):
bottom = np.array(-i * 2.0)
for j in range(len(self.trend_rule.w[i]['value'])):
if i == 0:
plt.bar(x + np.array([width * float(j)] * len(x)),
                            self.trend_rule.w[i]['value'][j][:-1], color=cycle_ft[j],
align='edge', bottom=bottom, width=width, label
='feature:' + str(j))
else:
plt.bar(x + np.array([width * float(j)] * len(x)),
self.trend_rule.w[i]['value'][j][:-1], color=
cycle_ft[j], align='edge', bottom=bottom, width
=width)
plt.fill_between(list(range(self.span + 1)), [-i * 2.0 + 1] *
(len(x) + 1), [-(i + 1) * 2.0 + 1] * (len(x) + 1),
facecolor=cycle_tr[i], alpha=0.2, label='trendrule:' +
str(i))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_chosenrule(self, name):
x = list(range(self.span))
if self.diff_dir:
pass
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s
=1, marker='D', label='trendrule:' + str(i))
for id in range(len(self.app)):
colorArr = []
for i in self.app[id].trend_idx:
colorArr.append(cycle_tr[i])
plt.scatter(x, np.array([-id] * len(x)), color=cycle_app[id
], s=150, label='app:' + str(id))
plt.scatter(x, np.array([-id] * len(x)), color='w', s=70)
plt.scatter(x, np.array([-id] * len(x)), color=colorArr, s=
15, marker='D', alpha=0.5)
            plt.xlabel('season')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
<|reserved_special_token_0|>
def savefig_compare_prediction_ave(self, name):
x = list(range(self.span))
if self.diff_dir:
prediction = []
prediction_e = []
prediction_ci = []
for j in range(self.span - self.reference_steps):
sum = 0
sum_e = 0
sum_ci = 0
for i in range(len(self.app)):
sum += (self.prediction[i][j] - self.app[i].trend[j +
self.reference_steps]) ** 2
if (j < self.span - self.reference_steps - self.
reveal_trend):
sum_e += (self.prediction_e[i][j] - self.app[i].
trend[j + self.reference_steps + self.reveal_trend]
) ** 2
                        sum_ci += (self.prediction_only_ci[i][j] - self.app[i].
trend[j + self.reference_steps + self.reveal_trend]
) ** 2
prediction.append(sum / len(self.app))
if j < self.span - self.reference_steps - self.reveal_trend:
prediction_e.append(sum_e / len(self.app))
prediction_ci.append(sum_ci / len(self.app))
plt.figure(figsize=(len(x) / 10, 5.5))
plt.xlabel('season')
plt.ylabel('prediction loss average')
plt.plot(x[self.reference_steps + self.reveal_trend:],
prediction_ci, label='only CI loss', linestyle='dotted')
plt.plot(x[self.reference_steps:], prediction, label=
'LSTM loss', linestyle='dotted')
plt.plot(x[self.reference_steps + self.reveal_trend:],
prediction_e, label='CIM loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
def savefig_rule_num(self, name):
x = list(range(self.span))
plt.figure(figsize=(len(x) / 10, 5.5))
        plt.plot(x[self.reference_steps + self.reveal_trend:], self.
            predfail_app_num, label='prediction fail app')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
cap_rule_num, label='captured rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
add_rule_num, label='add rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
lost_rule_num, label='lost rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
useless_rule_num, label='useless rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
merge_rule_num, label='merge rule')
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def save_config(self, name, cfg):
import json
setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg
.REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,
SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.
APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,
EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,
SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.
EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg
.EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg
.EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.
THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,
LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.
LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[
'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE
], FIRST_BIN=cfg.FIRST_BIN)
fw = open(self.dire + name + '.json', 'w')
json.dump(setting, fw, indent=4)
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class figure:
def __init__(self, dire, dpi, span, data, CIM, learn_loss=None,
eval_loss=None, different_dir_app=True, reference_steps=0,
reveal_trend=1):
self.dire = self.new_num_directory(dire)
self.app_dire = [self.make_num_directory('app', i) for i in range(
data.app_num)]
self.trend_dire = [self.make_num_directory('trend', i) for i in
range(len(data.trend_rule.w))]
self.dpi = dpi
self.span = span
self.app = data.apps
self.trend_rule = data.trend_rule
self.prediction = CIM.prediction
self.prediction_e = CIM.prediction_est_rule
self.prediction_only_ci = CIM.prediction_only_ci
self.predfail_app_num = CIM.predfail_app_num
self.cap_rule_num = CIM.cap_rule_num
self.add_rule_num = CIM.add_rule_num
self.lost_rule_num = CIM.lost_rule_num
self.useless_rule_num = CIM.useless_rule_num
self.merge_rule_num = CIM.merge_rule_num
self.learn_loss = learn_loss
self.eval_loss = eval_loss
self.diff_dir = different_dir_app
self.reference_steps = reference_steps
self.reveal_trend = reveal_trend
def new_num_directory(self, path):
n = 1
while True:
if not os.path.exists(path + '_' + str(n)):
os.mkdir(path + '_' + str(n))
break
else:
n += 1
return path + '_' + str(n) + '/'
def make_num_directory(self, name, num):
os.mkdir(self.dire + '/' + name + '_' + str(num))
return self.dire + '/' + name + '_' + str(num) + '/'
def find_min_max(self, data_list, length, standarize_zero=True):
if standarize_zero:
min = 0
max = 0
else:
min = data_list[0][0]
max = data_list[0][0]
for data in data_list:
for j in range(length):
if j < len(data):
if data[j] < min:
min = data[j]
if data[j] > max:
max = data[j]
return min, max
def savefig_result(self, name):
x = list(range(self.span))
if self.diff_dir:
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
min, max = self.find_min_max([self.prediction[i], self.
prediction_e[i]], self.span)
plt.figure(figsize=(len(x) / 10, 5.5))
for j in range(len(self.trend_rule.w)):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1,
max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1
], facecolor=cycle_tr[j], alpha=0.2, label=
'Chosenrule:' + str(j))
for j in range(self.span):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1,
max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1
], facecolor=cycle_tr[self.app[i].trend_idx[j]],
alpha=0.2)
plt.plot(x, app.trend, label='trend', linestyle='dotted',
color='black')
plt.plot(x[self.reference_steps:], self.prediction[i],
label='LSTM pred', linestyle='dotted', color='blue')
plt.plot(x[self.reference_steps + self.reveal_trend:], self
.prediction_e[i], label='CIM pred', color='orange')
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.learn_loss[i], alpha=0.3, label='learn loss')
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.eval_loss[i], alpha=0.3, marker='X', label=
'eval loss')
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
plt.plot(x, self.app[i].trend, color=cycle_app[i], label=
'trend (app:' + str(i) + ')', linestyle='dotted')
plt.plot(x[self.reference_steps:], self.prediction[i],
color=cycle_app[i], label='pred (app:' + str(i) + ')')
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.learn_loss[i], color=cycle_app[i], alpha=
0.3, label='learn loss (app:' + str(i) + ')')
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.eval_loss[i], color=cycle_app[i], alpha=0.3,
                        marker='X', label='eval loss (app:' + str(i) + ')')
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_ruleweight(self, name):
x = list(range(self.span))
if self.diff_dir:
if len(self.trend_rule.w[0]['value']) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]['value']) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.figure(figsize=(len(x) / 10, 5.5))
for j in range(len(self.trend_rule.w[i]['value'])):
plt.plot(x, self.trend_rule.w[i]['value'][j][:-1],
color=cycle_ft[j], label='feature:' + str(j))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.trend_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
if len(self.trend_rule.w[0]['value']) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]['value']) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
width = 0.8 / len(self.trend_rule.w[0]['value'])
for i in range(len(self.trend_rule.w)):
bottom = np.array(-i * 2.0)
for j in range(len(self.trend_rule.w[i]['value'])):
if i == 0:
plt.bar(x + np.array([width * float(j)] * len(x)),
                            self.trend_rule.w[i]['value'][j][:-1], color=cycle_ft[j],
align='edge', bottom=bottom, width=width, label
='feature:' + str(j))
else:
plt.bar(x + np.array([width * float(j)] * len(x)),
self.trend_rule.w[i]['value'][j][:-1], color=
cycle_ft[j], align='edge', bottom=bottom, width
=width)
plt.fill_between(list(range(self.span + 1)), [-i * 2.0 + 1] *
(len(x) + 1), [-(i + 1) * 2.0 + 1] * (len(x) + 1),
facecolor=cycle_tr[i], alpha=0.2, label='trendrule:' +
str(i))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_chosenrule(self, name):
x = list(range(self.span))
if self.diff_dir:
pass
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s
=1, marker='D', label='trendrule:' + str(i))
for id in range(len(self.app)):
colorArr = []
for i in self.app[id].trend_idx:
colorArr.append(cycle_tr[i])
plt.scatter(x, np.array([-id] * len(x)), color=cycle_app[id
], s=150, label='app:' + str(id))
plt.scatter(x, np.array([-id] * len(x)), color='w', s=70)
plt.scatter(x, np.array([-id] * len(x)), color=colorArr, s=
15, marker='D', alpha=0.5)
            plt.xlabel('season')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction(self, name):
x = list(range(self.span))
if self.diff_dir:
for i in range(len(self.app)):
plt.figure(figsize=(len(x) / 10, 5.5))
plt.plot(x[self.reference_steps + self.reveal_trend:], np.
abs(np.array(self.prediction_only_ci[i]) - np.array(
self.app[i].trend[self.reference_steps + self.
reveal_trend:])), label='only CI loss', linestyle=
'dotted', color='green')
plt.plot(x[self.reference_steps:], np.abs(np.array(self.
prediction[i]) - np.array(self.app[i].trend[self.
reference_steps:])), label='LSTM loss', linestyle=
'dotted', color='blue')
plt.plot(x[self.reference_steps + self.reveal_trend:], np.
abs(np.array(self.prediction_e[i]) - np.array(self.app[
i].trend[self.reference_steps + self.reveal_trend:])),
label='CIM loss', color='orange')
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for id in range(len(self.app)):
plt.plot(x[self.reference_steps:], np.abs(np.array(self.
prediction[id]) - np.array(self.app[id].trend[self.
reference_steps:])), color=cycle_app[id], label=
'classify loss (app:' + str(id) + ')', linestyle='dotted')
plt.plot(x[self.reference_steps + self.reveal_trend:], np.
abs(np.array(self.prediction_e[id]) - np.array(self.app
[id].trend[self.reference_steps + self.reveal_trend:])),
color=cycle_app[id], label='analyse loss (app:' + str(
id) + ')')
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction_ave(self, name):
x = list(range(self.span))
if self.diff_dir:
prediction = []
prediction_e = []
prediction_ci = []
for j in range(self.span - self.reference_steps):
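                # accumulate squared prediction errors over apps; dividing by
                # len(self.app) below gives the per-season mean squared error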
sum = 0
sum_e = 0
sum_ci = 0
for i in range(len(self.app)):
sum += (self.prediction[i][j] - self.app[i].trend[j +
self.reference_steps]) ** 2
if (j < self.span - self.reference_steps - self.
reveal_trend):
sum_e += (self.prediction_e[i][j] - self.app[i].
trend[j + self.reference_steps + self.reveal_trend]
) ** 2
                        sum_ci += (self.prediction_only_ci[i][j] - self.app[i].
trend[j + self.reference_steps + self.reveal_trend]
) ** 2
prediction.append(sum / len(self.app))
if j < self.span - self.reference_steps - self.reveal_trend:
prediction_e.append(sum_e / len(self.app))
prediction_ci.append(sum_ci / len(self.app))
plt.figure(figsize=(len(x) / 10, 5.5))
plt.xlabel('season')
plt.ylabel('prediction loss average')
plt.plot(x[self.reference_steps + self.reveal_trend:],
prediction_ci, label='only CI loss', linestyle='dotted')
plt.plot(x[self.reference_steps:], prediction, label=
'LSTM loss', linestyle='dotted')
plt.plot(x[self.reference_steps + self.reveal_trend:],
prediction_e, label='CIM loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
def savefig_rule_num(self, name):
x = list(range(self.span))
plt.figure(figsize=(len(x) / 10, 5.5))
        plt.plot(x[self.reference_steps + self.reveal_trend:], self.
            predfail_app_num, label='prediction fail app')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
cap_rule_num, label='captured rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
add_rule_num, label='add rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
lost_rule_num, label='lost rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
useless_rule_num, label='useless rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
merge_rule_num, label='merge rule')
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def save_config(self, name, cfg):
import json
setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg
.REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,
SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.
APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,
EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,
SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.
EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg
.EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg
.EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.
THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,
LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.
LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[
'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE
], FIRST_BIN=cfg.FIRST_BIN)
fw = open(self.dire + name + '.json', 'w')
json.dump(setting, fw, indent=4)
return
<|reserved_special_token_1|>
import os
from matplotlib import pyplot as plt
from matplotlib import colors
import numpy as np
class figure:
def __init__(self, dire, dpi, span, data, CIM, learn_loss=None,
eval_loss=None, different_dir_app=True, reference_steps=0,
reveal_trend=1):
self.dire = self.new_num_directory(dire)
self.app_dire = [self.make_num_directory('app', i) for i in range(
data.app_num)]
self.trend_dire = [self.make_num_directory('trend', i) for i in
range(len(data.trend_rule.w))]
self.dpi = dpi
self.span = span
self.app = data.apps
self.trend_rule = data.trend_rule
self.prediction = CIM.prediction
self.prediction_e = CIM.prediction_est_rule
self.prediction_only_ci = CIM.prediction_only_ci
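        # per-season rule statistics reported by CIM (plotted in savefig_rule_num)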
self.predfail_app_num = CIM.predfail_app_num
self.cap_rule_num = CIM.cap_rule_num
self.add_rule_num = CIM.add_rule_num
self.lost_rule_num = CIM.lost_rule_num
self.useless_rule_num = CIM.useless_rule_num
self.merge_rule_num = CIM.merge_rule_num
self.learn_loss = learn_loss
self.eval_loss = eval_loss
self.diff_dir = different_dir_app
self.reference_steps = reference_steps
self.reveal_trend = reveal_trend
def new_num_directory(self, path):
n = 1
while True:
if not os.path.exists(path + '_' + str(n)):
os.mkdir(path + '_' + str(n))
break
else:
n += 1
return path + '_' + str(n) + '/'
def make_num_directory(self, name, num):
os.mkdir(self.dire + '/' + name + '_' + str(num))
return self.dire + '/' + name + '_' + str(num) + '/'
def find_min_max(self, data_list, length, standarize_zero=True):
if standarize_zero:
min = 0
max = 0
else:
min = data_list[0][0]
max = data_list[0][0]
for data in data_list:
for j in range(length):
if j < len(data):
if data[j] < min:
min = data[j]
if data[j] > max:
max = data[j]
return min, max
def savefig_result(self, name):
x = list(range(self.span))
if self.diff_dir:
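            # pick a categorical palette sized to the number of trend rules:
            # default prop cycle (10 colors), tab20 (20), else XKCD colors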
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
min, max = self.find_min_max([self.prediction[i], self.
prediction_e[i]], self.span)
plt.figure(figsize=(len(x) / 10, 5.5))
for j in range(len(self.trend_rule.w)):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1,
max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1
], facecolor=cycle_tr[j], alpha=0.2, label=
'Chosenrule:' + str(j))
for j in range(self.span):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1,
max * 1.1 + 0.1], [min * 1.1 - 0.1, min * 1.1 - 0.1
], facecolor=cycle_tr[self.app[i].trend_idx[j]],
alpha=0.2)
plt.plot(x, app.trend, label='trend', linestyle='dotted',
color='black')
plt.plot(x[self.reference_steps:], self.prediction[i],
label='LSTM pred', linestyle='dotted', color='blue')
plt.plot(x[self.reference_steps + self.reveal_trend:], self
.prediction_e[i], label='CIM pred', color='orange')
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.learn_loss[i], alpha=0.3, label='learn loss')
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.eval_loss[i], alpha=0.3, marker='X', label=
'eval loss')
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
plt.plot(x, self.app[i].trend, color=cycle_app[i], label=
'trend (app:' + str(i) + ')', linestyle='dotted')
plt.plot(x[self.reference_steps:], self.prediction[i],
color=cycle_app[i], label='pred (app:' + str(i) + ')')
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.learn_loss[i], color=cycle_app[i], alpha=
0.3, label='learn loss (app:' + str(i) + ')')
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:
], self.eval_loss[i], color=cycle_app[i], alpha=0.3,
marker='X', label='evalu loss (app:' + str(i) + ')')
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_ruleweight(self, name):
x = list(range(self.span))
if self.diff_dir:
if len(self.trend_rule.w[0]['value']) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]['value']) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.figure(figsize=(len(x) / 10, 5.5))
for j in range(len(self.trend_rule.w[i]['value'])):
plt.plot(x, self.trend_rule.w[i]['value'][j][:-1],
color=cycle_ft[j], label='feature:' + str(j))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.trend_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
if len(self.trend_rule.w[0]['value']) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]['value']) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
width = 0.8 / len(self.trend_rule.w[0]['value'])
for i in range(len(self.trend_rule.w)):
bottom = np.array(-i * 2.0)
for j in range(len(self.trend_rule.w[i]['value'])):
if i == 0:
plt.bar(x + np.array([width * float(j)] * len(x)),
self.trend_rule.w[i][j][:-1], color=cycle_ft[j],
align='edge', bottom=bottom, width=width, label
='feature:' + str(j))
else:
plt.bar(x + np.array([width * float(j)] * len(x)),
self.trend_rule.w[i]['value'][j][:-1], color=
cycle_ft[j], align='edge', bottom=bottom, width
=width)
plt.fill_between(list(range(self.span + 1)), [-i * 2.0 + 1] *
(len(x) + 1), [-(i + 1) * 2.0 + 1] * (len(x) + 1),
facecolor=cycle_tr[i], alpha=0.2, label='trendrule:' +
str(i))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_chosenrule(self, name):
x = list(range(self.span))
if self.diff_dir:
pass
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s
=1, marker='D', label='trendrule:' + str(i))
for id in range(len(self.app)):
colorArr = []
for i in self.app[id].trend_idx:
colorArr.append(cycle_tr[i])
plt.scatter(x, np.array([-id] * len(x)), color=cycle_app[id
], s=150, label='app:' + str(id))
plt.scatter(x, np.array([-id] * len(x)), color='w', s=70)
plt.scatter(x, np.array([-id] * len(x)), color=colorArr, s=
15, marker='D', alpha=0.5)
plt.xlabel('シーズン')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction(self, name):
x = list(range(self.span))
if self.diff_dir:
for i in range(len(self.app)):
plt.figure(figsize=(len(x) / 10, 5.5))
plt.plot(x[self.reference_steps + self.reveal_trend:], np.
abs(np.array(self.prediction_only_ci[i]) - np.array(
self.app[i].trend[self.reference_steps + self.
reveal_trend:])), label='only CI loss', linestyle=
'dotted', color='green')
plt.plot(x[self.reference_steps:], np.abs(np.array(self.
prediction[i]) - np.array(self.app[i].trend[self.
reference_steps:])), label='LSTM loss', linestyle=
'dotted', color='blue')
plt.plot(x[self.reference_steps + self.reveal_trend:], np.
abs(np.array(self.prediction_e[i]) - np.array(self.app[
i].trend[self.reference_steps + self.reveal_trend:])),
label='CIM loss', color='orange')
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + '.png', dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x) / 10, 5.5))
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for id in range(len(self.app)):
plt.plot(x[self.reference_steps:], np.abs(np.array(self.
prediction[id]) - np.array(self.app[id].trend[self.
reference_steps:])), color=cycle_app[id], label=
'classify loss (app:' + str(id) + ')', linestyle='dotted')
plt.plot(x[self.reference_steps + self.reveal_trend:], np.
abs(np.array(self.prediction_e[id]) - np.array(self.app
[id].trend[self.reference_steps + self.reveal_trend:])),
color=cycle_app[id], label='analyse loss (app:' + str(
id) + ')')
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction_ave(self, name):
x = list(range(self.span))
if self.diff_dir:
prediction = []
prediction_e = []
prediction_ci = []
for j in range(self.span - self.reference_steps):
sum = 0
sum_e = 0
sum_ci = 0
for i in range(len(self.app)):
sum += (self.prediction[i][j] - self.app[i].trend[j +
self.reference_steps]) ** 2
if (j < self.span - self.reference_steps - self.
reveal_trend):
sum_e += (self.prediction_e[i][j] - self.app[i].
trend[j + self.reference_steps + self.reveal_trend]
) ** 2
sum_ci += (self.prediction_e[i][j] - self.app[i].
trend[j + self.reference_steps + self.reveal_trend]
) ** 2
prediction.append(sum / len(self.app))
if j < self.span - self.reference_steps - self.reveal_trend:
prediction_e.append(sum_e / len(self.app))
prediction_ci.append(sum_ci / len(self.app))
plt.figure(figsize=(len(x) / 10, 5.5))
plt.xlabel('season')
plt.ylabel('prediction loss average')
plt.plot(x[self.reference_steps + self.reveal_trend:],
prediction_ci, label='only CI loss', linestyle='dotted')
plt.plot(x[self.reference_steps:], prediction, label=
'LSTM loss', linestyle='dotted')
plt.plot(x[self.reference_steps + self.reveal_trend:],
prediction_e, label='CIM loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
def savefig_rule_num(self, name):
x = list(range(self.span))
plt.figure(figsize=(len(x) / 10, 5.5))
chart_num = 6
width = 0.8 / chart_num
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
predfail_app_num, label='truth rule number')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
predfail_app_num, label='prediction fail app')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
cap_rule_num, label='captured rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
add_rule_num, label='add rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
lost_rule_num, label='lost rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
useless_rule_num, label='useless rule')
plt.plot(x[self.reference_steps + self.reveal_trend:], self.
merge_rule_num, label='merge rule')
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + '.png', dpi=self.dpi)
plt.clf()
return
def save_config(self, name, cfg):
import json
setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg
.REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,
SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.
APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,
EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,
SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.
EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg
.EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg
.EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.
THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,
LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.
LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[
'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE
], FIRST_BIN=cfg.FIRST_BIN)
fw = open(self.dire + name + '.json', 'w')
json.dump(setting, fw, indent=4)
return
<|reserved_special_token_1|>
import os
from matplotlib import pyplot as plt
from matplotlib import colors
import numpy as np
class figure:
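    """Plotting helper that saves trend/prediction figures per app and per trend rule."""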
def __init__(self, dire, dpi, span, data, CIM,
learn_loss=None, eval_loss=None, different_dir_app=True, reference_steps=0, reveal_trend=1):
self.dire = self.new_num_directory(dire)
self.app_dire = [self.make_num_directory("app", i) for i in range(data.app_num)]
self.trend_dire = [self.make_num_directory("trend", i) for i in range(len(data.trend_rule.w))]
self.dpi = dpi
self.span = span
self.app = data.apps
self.trend_rule = data.trend_rule
self.prediction = CIM.prediction
self.prediction_e = CIM.prediction_est_rule
self.prediction_only_ci = CIM.prediction_only_ci
self.predfail_app_num = CIM.predfail_app_num
self.cap_rule_num = CIM.cap_rule_num
self.add_rule_num = CIM.add_rule_num
self.lost_rule_num = CIM.lost_rule_num
self.useless_rule_num = CIM.useless_rule_num
self.merge_rule_num = CIM.merge_rule_num
self.learn_loss = learn_loss
self.eval_loss = eval_loss
self.diff_dir = different_dir_app
self.reference_steps = reference_steps
self.reveal_trend = reveal_trend
def new_num_directory(self, path):
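        # Create the first directory named "<path>_1", "<path>_2", ... that
        # does not exist yet, and return its path with a trailing slash.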
n = 1
while True:
if not os.path.exists(path + "_" + str(n)):
os.mkdir(path + "_" + str(n))
break
else:
n += 1
return path + "_" + str(n) + "/"
def make_num_directory(self, name, num):
os.mkdir(self.dire + "/" + name + "_" + str(num))
return self.dire + "/" + name + "_" + str(num) + "/"
def find_min_max(self, data_list, length, standarize_zero=True):
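        # Return (min, max) over all series in data_list, scanning up to
        # `length` items per series; with standarize_zero=True the returned
        # range always includes zero.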
if standarize_zero:
min = 0
max = 0
else:
min = data_list[0][0]
max = data_list[0][0]
for data in data_list:
for j in range(length):
if j < len(data):
if data[j] < min:
min = data[j]
if data[j] > max:
max = data[j]
return min, max
def savefig_result(self, name):
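        # Plot the true trend, the LSTM prediction and the CIM prediction for
        # each app; with diff_dir, one figure per app with the chosen trend
        # rule shaded in the background.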
x = list(range(self.span))
if self.diff_dir:
            # color per trend rule (taken from savefig_chosenrule)
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
min, max = self.find_min_max([self.prediction[i], self.prediction_e[i]], self.span)
plt.figure(figsize=(len(x) / 10, 5.5))
                # chosen-rule background shading (taken from savefig_chosenrule)
for j in range(len(self.trend_rule.w)):
plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, max * 1.1 + 0.1],
[min * 1.1 - 0.1, min * 1.1 - 0.1],
facecolor=cycle_tr[j], alpha=0.2,
label="Chosenrule:" + str(j))
for j in range(self.span):
plt.fill_between([j - 0.5, j + 0.5], [max*1.1+0.1, max*1.1+0.1], [min*1.1-0.1, min*1.1-0.1],
facecolor=cycle_tr[self.app[i].trend_idx[j]], alpha=0.2)
plt.plot(x, app.trend, label="trend", linestyle="dotted", color="black")
plt.plot(x[self.reference_steps:], self.prediction[i],
label="LSTM pred", linestyle="dotted", color="blue")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.prediction_e[i],
label="CIM pred", color="orange")
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], alpha=0.3,
label="learn loss")
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], alpha=0.3, marker="X",
label="eval loss")
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per app
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for i, app in enumerate(self.app):
plt.plot(x, self.app[i].trend, color=cycle_app[i], label="trend (app:" + str(i) + ")", linestyle="dotted")
plt.plot(x[self.reference_steps:], self.prediction[i], color=cycle_app[i], label="pred (app:" + str(i) + ")")
if self.learn_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], color=cycle_app[i], alpha=0.3,
label="learn loss (app:" + str(i) + ")")
if self.eval_loss is not None:
plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], color=cycle_app[i], alpha=0.3, marker="X",
label="evalu loss (app:" + str(i) + ")")
plt.xlabel('season')
plt.ylabel('trend value')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_ruleweight(self, name):
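        # Plot the per-feature weights of each trend rule over time; with
        # diff_dir, one figure per rule, otherwise stacked bar rows per rule.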
x = list(range(self.span))
if self.diff_dir:
            # color per feature
if len(self.trend_rule.w[0]["value"]) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]["value"]) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
for i in range(len(self.trend_rule.w)):
plt.figure(figsize=(len(x) / 10, 5.5))
                # for each feature
for j in range(len(self.trend_rule.w[i]["value"])):
plt.plot(x, self.trend_rule.w[i]["value"][j][:-1], color=cycle_ft[j], label="feature:" + str(j))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.trend_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per trend rule
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
            # color per feature
if len(self.trend_rule.w[0]["value"]) <= 10:
cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w[0]["value"]) <= 20:
cycle_ft = plt.cm.get_cmap('tab20').colors
else:
cycle_ft = list(colors.XKCD_COLORS.items())[:100]
width = 0.8 / len(self.trend_rule.w[0]["value"])
            # for each trend rule
for i in range(len(self.trend_rule.w)):
bottom = np.array(- i * 2.0)
                # for each feature
                for j in range(len(self.trend_rule.w[i]["value"])):
                    if i == 0:
                        # bug fix: index via the "value" key, matching the else branch below
                        plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i]["value"][j][:-1],
                                color=cycle_ft[j], align='edge', bottom=bottom, width=width, label="feature:" + str(j))
else:
plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i]["value"][j][:-1],
color=cycle_ft[j], align='edge', bottom=bottom, width=width)
plt.fill_between(list(range(self.span+1)), [- i * 2.0 + 1] * (len(x)+1), [- (i+1) * 2.0 + 1] * (len(x)+1),
facecolor=cycle_tr[i], alpha=0.2, label="trendrule:" + str(i))
plt.xlabel('season')
plt.ylabel('weight of trend rule')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_chosenrule(self, name):
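        # Show which trend rule each app followed at each step; when diff_dir
        # is set this is already merged into savefig_result.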
x = list(range(self.span))
if self.diff_dir:
            pass  # merged into savefig_result
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per app
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
            # color per trend rule
if len(self.trend_rule.w) <= 10:
cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.trend_rule.w) <= 20:
cycle_tr = plt.cm.get_cmap('tab20').colors
else:
cycle_tr = list(colors.XKCD_COLORS.items())[:100]
            # dummy points for legend display only
for i in range(len(self.trend_rule.w)):
plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s=1, marker="D",
label="trendrule:" + str(i))
for id in range(len(self.app)):
colorArr = []
for i in self.app[id].trend_idx:
colorArr.append(cycle_tr[i])
plt.scatter(x, np.array([- id] * len(x)), color=cycle_app[id], s=150, label="app:" + str(id))
plt.scatter(x, np.array([- id] * len(x)), color="w", s=70)
plt.scatter(x, np.array([- id] * len(x)), color=colorArr, s=15, marker="D", alpha=0.5)
            plt.xlabel('season')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction(self, name):
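        # Plot absolute prediction error over time: per app (only-CI, LSTM,
        # CIM) when diff_dir, otherwise all apps in one figure.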
x = list(range(self.span))
if self.diff_dir:
for i in range(len(self.app)):
plt.figure(figsize=(len(x) / 10, 5.5))
                # TODO: change as needed
plt.plot(x[self.reference_steps + self.reveal_trend:],
np.abs(np.array(self.prediction_only_ci[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),
label="only CI loss", linestyle="dotted", color="green")
plt.plot(x[self.reference_steps:],
np.abs(np.array(self.prediction[i]) - np.array(self.app[i].trend[self.reference_steps:])),
label="LSTM loss", linestyle="dotted", color="blue")
plt.plot(x[self.reference_steps + self.reveal_trend:],
np.abs(np.array(self.prediction_e[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),
label="CIM loss", color="orange")
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.app_dire[i] + name + ".png", dpi=self.dpi)
plt.clf()
else:
plt.figure(figsize=(len(x)/10, 5.5))
            # color per app
if len(self.app) <= 10:
cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']
elif len(self.app) <= 20:
cycle_app = plt.cm.get_cmap('tab20').colors
else:
cycle_app = list(colors.XKCD_COLORS.items())[:100]
for id in range(len(self.app)):
plt.plot(x[self.reference_steps:], np.abs(np.array(self.prediction[id]) - np.array(self.app[id].trend[self.reference_steps:])),
color=cycle_app[id], label="classify loss (app:" + str(id) + ")", linestyle="dotted")
plt.plot(x[self.reference_steps + self.reveal_trend:], np.abs(np.array(self.prediction_e[id]) - np.array(self.app[id].trend[self.reference_steps + self.reveal_trend:])),
color=cycle_app[id], label="analyse loss (app:" + str(id) + ")")
plt.xlabel('season')
plt.ylabel('prediction loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def savefig_compare_prediction_ave(self, name):
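        # Plot the squared prediction error averaged over apps at each step,
        # for the only-CI, LSTM and CIM predictions.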
x = list(range(self.span))
if self.diff_dir:
prediction = []
prediction_e = []
prediction_ci = []
            # average the squared loss over all apps at each step
for j in range(self.span - self.reference_steps):
sum = 0
sum_e = 0
sum_ci = 0
for i in range(len(self.app)):
sum += (self.prediction[i][j] - self.app[i].trend[j + self.reference_steps])**2
if j < self.span - self.reference_steps - self.reveal_trend:
sum_e += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2
                        # bug fix (inferred from the "only CI loss" label): use
                        # prediction_only_ci here rather than prediction_e
                        sum_ci += (self.prediction_only_ci[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2
prediction.append(sum / len(self.app))
if j < self.span - self.reference_steps - self.reveal_trend:
prediction_e.append(sum_e / len(self.app))
prediction_ci.append(sum_ci / len(self.app))
plt.figure(figsize=(len(x) / 10, 5.5))
plt.xlabel('season')
plt.ylabel('prediction loss average')
            # TODO: change as needed
plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_ci,
label="only CI loss", linestyle="dotted")
plt.plot(x[self.reference_steps:], prediction, label="LSTM loss", linestyle="dotted")
plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_e, label="CIM loss")
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
def savefig_rule_num(self, name):
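        # Plot counts of rule events over time (prediction failures,
        # captured/added/lost/useless/merged rules).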
x = list(range(self.span))
plt.figure(figsize=(len(x)/10, 5.5))
        # NOTE: chart_num / width are unused leftovers from a bar-chart layout
        chart_num = 6
        width = 0.8 / chart_num
        # NOTE: both lines below plot predfail_app_num; "truth rule number"
        # presumably refers to a different series that is not tracked here
        plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label="truth rule number")
        plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label="prediction fail app")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.cap_rule_num, label="captured rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.add_rule_num, label="add rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.lost_rule_num, label="lost rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.useless_rule_num, label="useless rule")
plt.plot(x[self.reference_steps + self.reveal_trend:], self.merge_rule_num, label="merge rule")
plt.xlabel('season')
plt.ylabel('number')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.subplots_adjust(right=0.8)
plt.savefig(self.dire + name + ".png", dpi=self.dpi)
plt.clf()
return
def save_config(self, name, cfg):
import json
setting = dict(
APP_NUM = cfg.APP_NUM,
SPAN = cfg.SPAN,
REVEAL_TREND = cfg.REVEAL_TREND,
FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,
SHIFT_TREND_RULE = cfg.SHIFT_TREND_RULE,
APPEAR_RATE = cfg.APPEAR_RATE,
DISAPPEAR_RATE = cfg.DISAPPEAR_RATE,
EVALUATE_THRESHOLD_PRED_FAIL = cfg.EVALUATE_THRESHOLD_PRED_FAIL,
SAMPLING = cfg.SAMPLING,
EVALUATE_THRESHOLD_DELETE_RULE = cfg.EVALUATE_THRESHOLD_DELETE_RULE,
EVALUATE_THRESHOLD_ADD_RULE = cfg.EVALUATE_THRESHOLD_ADD_RULE,
EVALUATE_THRESHOLD_MERGE_RULE = cfg.EVALUATE_THRESHOLD_MERGE_RULE,
THRESHOLD_APPNUM = cfg.THRESHOLD_APPNUM,
TRY_NEWRULE_NUM = cfg.TRY_NEWRULE_NUM,
LSTM_REFERENCE_STEPS = cfg.LSTM_REFERENCE_STEPS,
LSTM_EPOCHS = cfg.LSTM_EPOCHS,
NN_EPOCHS = cfg.NN_EPOCHS,
DATATYPE = [dict(
name = feat["name"],
type = str(type(feat["data"]))
) for feat in cfg.DATATYPE],
FIRST_BIN = cfg.FIRST_BIN
)
        # use a context manager so the file handle is closed
        with open(self.dire + name + '.json', 'w') as fw:
            json.dump(setting, fw, indent=4)
return
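
# ----------------------------------------------------------------------
# Usage sketch (illustrative only). `data` and `CIM` are assumed to be
# objects exposing the attributes read in __init__ (data.apps,
# data.app_num, data.trend_rule, CIM.prediction, CIM.prediction_est_rule,
# ...); the variable names below are hypothetical.
#
#   fig = figure(dire='result', dpi=150, span=cfg.SPAN, data=data, CIM=cim,
#                reference_steps=cfg.LSTM_REFERENCE_STEPS,
#                reveal_trend=cfg.REVEAL_TREND)
#   fig.savefig_result('result')
#   fig.savefig_ruleweight('rule_weight')
#   fig.savefig_compare_prediction('compare_prediction')
#   fig.savefig_rule_num('rule_num')
#   fig.save_config('config', cfg)
# ----------------------------------------------------------------------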
for i in range(len(self.app)):\n sum += (self.prediction[i][j] - self.app[i].trend[j +\n self.reference_steps]) ** 2\n if (j < self.span - self.reference_steps - self.\n reveal_trend):\n sum_e += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n sum_ci += (self.prediction_e[i][j] - self.app[i].\n trend[j + self.reference_steps + self.reveal_trend]\n ) ** 2\n prediction.append(sum / len(self.app))\n if j < self.span - self.reference_steps - self.reveal_trend:\n prediction_e.append(sum_e / len(self.app))\n prediction_ci.append(sum_ci / len(self.app))\n plt.figure(figsize=(len(x) / 10, 5.5))\n plt.xlabel('season')\n plt.ylabel('prediction loss average')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_ci, label='only CI loss', linestyle='dotted')\n plt.plot(x[self.reference_steps:], prediction, label=\n 'LSTM loss', linestyle='dotted')\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n prediction_e, label='CIM loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n\n def savefig_rule_num(self, name):\n x = list(range(self.span))\n plt.figure(figsize=(len(x) / 10, 5.5))\n chart_num = 6\n width = 0.8 / chart_num\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='truth rule number')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n predfail_app_num, label='prediction fail app')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n cap_rule_num, label='captured rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n add_rule_num, label='add rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n lost_rule_num, label='lost rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n useless_rule_num, label='useless rule')\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.\n merge_rule_num, label='merge rule')\n plt.xlabel('season')\n plt.ylabel('number')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + '.png', dpi=self.dpi)\n plt.clf()\n return\n\n def save_config(self, name, cfg):\n import json\n setting = dict(APP_NUM=cfg.APP_NUM, SPAN=cfg.SPAN, REVEAL_TREND=cfg\n .REVEAL_TREND, FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,\n SHIFT_TREND_RULE=cfg.SHIFT_TREND_RULE, APPEAR_RATE=cfg.\n APPEAR_RATE, DISAPPEAR_RATE=cfg.DISAPPEAR_RATE,\n EVALUATE_THRESHOLD_PRED_FAIL=cfg.EVALUATE_THRESHOLD_PRED_FAIL,\n SAMPLING=cfg.SAMPLING, EVALUATE_THRESHOLD_DELETE_RULE=cfg.\n EVALUATE_THRESHOLD_DELETE_RULE, EVALUATE_THRESHOLD_ADD_RULE=cfg\n .EVALUATE_THRESHOLD_ADD_RULE, EVALUATE_THRESHOLD_MERGE_RULE=cfg\n .EVALUATE_THRESHOLD_MERGE_RULE, THRESHOLD_APPNUM=cfg.\n THRESHOLD_APPNUM, TRY_NEWRULE_NUM=cfg.TRY_NEWRULE_NUM,\n LSTM_REFERENCE_STEPS=cfg.LSTM_REFERENCE_STEPS, LSTM_EPOCHS=cfg.\n LSTM_EPOCHS, NN_EPOCHS=cfg.NN_EPOCHS, DATATYPE=[dict(name=feat[\n 'name'], type=str(type(feat['data']))) for feat in cfg.DATATYPE\n ], FIRST_BIN=cfg.FIRST_BIN)\n fw = open(self.dire + name + '.json', 'w')\n json.dump(setting, fw, indent=4)\n return\n",
"step-5": "import os\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nimport numpy as np\n\n\nclass figure:\n\n def __init__(self, dire, dpi, span, data, CIM,\n learn_loss=None, eval_loss=None, different_dir_app=True, reference_steps=0, reveal_trend=1):\n\n self.dire = self.new_num_directory(dire)\n self.app_dire = [self.make_num_directory(\"app\", i) for i in range(data.app_num)]\n self.trend_dire = [self.make_num_directory(\"trend\", i) for i in range(len(data.trend_rule.w))]\n self.dpi = dpi\n\n self.span = span\n self.app = data.apps\n self.trend_rule = data.trend_rule\n self.prediction = CIM.prediction\n self.prediction_e = CIM.prediction_est_rule\n\n self.prediction_only_ci = CIM.prediction_only_ci\n\n self.predfail_app_num = CIM.predfail_app_num\n self.cap_rule_num = CIM.cap_rule_num\n self.add_rule_num = CIM.add_rule_num\n self.lost_rule_num = CIM.lost_rule_num\n self.useless_rule_num = CIM.useless_rule_num\n self.merge_rule_num = CIM.merge_rule_num\n\n self.learn_loss = learn_loss\n self.eval_loss = eval_loss\n self.diff_dir = different_dir_app\n self.reference_steps = reference_steps\n self.reveal_trend = reveal_trend\n\n\n def new_num_directory(self, path):\n n = 1\n while True:\n if not os.path.exists(path + \"_\" + str(n)):\n os.mkdir(path + \"_\" + str(n))\n break\n else:\n n += 1\n return path + \"_\" + str(n) + \"/\"\n\n\n def make_num_directory(self, name, num):\n\n os.mkdir(self.dire + \"/\" + name + \"_\" + str(num))\n\n return self.dire + \"/\" + name + \"_\" + str(num) + \"/\"\n\n\n def find_min_max(self, data_list, length, standarize_zero=True):\n\n if standarize_zero:\n min = 0\n max = 0\n else:\n min = data_list[0][0]\n max = data_list[0][0]\n\n for data in data_list:\n\n for j in range(length):\n\n if j < len(data):\n if data[j] < min:\n min = data[j]\n if data[j] > max:\n max = data[j]\n\n return min, max\n\n\n def savefig_result(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n # トレンドルールごとの色(chosenRuleより)\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n\n for i, app in enumerate(self.app):\n\n min, max = self.find_min_max([self.prediction[i], self.prediction_e[i]], self.span)\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n # (chosenRuleより)\n for j in range(len(self.trend_rule.w)):\n plt.fill_between([j - 0.5, j + 0.5], [max * 1.1 + 0.1, max * 1.1 + 0.1],\n [min * 1.1 - 0.1, min * 1.1 - 0.1],\n facecolor=cycle_tr[j], alpha=0.2,\n label=\"Chosenrule:\" + str(j))\n for j in range(self.span):\n plt.fill_between([j - 0.5, j + 0.5], [max*1.1+0.1, max*1.1+0.1], [min*1.1-0.1, min*1.1-0.1],\n facecolor=cycle_tr[self.app[i].trend_idx[j]], alpha=0.2)\n\n\n plt.plot(x, app.trend, label=\"trend\", linestyle=\"dotted\", color=\"black\")\n plt.plot(x[self.reference_steps:], self.prediction[i],\n label=\"LSTM pred\", linestyle=\"dotted\", color=\"blue\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.prediction_e[i],\n label=\"CIM pred\", color=\"orange\")\n\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], alpha=0.3,\n label=\"learn loss\")\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], alpha=0.3, marker=\"X\",\n label=\"eval loss\")\n\n plt.xlabel('season')\n plt.ylabel('trend value')\n 
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + \".png\", dpi=self.dpi)\n plt.clf()\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # アプリごとの色\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n\n for i, app in enumerate(self.app):\n plt.plot(x, self.app[i].trend, color=cycle_app[i], label=\"trend (app:\" + str(i) + \")\", linestyle=\"dotted\")\n plt.plot(x[self.reference_steps:], self.prediction[i], color=cycle_app[i], label=\"pred (app:\" + str(i) + \")\")\n\n if self.learn_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.learn_loss[i], color=cycle_app[i], alpha=0.3,\n label=\"learn loss (app:\" + str(i) + \")\")\n if self.eval_loss is not None:\n plt.scatter(x[self.reference_steps + self.reveal_trend:], self.eval_loss[i], color=cycle_app[i], alpha=0.3, marker=\"X\",\n label=\"evalu loss (app:\" + str(i) + \")\")\n\n plt.xlabel('season')\n plt.ylabel('trend value')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n plt.clf()\n\n return\n\n\n def savefig_ruleweight(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n # 特徴ごとの色\n if len(self.trend_rule.w[0][\"value\"]) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0][\"value\"]) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n\n for i in range(len(self.trend_rule.w)):\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n # 特徴毎に\n for j in range(len(self.trend_rule.w[i][\"value\"])):\n plt.plot(x, self.trend_rule.w[i][\"value\"][j][:-1], color=cycle_ft[j], label=\"feature:\" + str(j))\n\n plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.trend_dire[i] + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # トレンドルールごとの色\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n\n # 特徴ごとの色\n if len(self.trend_rule.w[0][\"value\"]) <= 10:\n cycle_ft = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w[0][\"value\"]) <= 20:\n cycle_ft = plt.cm.get_cmap('tab20').colors\n else:\n cycle_ft = list(colors.XKCD_COLORS.items())[:100]\n\n width = 0.8 / len(self.trend_rule.w[0][\"value\"])\n #トレンドルール毎に\n for i in range(len(self.trend_rule.w)):\n bottom = np.array(- i * 2.0)\n # 特徴毎に\n for j in range(len(self.trend_rule.w[i][\"value\"])):\n if i == 0:\n plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i][j][:-1],\n color=cycle_ft[j], align='edge', bottom=bottom, width=width, label=\"feature:\" + str(j))\n else:\n plt.bar(x + np.array([width * float(j)] * len(x)), self.trend_rule.w[i][\"value\"][j][:-1],\n color=cycle_ft[j], align='edge', bottom=bottom, width=width)\n\n plt.fill_between(list(range(self.span+1)), [- i * 2.0 + 1] * (len(x)+1), [- (i+1) * 2.0 + 1] * (len(x)+1),\n facecolor=cycle_tr[i], alpha=0.2, label=\"trendrule:\" + str(i))\n\n 
plt.xlabel('season')\n plt.ylabel('weight of trend rule')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n\n def savefig_chosenrule(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n pass # savefig_resultに統合\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # アプリごとの色\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n\n # トレンドルールごとの色\n if len(self.trend_rule.w) <= 10:\n cycle_tr = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.trend_rule.w) <= 20:\n cycle_tr = plt.cm.get_cmap('tab20').colors\n else:\n cycle_tr = list(colors.XKCD_COLORS.items())[:100]\n\n # 凡例表示用\n for i in range(len(self.trend_rule.w)):\n plt.scatter(x, np.array([0] * len(x)), color=cycle_tr[i], s=1, marker=\"D\",\n label=\"trendrule:\" + str(i))\n\n for id in range(len(self.app)):\n colorArr = []\n for i in self.app[id].trend_idx:\n colorArr.append(cycle_tr[i])\n plt.scatter(x, np.array([- id] * len(x)), color=cycle_app[id], s=150, label=\"app:\" + str(id))\n plt.scatter(x, np.array([- id] * len(x)), color=\"w\", s=70)\n plt.scatter(x, np.array([- id] * len(x)), color=colorArr, s=15, marker=\"D\", alpha=0.5)\n\n plt.xlabel('シーズン')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n\n def savefig_compare_prediction(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n for i in range(len(self.app)):\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n # *************************(変更してください)\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n np.abs(np.array(self.prediction_only_ci[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),\n label=\"only CI loss\", linestyle=\"dotted\", color=\"green\")\n\n plt.plot(x[self.reference_steps:],\n np.abs(np.array(self.prediction[i]) - np.array(self.app[i].trend[self.reference_steps:])),\n label=\"LSTM loss\", linestyle=\"dotted\", color=\"blue\")\n plt.plot(x[self.reference_steps + self.reveal_trend:],\n np.abs(np.array(self.prediction_e[i]) - np.array(self.app[i].trend[self.reference_steps + self.reveal_trend:])),\n label=\"CIM loss\", color=\"orange\")\n\n plt.xlabel('season')\n plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.app_dire[i] + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n else:\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n # アプリごとの色\n if len(self.app) <= 10:\n cycle_app = plt.rcParams['axes.prop_cycle'].by_key()['color']\n elif len(self.app) <= 20:\n cycle_app = plt.cm.get_cmap('tab20').colors\n else:\n cycle_app = list(colors.XKCD_COLORS.items())[:100]\n\n for id in range(len(self.app)):\n\n plt.plot(x[self.reference_steps:], np.abs(np.array(self.prediction[id]) - np.array(self.app[id].trend[self.reference_steps:])),\n color=cycle_app[id], label=\"classify loss (app:\" + str(id) + \")\", linestyle=\"dotted\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], np.abs(np.array(self.prediction_e[id]) - np.array(self.app[id].trend[self.reference_steps + self.reveal_trend:])),\n color=cycle_app[id], label=\"analyse loss (app:\" + str(id) + \")\")\n\n plt.xlabel('season')\n 
plt.ylabel('prediction loss')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n\n def savefig_compare_prediction_ave(self, name):\n\n x = list(range(self.span))\n\n if self.diff_dir:\n\n prediction = []\n prediction_e = []\n prediction_ci = []\n\n # 各アプリに対して平均を算出\n for j in range(self.span - self.reference_steps):\n\n sum = 0\n sum_e = 0\n sum_ci = 0\n\n for i in range(len(self.app)):\n\n sum += (self.prediction[i][j] - self.app[i].trend[j + self.reference_steps])**2\n if j < self.span - self.reference_steps - self.reveal_trend:\n\n sum_e += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2\n sum_ci += (self.prediction_e[i][j] - self.app[i].trend[j + self.reference_steps + self.reveal_trend])**2\n\n prediction.append(sum / len(self.app))\n if j < self.span - self.reference_steps - self.reveal_trend:\n prediction_e.append(sum_e / len(self.app))\n prediction_ci.append(sum_ci / len(self.app))\n\n plt.figure(figsize=(len(x) / 10, 5.5))\n\n plt.xlabel('season')\n plt.ylabel('prediction loss average')\n\n # *************************(変更してください)\n plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_ci,\n label=\"only CI loss\", linestyle=\"dotted\")\n\n plt.plot(x[self.reference_steps:], prediction, label=\"LSTM loss\", linestyle=\"dotted\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], prediction_e, label=\"CIM loss\")\n\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n\n def savefig_rule_num(self, name):\n\n x = list(range(self.span))\n\n plt.figure(figsize=(len(x)/10, 5.5))\n\n chart_num = 6\n width = 0.8 / chart_num\n\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label=\"truth rule number\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.predfail_app_num, label=\"prediction fail app\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.cap_rule_num, label=\"captured rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.add_rule_num, label=\"add rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.lost_rule_num, label=\"lost rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.useless_rule_num, label=\"useless rule\")\n plt.plot(x[self.reference_steps + self.reveal_trend:], self.merge_rule_num, label=\"merge rule\")\n\n plt.xlabel('season')\n plt.ylabel('number')\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')\n plt.subplots_adjust(right=0.8)\n plt.savefig(self.dire + name + \".png\", dpi=self.dpi)\n\n plt.clf()\n\n return\n\n def save_config(self, name, cfg):\n\n import json\n\n setting = dict(\n APP_NUM = cfg.APP_NUM,\n SPAN = cfg.SPAN,\n REVEAL_TREND = cfg.REVEAL_TREND,\n FIRST_RULE_NUM=cfg.FIRST_RULE_NUM,\n SHIFT_TREND_RULE = cfg.SHIFT_TREND_RULE,\n APPEAR_RATE = cfg.APPEAR_RATE,\n DISAPPEAR_RATE = cfg.DISAPPEAR_RATE,\n EVALUATE_THRESHOLD_PRED_FAIL = cfg.EVALUATE_THRESHOLD_PRED_FAIL,\n SAMPLING = cfg.SAMPLING,\n EVALUATE_THRESHOLD_DELETE_RULE = cfg.EVALUATE_THRESHOLD_DELETE_RULE,\n EVALUATE_THRESHOLD_ADD_RULE = cfg.EVALUATE_THRESHOLD_ADD_RULE,\n EVALUATE_THRESHOLD_MERGE_RULE = cfg.EVALUATE_THRESHOLD_MERGE_RULE,\n THRESHOLD_APPNUM = cfg.THRESHOLD_APPNUM,\n TRY_NEWRULE_NUM = cfg.TRY_NEWRULE_NUM,\n LSTM_REFERENCE_STEPS = cfg.LSTM_REFERENCE_STEPS,\n LSTM_EPOCHS = 
cfg.LSTM_EPOCHS,\n NN_EPOCHS = cfg.NN_EPOCHS,\n DATATYPE = [dict(\n name = feat[\"name\"],\n type = str(type(feat[\"data\"]))\n ) for feat in cfg.DATATYPE],\n FIRST_BIN = cfg.FIRST_BIN\n )\n\n fw = open(self.dire + name + '.json', 'w')\n json.dump(setting, fw, indent=4)\n\n return",
"step-ids": [
4,
11,
12,
13,
14
]
}
|
[
4,
11,
12,
13,
14
] |
from django.db import models
from django.utils import timezone
class User(models.Model):
class Meta:
db_table = "User"
app_label = "backlog"
webin_id = models.CharField(
"ENA's submission account id", max_length=15, unique=True, primary_key=True
)
registered = models.BooleanField(
"A copy of ENA's ROLE_METAGENOME_SUBMITTER flag. Set to True if submitter is registered with EMG.",
default=False,
)
consent_given = models.BooleanField(
"A copy of ENA's ROLE_METAGENOME_ANALYSIS flag. Set to True if submitter gave permission to access and analyse their private data.",
default=False,
)
    email_address = models.CharField("Submitter's email address.", max_length=200)
first_name = models.CharField(max_length=30, null=True)
surname = models.CharField(max_length=50, null=True)
first_created = models.DateTimeField(auto_now_add=True, null=True)
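# A minimal usage sketch (helper name is ours, not part of this module): EMG
# may only pull and analyse a submitter's private data when both mirrored ENA
# role flags are set on the account.
def may_analyse_private_data(user):
    return user.registered and user.consent_given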
class Submission(models.Model):
class Meta:
db_table = "Submission"
app_label = "backlog"
primary_accession = models.CharField(max_length=20, unique=True, null=True)
secondary_accession = models.CharField(max_length=20, unique=True, null=True)
uuid = models.CharField(max_length=100, blank=True, unique=True, null=True)
created = models.DateTimeField(default=timezone.now)
submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
class Biome(models.Model):
class Meta:
db_table = "Biome"
app_label = "backlog"
biome_id = models.IntegerField(primary_key=True, unique=True)
biome_name = models.CharField(max_length=60)
lft = models.IntegerField()
rgt = models.IntegerField()
depth = models.IntegerField()
lineage = models.CharField(max_length=500)
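# Biome encodes its hierarchy as a nested set: B is a descendant of A exactly
# when A.lft < B.lft and B.rgt < A.rgt. A minimal sketch of a descendant
# lookup (helper name is ours, not part of this module):
def biome_descendants(biome):
    # Every biome whose (lft, rgt) interval lies strictly inside this one's.
    return Biome.objects.filter(lft__gt=biome.lft, rgt__lt=biome.rgt)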
class StudyError(models.Model):
class Meta:
db_table = "StudyErrorType"
app_label = "backlog"
name = models.CharField(max_length=100, unique=True)
description = models.TextField()
class Pipeline(models.Model):
class Meta:
db_table = "Pipeline"
app_label = "backlog"
version = models.FloatField(primary_key=True)
class Blacklist(models.Model):
class Meta:
db_table = "Blacklist"
app_label = "backlog"
date_blacklisted = models.DateField(auto_now_add=True)
pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)
error = models.ForeignKey(StudyError, on_delete=models.CASCADE)
user = models.CharField(max_length=16)
comment = models.TextField(null=False)
class Study(models.Model):
class Meta:
db_table = "Study"
app_label = "backlog"
unique_together = ("primary_accession", "secondary_accession")
primary_accession = models.CharField(max_length=20)
secondary_accession = models.CharField(max_length=20)
title = models.CharField(max_length=4000, null=True)
description = models.CharField(max_length=4000, null=True, blank=True)
scientific_name = models.CharField(max_length=200, null=True, blank=True)
public = models.BooleanField(default=True)
hold_date = models.DateField(null=True)
first_created = models.DateTimeField(auto_now_add=True, null=True)
last_updated = models.DateTimeField(auto_now=True, null=True)
ena_last_update = models.DateField(null=True)
mixs_compliant = models.NullBooleanField()
pubmed = models.TextField(null=True)
webin = models.CharField(max_length=100, null=True)
blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE, null=True)
submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
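# A hedged sketch (helper name is ours): blacklisting a study records the
# pipeline version it failed on, the error type, and a free-text comment.
def blacklist_study(study, pipeline, error, username, comment):
    study.blacklisted = Blacklist.objects.create(
        pipeline_version=pipeline, error=error, user=username, comment=comment
    )
    study.save(update_fields=["blacklisted"])
    return study.blacklisted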
class Run(models.Model):
class Meta:
db_table = "Run"
app_label = "backlog"
study = models.ForeignKey(Study, on_delete=models.CASCADE)
primary_accession = models.CharField(max_length=20, unique=True)
sample_primary_accession = models.CharField(max_length=20, blank=True, null=True)
compressed_data_size = models.BigIntegerField(
help_text="Sum of filesizes of compressed input. (bytes)", null=True, blank=True
)
biome = models.ForeignKey(
Biome,
to_field="biome_id",
db_column="biome_id",
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
inferred_biome = models.ForeignKey(
Biome,
related_name="inferred_run_biome",
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
base_count = models.BigIntegerField(null=True, blank=True)
read_count = models.BigIntegerField(null=True, blank=True)
instrument_platform = models.CharField(max_length=4000)
instrument_model = models.CharField(max_length=4000)
library_strategy = models.CharField(max_length=150, null=True, db_index=True)
library_layout = models.CharField(max_length=20)
library_source = models.CharField(max_length=20, null=True)
ena_last_update = models.DateField(null=True)
last_updated = models.DateTimeField(auto_now=True, null=True)
public = models.BooleanField(default=True)
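# A minimal sketch (helper name is ours): base_count and read_count are both
# nullable, so guard before deriving the mean read length from them.
def mean_read_length(run):
    if run.base_count and run.read_count:
        return run.base_count / run.read_count
    return None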
class UserRequest(models.Model):
class Meta:
db_table = "UserRequest"
app_label = "backlog"
user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column="user_id")
first_created = models.DateTimeField(auto_now_add=True, null=True)
last_updated = models.DateTimeField(auto_now=True, null=True)
priority = models.IntegerField(default=0)
rt_ticket = models.IntegerField(unique=True)
class AssemblyType(models.Model):
class Meta:
db_table = "AssemblyType"
app_label = "backlog"
assembly_type = models.CharField(max_length=80, unique=True, null=False)
def __str__(self):
return self.assembly_type
# Assemblies received from ENA
class Assembly(models.Model):
class Meta:
db_table = "Assembly"
app_label = "backlog"
study = models.ForeignKey(Study, on_delete=models.CASCADE)
primary_accession = models.CharField(max_length=20, unique=True)
biome = models.ForeignKey(
Biome,
to_field="biome_id",
db_column="biome_id",
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
inferred_biome = models.ForeignKey(
Biome,
db_column="inferred_biome_id",
to_field="biome_id",
related_name="inferred_assembly_biome",
on_delete=models.DO_NOTHING,
null=True,
blank=True,
)
public = models.BooleanField(default=True)
ena_last_update = models.DateField(null=True)
assembly_type = models.ForeignKey(
"AssemblyType",
db_column="assembly_type_id",
on_delete=models.DO_NOTHING,
blank=True,
null=True,
)
class Assembler(models.Model):
class Meta:
db_table = "Assembler"
app_label = "backlog"
name = models.CharField(max_length=20)
version = models.CharField(max_length=20)
class AssemblyJobStatus(models.Model):
class Meta:
db_table = "AssemblyJobStatus"
app_label = "backlog"
description = models.CharField(max_length=100)
class AssemblyJobResult(models.Model):
class Meta:
db_table = "AssemblyJobResult"
app_label = "backlog"
execution_time = models.BigIntegerField(
help_text="Total execution time (including restarts) of the assembler, in seconds."
)
peak_mem = models.BigIntegerField(
help_text="Peak memory usage of the assembler, in megabytes."
)
n50 = models.IntegerField()
l50 = models.IntegerField()
num_contigs = models.IntegerField()
assembly_length = models.BigIntegerField()
largest_contig = models.BigIntegerField()
coverage = models.FloatField()
# average depth of coverage of the assembly
coverage_depth = models.FloatField()
class AssemblyJob(models.Model):
class Meta:
db_table = "AssemblyJob"
app_label = "backlog"
assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)
status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)
submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING, null=True)
request_id = models.ForeignKey(
UserRequest,
on_delete=models.DO_NOTHING,
null=True,
db_column="request_id",
)
directory = models.CharField(max_length=255, null=True, blank=True)
input_size = models.BigIntegerField(
help_text="Sum of filesizes of compressed input. (bytes)"
)
reason = models.TextField(
null=True,
help_text="Filled iff assembly will not be submitted to ENA, specifies the reason why.",
)
requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)
priority = models.IntegerField(
choices=[(1, "Low"), (2, "Medium"), (3, "High")], null=True
)
result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE, null=True)
estimated_peak_mem = models.BigIntegerField(
help_text="Estimated peak memory usage of the assembler, in megabytes.",
null=True,
)
uploaded_to_ena = models.NullBooleanField()
bam_uploaded = models.NullBooleanField()
new_ena_assembly = models.CharField(max_length=20, null=True)
runs = models.ManyToManyField(
Run, through="RunAssemblyJob", related_name="assemblyjobs", blank=True
)
# Assembly instances for runs
class RunAssemblyJob(models.Model):
class Meta:
db_table = "RunAssemblyJob"
app_label = "backlog"
unique_together = (("run", "assembly_job"),)
run = models.ForeignKey(Run, on_delete=models.CASCADE)
assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)
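# Because AssemblyJob.runs goes through an explicit model, rows can be created
# on RunAssemblyJob directly; get_or_create keeps this idempotent under the
# (run, assembly_job) unique_together constraint. A hedged sketch (helper name
# is ours, not part of this module):
def attach_run(assembly_job, run):
    obj, _created = RunAssemblyJob.objects.get_or_create(
        run=run, assembly_job=assembly_job
    )
    return obj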
# Show all runs used to create an assembly
class RunAssembly(models.Model):
class Meta:
db_table = "RunAssembly"
app_label = "backlog"
run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)
assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)
class AnnotationJobStatus(models.Model):
class Meta:
db_table = "AnnotationJobStatus"
app_label = "backlog"
description = models.CharField(max_length=20)
class AnnotationJob(models.Model):
PRIORITY_LOW = 1
PRIORITY_MEDIUM = 2
PRIORITY_HIGH = 3
PRIORITIES = [
(PRIORITY_LOW, "Low"),
(PRIORITY_MEDIUM, "Medium"),
(PRIORITY_HIGH, "High"),
]
    # Pipeline execution result status.
    # For example, the pipeline may find no CDS, so most steps
    # won't be executed for this data set.
RESULT_NO_TAX = "no_tax"
RESULT_NO_QC = "no_qc"
RESULT_NO_CDS = "no_cds"
RESULT_NO_CDS_TAX = "no_cds_tax"
# pipeline completed all the stages
RESULT_FULL = "full"
RESULT_CHOICES = (
(RESULT_NO_TAX, "No Taxonomy results"),
(RESULT_NO_QC, "Failed QC"),
(RESULT_NO_CDS, "No CDS found"),
(RESULT_FULL, "No problems"),
(RESULT_NO_CDS_TAX, "No CDS or taxonomy found"),
)
pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)
status = models.ForeignKey(
AnnotationJobStatus, on_delete=models.DO_NOTHING, db_index=True
)
priority = models.IntegerField(choices=PRIORITIES)
request = models.ForeignKey(
UserRequest, on_delete=models.DO_NOTHING, null=True, db_column="request_id"
)
directory = models.CharField(max_length=255, null=True, blank=True)
last_updated = models.DateTimeField(auto_now=True, null=True)
runs = models.ManyToManyField(
Run, through="RunAnnotationJob", related_name="annotationjobs", blank=True
)
attempt = models.IntegerField(default=0)
result_status = models.CharField(
max_length=10, choices=RESULT_CHOICES, blank=True, null=True
)
class Meta:
db_table = "AnnotationJob"
app_label = "backlog"
# Annotation instance for a run
class RunAnnotationJob(models.Model):
class Meta:
db_table = "RunAnnotationJob"
app_label = "backlog"
unique_together = (("run", "annotation_job"),)
run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)
annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)
class AssemblyAnnotationJob(models.Model):
class Meta:
db_table = "AssemblyAnnotationJob"
app_label = "backlog"
assembly = models.ForeignKey(
Assembly, on_delete=models.DO_NOTHING, related_name="assemblyannotationjobs"
)
annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)
class AssemblyProteinDB(models.Model):
STATUS_COMPLETED = 1
STATUS_FAIL = 0
STATUS = ((STATUS_COMPLETED, "Completed"), (STATUS_FAIL, "Failed"))
FAIL_FASTA_MISSING = 1
FAIL_PIPELINE_VERSION = 2
FAIL_FASTA_DIR = 3
    FAIL_SUPPRESSED = 4
FAIL_MGYC = 5
FAIL_MGYP = 6
FAIL_METADATA = 7
FAIL_MGYC_MGYP = 8
FAIL_MGYC_METADATA = 9
FAIL_MGYP_METADATA = 10
FAIL_MGYC_MGYP_METADATA = 11
FAIL_LEGACY = 12
FAIL_REASONS = (
(FAIL_FASTA_MISSING, "Missing protein fasta file"),
        (FAIL_PIPELINE_VERSION, "Assembly was added with a higher version of the pipeline"),
(FAIL_FASTA_DIR, "Assembly results directory is missing"),
(FAIL_SUPRESSED, "Suppressed assembly"),
(FAIL_MGYC, "Incorrect number of sequences for MGYC.fasta"),
(FAIL_MGYP, "Incorrect number of sequences for MGYP.fasta"),
(FAIL_METADATA, "Incorrect number of records for metadata"),
(FAIL_MGYC_MGYP, "Incorrect MGYC and MGYP but metadata is OK"),
(FAIL_MGYC_METADATA, "Incorrect number of sequences for MGYC.fasta and metadata table/file"),
(FAIL_MGYP_METADATA, "Incorrect number of sequences for MGYP.fasta and metadata table/file"),
(FAIL_MGYC_MGYP_METADATA, "Incorrect number of sequences for MGYC, MGYP and metadata"),
(FAIL_LEGACY, "Assembly marked as legacy"),
)
assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)
status = models.IntegerField("status", choices=STATUS)
fail_reason = models.IntegerField(
"fail_reason", choices=FAIL_REASONS, null=True, blank=True
)
pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.DO_NOTHING)
last_updated = models.DateTimeField("Last updated", auto_now=True)
assembly_id_pdb = models.IntegerField("id_pdb", null=True)
    legacy = models.IntegerField("assembly_id of the new accession for a legacy assembly", null=True, blank=True)
class Meta:
app_label = "backlog"
db_table = "AssemblyProteinDB"
|
normal
|
{
"blob_id": "dff5e75460637cf175b1b65af3320d01dc2e35b6",
"index": 2628,
"step-1": "<mask token>\n\n\nclass Blacklist(models.Model):\n\n\n class Meta:\n db_table = 'Blacklist'\n app_label = 'backlog'\n date_blacklisted = models.DateField(auto_now_add=True)\n pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)\n error = models.ForeignKey(StudyError, on_delete=models.CASCADE)\n user = models.CharField(max_length=16)\n comment = models.TextField(null=False)\n\n\nclass Study(models.Model):\n\n\n class Meta:\n db_table = 'Study'\n app_label = 'backlog'\n unique_together = 'primary_accession', 'secondary_accession'\n primary_accession = models.CharField(max_length=20)\n secondary_accession = models.CharField(max_length=20)\n title = models.CharField(max_length=4000, null=True)\n description = models.CharField(max_length=4000, null=True, blank=True)\n scientific_name = models.CharField(max_length=200, null=True, blank=True)\n public = models.BooleanField(default=True)\n hold_date = models.DateField(null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n ena_last_update = models.DateField(null=True)\n mixs_compliant = models.NullBooleanField()\n pubmed = models.TextField(null=True)\n webin = models.CharField(max_length=100, null=True)\n blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE,\n null=True)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Run(models.Model):\n\n\n class Meta:\n db_table = 'Run'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n sample_primary_accession = models.CharField(max_length=20, blank=True,\n null=True)\n compressed_data_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. 
(bytes)', null=True, blank=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, related_name=\n 'inferred_run_biome', on_delete=models.DO_NOTHING, null=True, blank\n =True)\n base_count = models.BigIntegerField(null=True, blank=True)\n read_count = models.BigIntegerField(null=True, blank=True)\n instrument_platform = models.CharField(max_length=4000)\n instrument_model = models.CharField(max_length=4000)\n library_strategy = models.CharField(max_length=150, null=True, db_index\n =True)\n library_layout = models.CharField(max_length=20)\n library_source = models.CharField(max_length=20, null=True)\n ena_last_update = models.DateField(null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n public = models.BooleanField(default=True)\n\n\nclass UserRequest(models.Model):\n\n\n class Meta:\n db_table = 'UserRequest'\n app_label = 'backlog'\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column=\n 'user_id')\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n priority = models.IntegerField(default=0)\n rt_ticket = models.IntegerField(unique=True)\n\n\nclass AssemblyType(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyType'\n app_label = 'backlog'\n assembly_type = models.CharField(max_length=80, unique=True, null=False)\n\n def __str__(self):\n return self.assembly_type\n\n\nclass Assembly(models.Model):\n\n\n class Meta:\n db_table = 'Assembly'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, db_column='inferred_biome_id',\n to_field='biome_id', related_name='inferred_assembly_biome',\n on_delete=models.DO_NOTHING, null=True, blank=True)\n public = models.BooleanField(default=True)\n ena_last_update = models.DateField(null=True)\n assembly_type = models.ForeignKey('AssemblyType', db_column=\n 'assembly_type_id', on_delete=models.DO_NOTHING, blank=True, null=True)\n\n\nclass Assembler(models.Model):\n\n\n class Meta:\n db_table = 'Assembler'\n app_label = 'backlog'\n name = models.CharField(max_length=20)\n version = models.CharField(max_length=20)\n\n\nclass AssemblyJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=100)\n\n\nclass AssemblyJobResult(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobResult'\n app_label = 'backlog'\n execution_time = models.BigIntegerField(help_text=\n 'Total execution time (including restarts) of the assembler, in seconds.'\n )\n peak_mem = models.BigIntegerField(help_text=\n 'Peak memory usage of the assembler, in megabytes.')\n n50 = models.IntegerField()\n l50 = models.IntegerField()\n num_contigs = models.IntegerField()\n assembly_length = models.BigIntegerField()\n largest_contig = models.BigIntegerField()\n coverage = models.FloatField()\n coverage_depth = models.FloatField()\n\n\nclass AssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJob'\n app_label = 'backlog'\n assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)\n 
submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING,\n null=True)\n request_id = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n input_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. (bytes)')\n reason = models.TextField(null=True, help_text=\n 'Filled iff assembly will not be submitted to ENA, specifies the reason why.'\n )\n requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n priority = models.IntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3,\n 'High')], null=True)\n result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE,\n null=True)\n estimated_peak_mem = models.BigIntegerField(help_text=\n 'Estimated peak memory usage of the assembler, in megabytes.', null\n =True)\n uploaded_to_ena = models.NullBooleanField()\n bam_uploaded = models.NullBooleanField()\n new_ena_assembly = models.CharField(max_length=20, null=True)\n runs = models.ManyToManyField(Run, through='RunAssemblyJob',\n related_name='assemblyjobs', blank=True)\n\n\nclass RunAssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAssemblyJob'\n app_label = 'backlog'\n unique_together = ('run', 'assembly_job'),\n run = models.ForeignKey(Run, on_delete=models.CASCADE)\n assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)\n\n\nclass RunAssembly(models.Model):\n\n\n class Meta:\n db_table = 'RunAssembly'\n app_label = 'backlog'\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n\n\nclass AnnotationJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AnnotationJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=20)\n\n\nclass AnnotationJob(models.Model):\n PRIORITY_LOW = 1\n PRIORITY_MEDIUM = 2\n PRIORITY_HIGH = 3\n PRIORITIES = [(PRIORITY_LOW, 'Low'), (PRIORITY_MEDIUM, 'Medium'), (\n PRIORITY_HIGH, 'High')]\n RESULT_NO_TAX = 'no_tax'\n RESULT_NO_QC = 'no_qc'\n RESULT_NO_CDS = 'no_cds'\n RESULT_NO_CDS_TAX = 'no_cds_tax'\n RESULT_FULL = 'full'\n RESULT_CHOICES = (RESULT_NO_TAX, 'No Taxonomy results'), (RESULT_NO_QC,\n 'Failed QC'), (RESULT_NO_CDS, 'No CDS found'), (RESULT_FULL,\n 'No problems'), (RESULT_NO_CDS_TAX, 'No CDS or taxonomy found')\n pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AnnotationJobStatus, on_delete=models.\n DO_NOTHING, db_index=True)\n priority = models.IntegerField(choices=PRIORITIES)\n request = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n runs = models.ManyToManyField(Run, through='RunAnnotationJob',\n related_name='annotationjobs', blank=True)\n attempt = models.IntegerField(default=0)\n result_status = models.CharField(max_length=10, choices=RESULT_CHOICES,\n blank=True, null=True)\n\n\n class Meta:\n db_table = 'AnnotationJob'\n app_label = 'backlog'\n\n\nclass RunAnnotationJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAnnotationJob'\n app_label = 'backlog'\n unique_together = ('run', 'annotation_job'),\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyAnnotationJob(models.Model):\n\n\n class Meta:\n 
db_table = 'AssemblyAnnotationJob'\n app_label = 'backlog'\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING,\n related_name='assemblyannotationjobs')\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyProteinDB(models.Model):\n STATUS_COMPLETED = 1\n STATUS_FAIL = 0\n STATUS = (STATUS_COMPLETED, 'Completed'), (STATUS_FAIL, 'Failed')\n FAIL_FASTA_MISSING = 1\n FAIL_PIPELINE_VERSION = 2\n FAIL_FASTA_DIR = 3\n FAIL_SUPRESSED = 4\n FAIL_MGYC = 5\n FAIL_MGYP = 6\n FAIL_METADATA = 7\n FAIL_MGYC_MGYP = 8\n FAIL_MGYC_METADATA = 9\n FAIL_MGYP_METADATA = 10\n FAIL_MGYC_MGYP_METADATA = 11\n FAIL_LEGACY = 12\n FAIL_REASONS = (FAIL_FASTA_MISSING, 'Missing protein fasta file'), (\n FAIL_PIPELINE_VERSION,\n 'Assembly was added with higher version of pipeline'), (FAIL_FASTA_DIR,\n 'Assembly results directory is missing'), (FAIL_SUPRESSED,\n 'Suppressed assembly'), (FAIL_MGYC,\n 'Incorrect number of sequences for MGYC.fasta'), (FAIL_MGYP,\n 'Incorrect number of sequences for MGYP.fasta'), (FAIL_METADATA,\n 'Incorrect number of records for metadata'), (FAIL_MGYC_MGYP,\n 'Incorrect MGYC and MGYP but metadata is OK'), (FAIL_MGYC_METADATA,\n 'Incorrect number of sequences for MGYC.fasta and metadata table/file'\n ), (FAIL_MGYP_METADATA,\n 'Incorrect number of sequences for MGYP.fasta and metadata table/file'\n ), (FAIL_MGYC_MGYP_METADATA,\n 'Incorrect number of sequences for MGYC, MGYP and metadata'), (\n FAIL_LEGACY, 'Assembly marked as legacy')\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n status = models.IntegerField('status', choices=STATUS)\n fail_reason = models.IntegerField('fail_reason', choices=FAIL_REASONS,\n null=True, blank=True)\n pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.\n DO_NOTHING)\n last_updated = models.DateTimeField('Last updated', auto_now=True)\n assembly_id_pdb = models.IntegerField('id_pdb', null=True)\n legacy = models.IntegerField(\n 'assembly_id for new accession for legacy assembly', null=True,\n blank=True)\n\n\n class Meta:\n app_label = 'backlog'\n db_table = 'AssemblyProteinDB'\n",
"step-2": "<mask token>\n\n\nclass Pipeline(models.Model):\n\n\n class Meta:\n db_table = 'Pipeline'\n app_label = 'backlog'\n <mask token>\n\n\nclass Blacklist(models.Model):\n\n\n class Meta:\n db_table = 'Blacklist'\n app_label = 'backlog'\n date_blacklisted = models.DateField(auto_now_add=True)\n pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)\n error = models.ForeignKey(StudyError, on_delete=models.CASCADE)\n user = models.CharField(max_length=16)\n comment = models.TextField(null=False)\n\n\nclass Study(models.Model):\n\n\n class Meta:\n db_table = 'Study'\n app_label = 'backlog'\n unique_together = 'primary_accession', 'secondary_accession'\n primary_accession = models.CharField(max_length=20)\n secondary_accession = models.CharField(max_length=20)\n title = models.CharField(max_length=4000, null=True)\n description = models.CharField(max_length=4000, null=True, blank=True)\n scientific_name = models.CharField(max_length=200, null=True, blank=True)\n public = models.BooleanField(default=True)\n hold_date = models.DateField(null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n ena_last_update = models.DateField(null=True)\n mixs_compliant = models.NullBooleanField()\n pubmed = models.TextField(null=True)\n webin = models.CharField(max_length=100, null=True)\n blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE,\n null=True)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Run(models.Model):\n\n\n class Meta:\n db_table = 'Run'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n sample_primary_accession = models.CharField(max_length=20, blank=True,\n null=True)\n compressed_data_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. 
(bytes)', null=True, blank=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, related_name=\n 'inferred_run_biome', on_delete=models.DO_NOTHING, null=True, blank\n =True)\n base_count = models.BigIntegerField(null=True, blank=True)\n read_count = models.BigIntegerField(null=True, blank=True)\n instrument_platform = models.CharField(max_length=4000)\n instrument_model = models.CharField(max_length=4000)\n library_strategy = models.CharField(max_length=150, null=True, db_index\n =True)\n library_layout = models.CharField(max_length=20)\n library_source = models.CharField(max_length=20, null=True)\n ena_last_update = models.DateField(null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n public = models.BooleanField(default=True)\n\n\nclass UserRequest(models.Model):\n\n\n class Meta:\n db_table = 'UserRequest'\n app_label = 'backlog'\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column=\n 'user_id')\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n priority = models.IntegerField(default=0)\n rt_ticket = models.IntegerField(unique=True)\n\n\nclass AssemblyType(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyType'\n app_label = 'backlog'\n assembly_type = models.CharField(max_length=80, unique=True, null=False)\n\n def __str__(self):\n return self.assembly_type\n\n\nclass Assembly(models.Model):\n\n\n class Meta:\n db_table = 'Assembly'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, db_column='inferred_biome_id',\n to_field='biome_id', related_name='inferred_assembly_biome',\n on_delete=models.DO_NOTHING, null=True, blank=True)\n public = models.BooleanField(default=True)\n ena_last_update = models.DateField(null=True)\n assembly_type = models.ForeignKey('AssemblyType', db_column=\n 'assembly_type_id', on_delete=models.DO_NOTHING, blank=True, null=True)\n\n\nclass Assembler(models.Model):\n\n\n class Meta:\n db_table = 'Assembler'\n app_label = 'backlog'\n name = models.CharField(max_length=20)\n version = models.CharField(max_length=20)\n\n\nclass AssemblyJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=100)\n\n\nclass AssemblyJobResult(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobResult'\n app_label = 'backlog'\n execution_time = models.BigIntegerField(help_text=\n 'Total execution time (including restarts) of the assembler, in seconds.'\n )\n peak_mem = models.BigIntegerField(help_text=\n 'Peak memory usage of the assembler, in megabytes.')\n n50 = models.IntegerField()\n l50 = models.IntegerField()\n num_contigs = models.IntegerField()\n assembly_length = models.BigIntegerField()\n largest_contig = models.BigIntegerField()\n coverage = models.FloatField()\n coverage_depth = models.FloatField()\n\n\nclass AssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJob'\n app_label = 'backlog'\n assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)\n 
submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING,\n null=True)\n request_id = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n input_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. (bytes)')\n reason = models.TextField(null=True, help_text=\n 'Filled iff assembly will not be submitted to ENA, specifies the reason why.'\n )\n requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n priority = models.IntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3,\n 'High')], null=True)\n result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE,\n null=True)\n estimated_peak_mem = models.BigIntegerField(help_text=\n 'Estimated peak memory usage of the assembler, in megabytes.', null\n =True)\n uploaded_to_ena = models.NullBooleanField()\n bam_uploaded = models.NullBooleanField()\n new_ena_assembly = models.CharField(max_length=20, null=True)\n runs = models.ManyToManyField(Run, through='RunAssemblyJob',\n related_name='assemblyjobs', blank=True)\n\n\nclass RunAssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAssemblyJob'\n app_label = 'backlog'\n unique_together = ('run', 'assembly_job'),\n run = models.ForeignKey(Run, on_delete=models.CASCADE)\n assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)\n\n\nclass RunAssembly(models.Model):\n\n\n class Meta:\n db_table = 'RunAssembly'\n app_label = 'backlog'\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n\n\nclass AnnotationJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AnnotationJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=20)\n\n\nclass AnnotationJob(models.Model):\n PRIORITY_LOW = 1\n PRIORITY_MEDIUM = 2\n PRIORITY_HIGH = 3\n PRIORITIES = [(PRIORITY_LOW, 'Low'), (PRIORITY_MEDIUM, 'Medium'), (\n PRIORITY_HIGH, 'High')]\n RESULT_NO_TAX = 'no_tax'\n RESULT_NO_QC = 'no_qc'\n RESULT_NO_CDS = 'no_cds'\n RESULT_NO_CDS_TAX = 'no_cds_tax'\n RESULT_FULL = 'full'\n RESULT_CHOICES = (RESULT_NO_TAX, 'No Taxonomy results'), (RESULT_NO_QC,\n 'Failed QC'), (RESULT_NO_CDS, 'No CDS found'), (RESULT_FULL,\n 'No problems'), (RESULT_NO_CDS_TAX, 'No CDS or taxonomy found')\n pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AnnotationJobStatus, on_delete=models.\n DO_NOTHING, db_index=True)\n priority = models.IntegerField(choices=PRIORITIES)\n request = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n runs = models.ManyToManyField(Run, through='RunAnnotationJob',\n related_name='annotationjobs', blank=True)\n attempt = models.IntegerField(default=0)\n result_status = models.CharField(max_length=10, choices=RESULT_CHOICES,\n blank=True, null=True)\n\n\n class Meta:\n db_table = 'AnnotationJob'\n app_label = 'backlog'\n\n\nclass RunAnnotationJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAnnotationJob'\n app_label = 'backlog'\n unique_together = ('run', 'annotation_job'),\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyAnnotationJob(models.Model):\n\n\n class Meta:\n 
db_table = 'AssemblyAnnotationJob'\n app_label = 'backlog'\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING,\n related_name='assemblyannotationjobs')\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyProteinDB(models.Model):\n STATUS_COMPLETED = 1\n STATUS_FAIL = 0\n STATUS = (STATUS_COMPLETED, 'Completed'), (STATUS_FAIL, 'Failed')\n FAIL_FASTA_MISSING = 1\n FAIL_PIPELINE_VERSION = 2\n FAIL_FASTA_DIR = 3\n FAIL_SUPRESSED = 4\n FAIL_MGYC = 5\n FAIL_MGYP = 6\n FAIL_METADATA = 7\n FAIL_MGYC_MGYP = 8\n FAIL_MGYC_METADATA = 9\n FAIL_MGYP_METADATA = 10\n FAIL_MGYC_MGYP_METADATA = 11\n FAIL_LEGACY = 12\n FAIL_REASONS = (FAIL_FASTA_MISSING, 'Missing protein fasta file'), (\n FAIL_PIPELINE_VERSION,\n 'Assembly was added with higher version of pipeline'), (FAIL_FASTA_DIR,\n 'Assembly results directory is missing'), (FAIL_SUPRESSED,\n 'Suppressed assembly'), (FAIL_MGYC,\n 'Incorrect number of sequences for MGYC.fasta'), (FAIL_MGYP,\n 'Incorrect number of sequences for MGYP.fasta'), (FAIL_METADATA,\n 'Incorrect number of records for metadata'), (FAIL_MGYC_MGYP,\n 'Incorrect MGYC and MGYP but metadata is OK'), (FAIL_MGYC_METADATA,\n 'Incorrect number of sequences for MGYC.fasta and metadata table/file'\n ), (FAIL_MGYP_METADATA,\n 'Incorrect number of sequences for MGYP.fasta and metadata table/file'\n ), (FAIL_MGYC_MGYP_METADATA,\n 'Incorrect number of sequences for MGYC, MGYP and metadata'), (\n FAIL_LEGACY, 'Assembly marked as legacy')\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n status = models.IntegerField('status', choices=STATUS)\n fail_reason = models.IntegerField('fail_reason', choices=FAIL_REASONS,\n null=True, blank=True)\n pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.\n DO_NOTHING)\n last_updated = models.DateTimeField('Last updated', auto_now=True)\n assembly_id_pdb = models.IntegerField('id_pdb', null=True)\n legacy = models.IntegerField(\n 'assembly_id for new accession for legacy assembly', null=True,\n blank=True)\n\n\n class Meta:\n app_label = 'backlog'\n db_table = 'AssemblyProteinDB'\n",
"step-3": "<mask token>\n\n\nclass StudyError(models.Model):\n\n\n class Meta:\n db_table = 'StudyErrorType'\n app_label = 'backlog'\n <mask token>\n <mask token>\n\n\nclass Pipeline(models.Model):\n\n\n class Meta:\n db_table = 'Pipeline'\n app_label = 'backlog'\n version = models.FloatField(primary_key=True)\n\n\nclass Blacklist(models.Model):\n\n\n class Meta:\n db_table = 'Blacklist'\n app_label = 'backlog'\n date_blacklisted = models.DateField(auto_now_add=True)\n pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)\n error = models.ForeignKey(StudyError, on_delete=models.CASCADE)\n user = models.CharField(max_length=16)\n comment = models.TextField(null=False)\n\n\nclass Study(models.Model):\n\n\n class Meta:\n db_table = 'Study'\n app_label = 'backlog'\n unique_together = 'primary_accession', 'secondary_accession'\n primary_accession = models.CharField(max_length=20)\n secondary_accession = models.CharField(max_length=20)\n title = models.CharField(max_length=4000, null=True)\n description = models.CharField(max_length=4000, null=True, blank=True)\n scientific_name = models.CharField(max_length=200, null=True, blank=True)\n public = models.BooleanField(default=True)\n hold_date = models.DateField(null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n ena_last_update = models.DateField(null=True)\n mixs_compliant = models.NullBooleanField()\n pubmed = models.TextField(null=True)\n webin = models.CharField(max_length=100, null=True)\n blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE,\n null=True)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Run(models.Model):\n\n\n class Meta:\n db_table = 'Run'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n sample_primary_accession = models.CharField(max_length=20, blank=True,\n null=True)\n compressed_data_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. 
(bytes)', null=True, blank=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, related_name=\n 'inferred_run_biome', on_delete=models.DO_NOTHING, null=True, blank\n =True)\n base_count = models.BigIntegerField(null=True, blank=True)\n read_count = models.BigIntegerField(null=True, blank=True)\n instrument_platform = models.CharField(max_length=4000)\n instrument_model = models.CharField(max_length=4000)\n library_strategy = models.CharField(max_length=150, null=True, db_index\n =True)\n library_layout = models.CharField(max_length=20)\n library_source = models.CharField(max_length=20, null=True)\n ena_last_update = models.DateField(null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n public = models.BooleanField(default=True)\n\n\nclass UserRequest(models.Model):\n\n\n class Meta:\n db_table = 'UserRequest'\n app_label = 'backlog'\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column=\n 'user_id')\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n priority = models.IntegerField(default=0)\n rt_ticket = models.IntegerField(unique=True)\n\n\nclass AssemblyType(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyType'\n app_label = 'backlog'\n assembly_type = models.CharField(max_length=80, unique=True, null=False)\n\n def __str__(self):\n return self.assembly_type\n\n\nclass Assembly(models.Model):\n\n\n class Meta:\n db_table = 'Assembly'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, db_column='inferred_biome_id',\n to_field='biome_id', related_name='inferred_assembly_biome',\n on_delete=models.DO_NOTHING, null=True, blank=True)\n public = models.BooleanField(default=True)\n ena_last_update = models.DateField(null=True)\n assembly_type = models.ForeignKey('AssemblyType', db_column=\n 'assembly_type_id', on_delete=models.DO_NOTHING, blank=True, null=True)\n\n\nclass Assembler(models.Model):\n\n\n class Meta:\n db_table = 'Assembler'\n app_label = 'backlog'\n name = models.CharField(max_length=20)\n version = models.CharField(max_length=20)\n\n\nclass AssemblyJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=100)\n\n\nclass AssemblyJobResult(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobResult'\n app_label = 'backlog'\n execution_time = models.BigIntegerField(help_text=\n 'Total execution time (including restarts) of the assembler, in seconds.'\n )\n peak_mem = models.BigIntegerField(help_text=\n 'Peak memory usage of the assembler, in megabytes.')\n n50 = models.IntegerField()\n l50 = models.IntegerField()\n num_contigs = models.IntegerField()\n assembly_length = models.BigIntegerField()\n largest_contig = models.BigIntegerField()\n coverage = models.FloatField()\n coverage_depth = models.FloatField()\n\n\nclass AssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJob'\n app_label = 'backlog'\n assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)\n 
submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING,\n null=True)\n request_id = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n input_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. (bytes)')\n reason = models.TextField(null=True, help_text=\n 'Filled iff assembly will not be submitted to ENA, specifies the reason why.'\n )\n requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n priority = models.IntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3,\n 'High')], null=True)\n result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE,\n null=True)\n estimated_peak_mem = models.BigIntegerField(help_text=\n 'Estimated peak memory usage of the assembler, in megabytes.', null\n =True)\n uploaded_to_ena = models.NullBooleanField()\n bam_uploaded = models.NullBooleanField()\n new_ena_assembly = models.CharField(max_length=20, null=True)\n runs = models.ManyToManyField(Run, through='RunAssemblyJob',\n related_name='assemblyjobs', blank=True)\n\n\nclass RunAssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAssemblyJob'\n app_label = 'backlog'\n unique_together = ('run', 'assembly_job'),\n run = models.ForeignKey(Run, on_delete=models.CASCADE)\n assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)\n\n\nclass RunAssembly(models.Model):\n\n\n class Meta:\n db_table = 'RunAssembly'\n app_label = 'backlog'\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n\n\nclass AnnotationJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AnnotationJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=20)\n\n\nclass AnnotationJob(models.Model):\n PRIORITY_LOW = 1\n PRIORITY_MEDIUM = 2\n PRIORITY_HIGH = 3\n PRIORITIES = [(PRIORITY_LOW, 'Low'), (PRIORITY_MEDIUM, 'Medium'), (\n PRIORITY_HIGH, 'High')]\n RESULT_NO_TAX = 'no_tax'\n RESULT_NO_QC = 'no_qc'\n RESULT_NO_CDS = 'no_cds'\n RESULT_NO_CDS_TAX = 'no_cds_tax'\n RESULT_FULL = 'full'\n RESULT_CHOICES = (RESULT_NO_TAX, 'No Taxonomy results'), (RESULT_NO_QC,\n 'Failed QC'), (RESULT_NO_CDS, 'No CDS found'), (RESULT_FULL,\n 'No problems'), (RESULT_NO_CDS_TAX, 'No CDS or taxonomy found')\n pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AnnotationJobStatus, on_delete=models.\n DO_NOTHING, db_index=True)\n priority = models.IntegerField(choices=PRIORITIES)\n request = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n runs = models.ManyToManyField(Run, through='RunAnnotationJob',\n related_name='annotationjobs', blank=True)\n attempt = models.IntegerField(default=0)\n result_status = models.CharField(max_length=10, choices=RESULT_CHOICES,\n blank=True, null=True)\n\n\n class Meta:\n db_table = 'AnnotationJob'\n app_label = 'backlog'\n\n\nclass RunAnnotationJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAnnotationJob'\n app_label = 'backlog'\n unique_together = ('run', 'annotation_job'),\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyAnnotationJob(models.Model):\n\n\n class Meta:\n 
db_table = 'AssemblyAnnotationJob'\n app_label = 'backlog'\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING,\n related_name='assemblyannotationjobs')\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyProteinDB(models.Model):\n STATUS_COMPLETED = 1\n STATUS_FAIL = 0\n STATUS = (STATUS_COMPLETED, 'Completed'), (STATUS_FAIL, 'Failed')\n FAIL_FASTA_MISSING = 1\n FAIL_PIPELINE_VERSION = 2\n FAIL_FASTA_DIR = 3\n FAIL_SUPRESSED = 4\n FAIL_MGYC = 5\n FAIL_MGYP = 6\n FAIL_METADATA = 7\n FAIL_MGYC_MGYP = 8\n FAIL_MGYC_METADATA = 9\n FAIL_MGYP_METADATA = 10\n FAIL_MGYC_MGYP_METADATA = 11\n FAIL_LEGACY = 12\n FAIL_REASONS = (FAIL_FASTA_MISSING, 'Missing protein fasta file'), (\n FAIL_PIPELINE_VERSION,\n 'Assembly was added with higher version of pipeline'), (FAIL_FASTA_DIR,\n 'Assembly results directory is missing'), (FAIL_SUPRESSED,\n 'Suppressed assembly'), (FAIL_MGYC,\n 'Incorrect number of sequences for MGYC.fasta'), (FAIL_MGYP,\n 'Incorrect number of sequences for MGYP.fasta'), (FAIL_METADATA,\n 'Incorrect number of records for metadata'), (FAIL_MGYC_MGYP,\n 'Incorrect MGYC and MGYP but metadata is OK'), (FAIL_MGYC_METADATA,\n 'Incorrect number of sequences for MGYC.fasta and metadata table/file'\n ), (FAIL_MGYP_METADATA,\n 'Incorrect number of sequences for MGYP.fasta and metadata table/file'\n ), (FAIL_MGYC_MGYP_METADATA,\n 'Incorrect number of sequences for MGYC, MGYP and metadata'), (\n FAIL_LEGACY, 'Assembly marked as legacy')\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n status = models.IntegerField('status', choices=STATUS)\n fail_reason = models.IntegerField('fail_reason', choices=FAIL_REASONS,\n null=True, blank=True)\n pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.\n DO_NOTHING)\n last_updated = models.DateTimeField('Last updated', auto_now=True)\n assembly_id_pdb = models.IntegerField('id_pdb', null=True)\n legacy = models.IntegerField(\n 'assembly_id for new accession for legacy assembly', null=True,\n blank=True)\n\n\n class Meta:\n app_label = 'backlog'\n db_table = 'AssemblyProteinDB'\n",
"step-4": "<mask token>\n\n\nclass User(models.Model):\n\n\n class Meta:\n db_table = 'User'\n app_label = 'backlog'\n webin_id = models.CharField(\"ENA's submission account id\", max_length=\n 15, unique=True, primary_key=True)\n registered = models.BooleanField(\n \"A copy of ENA's ROLE_METAGENOME_SUBMITTER flag. Set to True if submitter is registered with EMG.\"\n , default=False)\n consent_given = models.BooleanField(\n \"A copy of ENA's ROLE_METAGENOME_ANALYSIS flag. Set to True if submitter gave permission to access and analyse their private data.\"\n , default=False)\n email_address = models.CharField('Submitters email address.',\n max_length=200)\n first_name = models.CharField(max_length=30, null=True)\n surname = models.CharField(max_length=50, null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n\n\nclass Submission(models.Model):\n\n\n class Meta:\n db_table = 'Submission'\n app_label = 'backlog'\n primary_accession = models.CharField(max_length=20, unique=True, null=True)\n secondary_accession = models.CharField(max_length=20, unique=True, null\n =True)\n uuid = models.CharField(max_length=100, blank=True, unique=True, null=True)\n created = models.DateTimeField(default=timezone.now)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Biome(models.Model):\n\n\n class Meta:\n db_table = 'Biome'\n app_label = 'backlog'\n biome_id = models.IntegerField(primary_key=True, unique=True)\n biome_name = models.CharField(max_length=60)\n lft = models.IntegerField()\n rgt = models.IntegerField()\n depth = models.IntegerField()\n lineage = models.CharField(max_length=500)\n\n\nclass StudyError(models.Model):\n\n\n class Meta:\n db_table = 'StudyErrorType'\n app_label = 'backlog'\n name = models.CharField(max_length=100, unique=True)\n description = models.TextField()\n\n\nclass Pipeline(models.Model):\n\n\n class Meta:\n db_table = 'Pipeline'\n app_label = 'backlog'\n version = models.FloatField(primary_key=True)\n\n\nclass Blacklist(models.Model):\n\n\n class Meta:\n db_table = 'Blacklist'\n app_label = 'backlog'\n date_blacklisted = models.DateField(auto_now_add=True)\n pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)\n error = models.ForeignKey(StudyError, on_delete=models.CASCADE)\n user = models.CharField(max_length=16)\n comment = models.TextField(null=False)\n\n\nclass Study(models.Model):\n\n\n class Meta:\n db_table = 'Study'\n app_label = 'backlog'\n unique_together = 'primary_accession', 'secondary_accession'\n primary_accession = models.CharField(max_length=20)\n secondary_accession = models.CharField(max_length=20)\n title = models.CharField(max_length=4000, null=True)\n description = models.CharField(max_length=4000, null=True, blank=True)\n scientific_name = models.CharField(max_length=200, null=True, blank=True)\n public = models.BooleanField(default=True)\n hold_date = models.DateField(null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n ena_last_update = models.DateField(null=True)\n mixs_compliant = models.NullBooleanField()\n pubmed = models.TextField(null=True)\n webin = models.CharField(max_length=100, null=True)\n blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE,\n null=True)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Run(models.Model):\n\n\n class Meta:\n db_table = 'Run'\n app_label = 'backlog'\n study = 
models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n sample_primary_accession = models.CharField(max_length=20, blank=True,\n null=True)\n compressed_data_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. (bytes)', null=True, blank=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, related_name=\n 'inferred_run_biome', on_delete=models.DO_NOTHING, null=True, blank\n =True)\n base_count = models.BigIntegerField(null=True, blank=True)\n read_count = models.BigIntegerField(null=True, blank=True)\n instrument_platform = models.CharField(max_length=4000)\n instrument_model = models.CharField(max_length=4000)\n library_strategy = models.CharField(max_length=150, null=True, db_index\n =True)\n library_layout = models.CharField(max_length=20)\n library_source = models.CharField(max_length=20, null=True)\n ena_last_update = models.DateField(null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n public = models.BooleanField(default=True)\n\n\nclass UserRequest(models.Model):\n\n\n class Meta:\n db_table = 'UserRequest'\n app_label = 'backlog'\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column=\n 'user_id')\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n priority = models.IntegerField(default=0)\n rt_ticket = models.IntegerField(unique=True)\n\n\nclass AssemblyType(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyType'\n app_label = 'backlog'\n assembly_type = models.CharField(max_length=80, unique=True, null=False)\n\n def __str__(self):\n return self.assembly_type\n\n\nclass Assembly(models.Model):\n\n\n class Meta:\n db_table = 'Assembly'\n app_label = 'backlog'\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n biome = models.ForeignKey(Biome, to_field='biome_id', db_column=\n 'biome_id', on_delete=models.DO_NOTHING, null=True, blank=True)\n inferred_biome = models.ForeignKey(Biome, db_column='inferred_biome_id',\n to_field='biome_id', related_name='inferred_assembly_biome',\n on_delete=models.DO_NOTHING, null=True, blank=True)\n public = models.BooleanField(default=True)\n ena_last_update = models.DateField(null=True)\n assembly_type = models.ForeignKey('AssemblyType', db_column=\n 'assembly_type_id', on_delete=models.DO_NOTHING, blank=True, null=True)\n\n\nclass Assembler(models.Model):\n\n\n class Meta:\n db_table = 'Assembler'\n app_label = 'backlog'\n name = models.CharField(max_length=20)\n version = models.CharField(max_length=20)\n\n\nclass AssemblyJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=100)\n\n\nclass AssemblyJobResult(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJobResult'\n app_label = 'backlog'\n execution_time = models.BigIntegerField(help_text=\n 'Total execution time (including restarts) of the assembler, in seconds.'\n )\n peak_mem = models.BigIntegerField(help_text=\n 'Peak memory usage of the assembler, in megabytes.')\n n50 = models.IntegerField()\n l50 = models.IntegerField()\n num_contigs = models.IntegerField()\n assembly_length = models.BigIntegerField()\n largest_contig = models.BigIntegerField()\n coverage = 
models.FloatField()\n coverage_depth = models.FloatField()\n\n\nclass AssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyJob'\n app_label = 'backlog'\n assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)\n submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING,\n null=True)\n request_id = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n input_size = models.BigIntegerField(help_text=\n 'Sum of filesizes of compressed input. (bytes)')\n reason = models.TextField(null=True, help_text=\n 'Filled iff assembly will not be submitted to ENA, specifies the reason why.'\n )\n requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n priority = models.IntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3,\n 'High')], null=True)\n result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE,\n null=True)\n estimated_peak_mem = models.BigIntegerField(help_text=\n 'Estimated peak memory usage of the assembler, in megabytes.', null\n =True)\n uploaded_to_ena = models.NullBooleanField()\n bam_uploaded = models.NullBooleanField()\n new_ena_assembly = models.CharField(max_length=20, null=True)\n runs = models.ManyToManyField(Run, through='RunAssemblyJob',\n related_name='assemblyjobs', blank=True)\n\n\nclass RunAssemblyJob(models.Model):\n\n\n class Meta:\n db_table = 'RunAssemblyJob'\n app_label = 'backlog'\n unique_together = ('run', 'assembly_job'),\n run = models.ForeignKey(Run, on_delete=models.CASCADE)\n assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)\n\n\nclass RunAssembly(models.Model):\n\n\n class Meta:\n db_table = 'RunAssembly'\n app_label = 'backlog'\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n\n\nclass AnnotationJobStatus(models.Model):\n\n\n class Meta:\n db_table = 'AnnotationJobStatus'\n app_label = 'backlog'\n description = models.CharField(max_length=20)\n\n\nclass AnnotationJob(models.Model):\n PRIORITY_LOW = 1\n PRIORITY_MEDIUM = 2\n PRIORITY_HIGH = 3\n PRIORITIES = [(PRIORITY_LOW, 'Low'), (PRIORITY_MEDIUM, 'Medium'), (\n PRIORITY_HIGH, 'High')]\n RESULT_NO_TAX = 'no_tax'\n RESULT_NO_QC = 'no_qc'\n RESULT_NO_CDS = 'no_cds'\n RESULT_NO_CDS_TAX = 'no_cds_tax'\n RESULT_FULL = 'full'\n RESULT_CHOICES = (RESULT_NO_TAX, 'No Taxonomy results'), (RESULT_NO_QC,\n 'Failed QC'), (RESULT_NO_CDS, 'No CDS found'), (RESULT_FULL,\n 'No problems'), (RESULT_NO_CDS_TAX, 'No CDS or taxonomy found')\n pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AnnotationJobStatus, on_delete=models.\n DO_NOTHING, db_index=True)\n priority = models.IntegerField(choices=PRIORITIES)\n request = models.ForeignKey(UserRequest, on_delete=models.DO_NOTHING,\n null=True, db_column='request_id')\n directory = models.CharField(max_length=255, null=True, blank=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n runs = models.ManyToManyField(Run, through='RunAnnotationJob',\n related_name='annotationjobs', blank=True)\n attempt = models.IntegerField(default=0)\n result_status = models.CharField(max_length=10, choices=RESULT_CHOICES,\n blank=True, null=True)\n\n\n class Meta:\n db_table = 'AnnotationJob'\n app_label = 'backlog'\n\n\nclass RunAnnotationJob(models.Model):\n\n\n class 
Meta:\n db_table = 'RunAnnotationJob'\n app_label = 'backlog'\n unique_together = ('run', 'annotation_job'),\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyAnnotationJob(models.Model):\n\n\n class Meta:\n db_table = 'AssemblyAnnotationJob'\n app_label = 'backlog'\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING,\n related_name='assemblyannotationjobs')\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyProteinDB(models.Model):\n STATUS_COMPLETED = 1\n STATUS_FAIL = 0\n STATUS = (STATUS_COMPLETED, 'Completed'), (STATUS_FAIL, 'Failed')\n FAIL_FASTA_MISSING = 1\n FAIL_PIPELINE_VERSION = 2\n FAIL_FASTA_DIR = 3\n FAIL_SUPRESSED = 4\n FAIL_MGYC = 5\n FAIL_MGYP = 6\n FAIL_METADATA = 7\n FAIL_MGYC_MGYP = 8\n FAIL_MGYC_METADATA = 9\n FAIL_MGYP_METADATA = 10\n FAIL_MGYC_MGYP_METADATA = 11\n FAIL_LEGACY = 12\n FAIL_REASONS = (FAIL_FASTA_MISSING, 'Missing protein fasta file'), (\n FAIL_PIPELINE_VERSION,\n 'Assembly was added with higher version of pipeline'), (FAIL_FASTA_DIR,\n 'Assembly results directory is missing'), (FAIL_SUPRESSED,\n 'Suppressed assembly'), (FAIL_MGYC,\n 'Incorrect number of sequences for MGYC.fasta'), (FAIL_MGYP,\n 'Incorrect number of sequences for MGYP.fasta'), (FAIL_METADATA,\n 'Incorrect number of records for metadata'), (FAIL_MGYC_MGYP,\n 'Incorrect MGYC and MGYP but metadata is OK'), (FAIL_MGYC_METADATA,\n 'Incorrect number of sequences for MGYC.fasta and metadata table/file'\n ), (FAIL_MGYP_METADATA,\n 'Incorrect number of sequences for MGYP.fasta and metadata table/file'\n ), (FAIL_MGYC_MGYP_METADATA,\n 'Incorrect number of sequences for MGYC, MGYP and metadata'), (\n FAIL_LEGACY, 'Assembly marked as legacy')\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n status = models.IntegerField('status', choices=STATUS)\n fail_reason = models.IntegerField('fail_reason', choices=FAIL_REASONS,\n null=True, blank=True)\n pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.\n DO_NOTHING)\n last_updated = models.DateTimeField('Last updated', auto_now=True)\n assembly_id_pdb = models.IntegerField('id_pdb', null=True)\n legacy = models.IntegerField(\n 'assembly_id for new accession for legacy assembly', null=True,\n blank=True)\n\n\n class Meta:\n app_label = 'backlog'\n db_table = 'AssemblyProteinDB'\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\n\n\nclass User(models.Model):\n class Meta:\n db_table = \"User\"\n app_label = \"backlog\"\n\n webin_id = models.CharField(\n \"ENA's submission account id\", max_length=15, unique=True, primary_key=True\n )\n registered = models.BooleanField(\n \"A copy of ENA's ROLE_METAGENOME_SUBMITTER flag. Set to True if submitter is registered with EMG.\",\n default=False,\n )\n consent_given = models.BooleanField(\n \"A copy of ENA's ROLE_METAGENOME_ANALYSIS flag. Set to True if submitter gave permission to access and analyse their private data.\",\n default=False,\n )\n email_address = models.CharField(\"Submitters email address.\", max_length=200)\n first_name = models.CharField(max_length=30, null=True)\n surname = models.CharField(max_length=50, null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n\n\nclass Submission(models.Model):\n class Meta:\n db_table = \"Submission\"\n app_label = \"backlog\"\n\n primary_accession = models.CharField(max_length=20, unique=True, null=True)\n secondary_accession = models.CharField(max_length=20, unique=True, null=True)\n uuid = models.CharField(max_length=100, blank=True, unique=True, null=True)\n created = models.DateTimeField(default=timezone.now)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Biome(models.Model):\n class Meta:\n db_table = \"Biome\"\n app_label = \"backlog\"\n\n biome_id = models.IntegerField(primary_key=True, unique=True)\n biome_name = models.CharField(max_length=60)\n lft = models.IntegerField()\n rgt = models.IntegerField()\n depth = models.IntegerField()\n lineage = models.CharField(max_length=500)\n\n\nclass StudyError(models.Model):\n class Meta:\n db_table = \"StudyErrorType\"\n app_label = \"backlog\"\n\n name = models.CharField(max_length=100, unique=True)\n description = models.TextField()\n\n\nclass Pipeline(models.Model):\n class Meta:\n db_table = \"Pipeline\"\n app_label = \"backlog\"\n\n version = models.FloatField(primary_key=True)\n\n\nclass Blacklist(models.Model):\n class Meta:\n db_table = \"Blacklist\"\n app_label = \"backlog\"\n\n date_blacklisted = models.DateField(auto_now_add=True)\n pipeline_version = models.ForeignKey(Pipeline, on_delete=models.CASCADE)\n error = models.ForeignKey(StudyError, on_delete=models.CASCADE)\n user = models.CharField(max_length=16)\n comment = models.TextField(null=False)\n\n\nclass Study(models.Model):\n class Meta:\n db_table = \"Study\"\n app_label = \"backlog\"\n unique_together = (\"primary_accession\", \"secondary_accession\")\n\n primary_accession = models.CharField(max_length=20)\n secondary_accession = models.CharField(max_length=20)\n title = models.CharField(max_length=4000, null=True)\n description = models.CharField(max_length=4000, null=True, blank=True)\n scientific_name = models.CharField(max_length=200, null=True, blank=True)\n public = models.BooleanField(default=True)\n hold_date = models.DateField(null=True)\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n ena_last_update = models.DateField(null=True)\n mixs_compliant = models.NullBooleanField()\n pubmed = models.TextField(null=True)\n webin = models.CharField(max_length=100, null=True)\n blacklisted = models.ForeignKey(Blacklist, on_delete=models.CASCADE, null=True)\n submitter = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n\nclass Run(models.Model):\n class 
Meta:\n db_table = \"Run\"\n app_label = \"backlog\"\n\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n\n primary_accession = models.CharField(max_length=20, unique=True)\n sample_primary_accession = models.CharField(max_length=20, blank=True, null=True)\n compressed_data_size = models.BigIntegerField(\n help_text=\"Sum of filesizes of compressed input. (bytes)\", null=True, blank=True\n )\n biome = models.ForeignKey(\n Biome,\n to_field=\"biome_id\",\n db_column=\"biome_id\",\n on_delete=models.DO_NOTHING,\n null=True,\n blank=True,\n )\n inferred_biome = models.ForeignKey(\n Biome,\n related_name=\"inferred_run_biome\",\n on_delete=models.DO_NOTHING,\n null=True,\n blank=True,\n )\n base_count = models.BigIntegerField(null=True, blank=True)\n read_count = models.BigIntegerField(null=True, blank=True)\n instrument_platform = models.CharField(max_length=4000)\n instrument_model = models.CharField(max_length=4000)\n library_strategy = models.CharField(max_length=150, null=True, db_index=True)\n library_layout = models.CharField(max_length=20)\n library_source = models.CharField(max_length=20, null=True)\n ena_last_update = models.DateField(null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n public = models.BooleanField(default=True)\n\n\nclass UserRequest(models.Model):\n class Meta:\n db_table = \"UserRequest\"\n app_label = \"backlog\"\n\n user = models.ForeignKey(User, on_delete=models.DO_NOTHING, db_column=\"user_id\")\n first_created = models.DateTimeField(auto_now_add=True, null=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n priority = models.IntegerField(default=0)\n rt_ticket = models.IntegerField(unique=True)\n\n\nclass AssemblyType(models.Model):\n class Meta:\n db_table = \"AssemblyType\"\n app_label = \"backlog\"\n\n assembly_type = models.CharField(max_length=80, unique=True, null=False)\n\n def __str__(self):\n return self.assembly_type\n\n\n# Assemblies received from ENA\nclass Assembly(models.Model):\n class Meta:\n db_table = \"Assembly\"\n app_label = \"backlog\"\n\n study = models.ForeignKey(Study, on_delete=models.CASCADE)\n primary_accession = models.CharField(max_length=20, unique=True)\n biome = models.ForeignKey(\n Biome,\n to_field=\"biome_id\",\n db_column=\"biome_id\",\n on_delete=models.DO_NOTHING,\n null=True,\n blank=True,\n )\n inferred_biome = models.ForeignKey(\n Biome,\n db_column=\"inferred_biome_id\",\n to_field=\"biome_id\",\n related_name=\"inferred_assembly_biome\",\n on_delete=models.DO_NOTHING,\n null=True,\n blank=True,\n )\n public = models.BooleanField(default=True)\n ena_last_update = models.DateField(null=True)\n assembly_type = models.ForeignKey(\n \"AssemblyType\",\n db_column=\"assembly_type_id\",\n on_delete=models.DO_NOTHING,\n blank=True,\n null=True,\n )\n\n\nclass Assembler(models.Model):\n class Meta:\n db_table = \"Assembler\"\n app_label = \"backlog\"\n\n name = models.CharField(max_length=20)\n version = models.CharField(max_length=20)\n\n\nclass AssemblyJobStatus(models.Model):\n class Meta:\n db_table = \"AssemblyJobStatus\"\n app_label = \"backlog\"\n\n description = models.CharField(max_length=100)\n\n\nclass AssemblyJobResult(models.Model):\n class Meta:\n db_table = \"AssemblyJobResult\"\n app_label = \"backlog\"\n\n execution_time = models.BigIntegerField(\n help_text=\"Total execution time (including restarts) of the assembler, in seconds.\"\n )\n peak_mem = models.BigIntegerField(\n help_text=\"Peak memory usage of the assembler, in megabytes.\"\n )\n\n n50 = 
models.IntegerField()\n l50 = models.IntegerField()\n num_contigs = models.IntegerField()\n assembly_length = models.BigIntegerField()\n largest_contig = models.BigIntegerField()\n coverage = models.FloatField()\n # average depth of coverage of the assembly\n coverage_depth = models.FloatField()\n\n\nclass AssemblyJob(models.Model):\n class Meta:\n db_table = \"AssemblyJob\"\n app_label = \"backlog\"\n\n assembler = models.ForeignKey(Assembler, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(AssemblyJobStatus, on_delete=models.DO_NOTHING)\n submission = models.ForeignKey(Submission, on_delete=models.DO_NOTHING, null=True)\n\n request_id = models.ForeignKey(\n UserRequest,\n on_delete=models.DO_NOTHING,\n null=True,\n db_column=\"request_id\",\n )\n directory = models.CharField(max_length=255, null=True, blank=True)\n\n input_size = models.BigIntegerField(\n help_text=\"Sum of filesizes of compressed input. (bytes)\"\n )\n reason = models.TextField(\n null=True,\n help_text=\"Filled iff assembly will not be submitted to ENA, specifies the reason why.\",\n )\n requester = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True)\n\n priority = models.IntegerField(\n choices=[(1, \"Low\"), (2, \"Medium\"), (3, \"High\")], null=True\n )\n result = models.ForeignKey(AssemblyJobResult, on_delete=models.CASCADE, null=True)\n estimated_peak_mem = models.BigIntegerField(\n help_text=\"Estimated peak memory usage of the assembler, in megabytes.\",\n null=True,\n )\n\n uploaded_to_ena = models.NullBooleanField()\n bam_uploaded = models.NullBooleanField()\n new_ena_assembly = models.CharField(max_length=20, null=True)\n runs = models.ManyToManyField(\n Run, through=\"RunAssemblyJob\", related_name=\"assemblyjobs\", blank=True\n )\n\n\n# Assembly instances for runs\nclass RunAssemblyJob(models.Model):\n class Meta:\n db_table = \"RunAssemblyJob\"\n app_label = \"backlog\"\n unique_together = ((\"run\", \"assembly_job\"),)\n\n run = models.ForeignKey(Run, on_delete=models.CASCADE)\n assembly_job = models.ForeignKey(AssemblyJob, on_delete=models.CASCADE)\n\n\n# Show all runs used to create an assembly\nclass RunAssembly(models.Model):\n class Meta:\n db_table = \"RunAssembly\"\n app_label = \"backlog\"\n\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n\n\nclass AnnotationJobStatus(models.Model):\n class Meta:\n db_table = \"AnnotationJobStatus\"\n app_label = \"backlog\"\n\n description = models.CharField(max_length=20)\n\n\nclass AnnotationJob(models.Model):\n\n PRIORITY_LOW = 1\n PRIORITY_MEDIUM = 2\n PRIORITY_HIGH = 3\n\n PRIORITIES = [\n (PRIORITY_LOW, \"Low\"),\n (PRIORITY_MEDIUM, \"Medium\"),\n (PRIORITY_HIGH, \"High\"),\n ]\n\n # Pipeline execution result status.\n # For example the pipeline may find no CDS so most steps\n # aren't going to be executed for this data set.\n RESULT_NO_TAX = \"no_tax\"\n RESULT_NO_QC = \"no_qc\"\n RESULT_NO_CDS = \"no_cds\"\n RESULT_NO_CDS_TAX = \"no_cds_tax\"\n # pipeline completed all the stages\n RESULT_FULL = \"full\"\n\n RESULT_CHOICES = (\n (RESULT_NO_TAX, \"No Taxonomy results\"),\n (RESULT_NO_QC, \"Failed QC\"),\n (RESULT_NO_CDS, \"No CDS found\"),\n (RESULT_FULL, \"No problems\"),\n (RESULT_NO_CDS_TAX, \"No CDS or taxonomy found\"),\n )\n\n pipeline = models.ForeignKey(Pipeline, on_delete=models.DO_NOTHING)\n status = models.ForeignKey(\n AnnotationJobStatus, on_delete=models.DO_NOTHING, db_index=True\n )\n priority = models.IntegerField(choices=PRIORITIES)\n 
request = models.ForeignKey(\n UserRequest, on_delete=models.DO_NOTHING, null=True, db_column=\"request_id\"\n )\n directory = models.CharField(max_length=255, null=True, blank=True)\n last_updated = models.DateTimeField(auto_now=True, null=True)\n runs = models.ManyToManyField(\n Run, through=\"RunAnnotationJob\", related_name=\"annotationjobs\", blank=True\n )\n attempt = models.IntegerField(default=0)\n\n result_status = models.CharField(\n max_length=10, choices=RESULT_CHOICES, blank=True, null=True\n )\n\n class Meta:\n db_table = \"AnnotationJob\"\n app_label = \"backlog\"\n\n\n# Annotation instance for a run\nclass RunAnnotationJob(models.Model):\n class Meta:\n db_table = \"RunAnnotationJob\"\n app_label = \"backlog\"\n unique_together = ((\"run\", \"annotation_job\"),)\n\n run = models.ForeignKey(Run, on_delete=models.DO_NOTHING)\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyAnnotationJob(models.Model):\n class Meta:\n db_table = \"AssemblyAnnotationJob\"\n app_label = \"backlog\"\n\n assembly = models.ForeignKey(\n Assembly, on_delete=models.DO_NOTHING, related_name=\"assemblyannotationjobs\"\n )\n annotation_job = models.ForeignKey(AnnotationJob, on_delete=models.CASCADE)\n\n\nclass AssemblyProteinDB(models.Model):\n\n STATUS_COMPLETED = 1\n STATUS_FAIL = 0\n STATUS = ((STATUS_COMPLETED, \"Completed\"), (STATUS_FAIL, \"Failed\"))\n\n FAIL_FASTA_MISSING = 1\n FAIL_PIPELINE_VERSION = 2\n FAIL_FASTA_DIR = 3\n FAIL_SUPRESSED = 4\n FAIL_MGYC = 5\n FAIL_MGYP = 6\n FAIL_METADATA = 7\n FAIL_MGYC_MGYP = 8\n FAIL_MGYC_METADATA = 9\n FAIL_MGYP_METADATA = 10\n FAIL_MGYC_MGYP_METADATA = 11\n FAIL_LEGACY = 12\n\n FAIL_REASONS = (\n (FAIL_FASTA_MISSING, \"Missing protein fasta file\"),\n (FAIL_PIPELINE_VERSION, \"Assembly was added with higher version of pipeline\"),\n (FAIL_FASTA_DIR, \"Assembly results directory is missing\"),\n (FAIL_SUPRESSED, \"Suppressed assembly\"),\n (FAIL_MGYC, \"Incorrect number of sequences for MGYC.fasta\"),\n (FAIL_MGYP, \"Incorrect number of sequences for MGYP.fasta\"),\n (FAIL_METADATA, \"Incorrect number of records for metadata\"),\n (FAIL_MGYC_MGYP, \"Incorrect MGYC and MGYP but metadata is OK\"),\n (FAIL_MGYC_METADATA, \"Incorrect number of sequences for MGYC.fasta and metadata table/file\"),\n (FAIL_MGYP_METADATA, \"Incorrect number of sequences for MGYP.fasta and metadata table/file\"),\n (FAIL_MGYC_MGYP_METADATA, \"Incorrect number of sequences for MGYC, MGYP and metadata\"),\n (FAIL_LEGACY, \"Assembly marked as legacy\"),\n )\n\n assembly = models.ForeignKey(Assembly, on_delete=models.DO_NOTHING)\n status = models.IntegerField(\"status\", choices=STATUS)\n fail_reason = models.IntegerField(\n \"fail_reason\", choices=FAIL_REASONS, null=True, blank=True\n )\n pipeline = models.ForeignKey(Pipeline, null=True, on_delete=models.DO_NOTHING)\n last_updated = models.DateTimeField(\"Last updated\", auto_now=True)\n assembly_id_pdb = models.IntegerField(\"id_pdb\", null=True)\n legacy = models.IntegerField(\"assembly_id for new accession for legacy assembly\", null=True, blank=True)\n\n class Meta:\n app_label = \"backlog\"\n db_table = \"AssemblyProteinDB\"\n",
"step-ids": [
35,
36,
38,
45,
47
]
}
|
[
35,
36,
38,
45,
47
] |
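Stepping back from the raw record above, a minimal query sketch against these backlog models might look like the following; the import path and the 'pending' status description are assumptions for illustration, not taken from the source:

# Hypothetical usage of the backlog models (assumes a configured Django project
# with the 'backlog' app installed and migrated).
from backlog.models import AssemblyJob, AssemblyJobStatus  # assumed import path

pending = AssemblyJobStatus.objects.get(description='pending')  # assumed status row
for job in AssemblyJob.objects.filter(status=pending).select_related('assembler'):
    print(job.assembler.name, job.priority)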
<|reserved_special_token_0|>
class link_to_block(nodes.Admonition, nodes.Element):
<|reserved_special_token_0|>
pass
class LinkToBlock(BaseAdmonition):
""" Hidden technical block"""
node_class = link_to_block
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = True
option_spec = {'right-side': bool, 'label': str}
def run(self):
new_content = ViewList()
ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',
'Link To'), ''.join(self.arguments))
new_content.append(ref, source=self.content)
self.content = new_content
return super(LinkToBlock, self).run()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class link_to_block(nodes.Admonition, nodes.Element):
""" Node for inserting a link to button."""
pass
class LinkToBlock(BaseAdmonition):
""" Hidden technical block"""
node_class = link_to_block
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = True
option_spec = {'right-side': bool, 'label': str}
def run(self):
new_content = ViewList()
ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',
'Link To'), ''.join(self.arguments))
new_content.append(ref, source=self.content)
self.content = new_content
return super(LinkToBlock, self).run()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class link_to_block(nodes.Admonition, nodes.Element):
""" Node for inserting a link to button."""
pass
class LinkToBlock(BaseAdmonition):
""" Hidden technical block"""
node_class = link_to_block
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = True
option_spec = {'right-side': bool, 'label': str}
def run(self):
new_content = ViewList()
ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',
'Link To'), ''.join(self.arguments))
new_content.append(ref, source=self.content)
self.content = new_content
return super(LinkToBlock, self).run()
<|reserved_special_token_0|>
def setup(app):
app.add_directive('link-to-block', LinkToBlock)
app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class link_to_block(nodes.Admonition, nodes.Element):
""" Node for inserting a link to button."""
pass
class LinkToBlock(BaseAdmonition):
""" Hidden technical block"""
node_class = link_to_block
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = True
option_spec = {'right-side': bool, 'label': str}
def run(self):
new_content = ViewList()
ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',
'Link To'), ''.join(self.arguments))
new_content.append(ref, source=self.content)
self.content = new_content
return super(LinkToBlock, self).run()
def visit_ltb_html(self, node):
""" Visit link to block"""
position = node.get('right-side', True)
self.body.append("<div class='{0}'>".format('buttonNext' if position else
'buttonPrevious'))
def depart_ltb_html(self, node):
""" Depart link to block"""
self.depart_admonition(node)
def setup(app):
app.add_directive('link-to-block', LinkToBlock)
app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))
<|reserved_special_token_1|>
# System import
import os
# Docutils import
from docutils import nodes
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.statemachine import ViewList
# Add node
class link_to_block(nodes.Admonition, nodes.Element):
""" Node for inserting a link to button."""
pass
# Add directive
class LinkToBlock(BaseAdmonition):
""" Hidden technical block"""
node_class = link_to_block
has_content = False
required_arguments = 1
optional_arguments = 2
final_argument_whitespace = True
option_spec = {
"right-side": bool,
"label": str
}
def run(self):
# Construct an empty node
new_content = ViewList()
ref = u":ref:`{0} <{1}>`".format(
self.options.get("label", "Link To"),
"".join(self.arguments))
new_content.append(ref, source=self.content)
self.content = new_content
return super(LinkToBlock, self).run()
# Add html writer
def visit_ltb_html(self, node):
""" Visit link to block"""
# Generate the html div
position = node.get("right-side", True)
self.body.append("<div class='{0}'>".format(
"buttonNext" if position else "buttonPrevious"))
def depart_ltb_html(self, node):
""" Depart link to block"""
# Add close div
self.depart_admonition(node)
# Register new directive
def setup(app):
app.add_directive("link-to-block", LinkToBlock)
app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))
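A brief usage sketch for this directive; the extension module name and the target label are illustrative, not from the source:

# conf.py -- register the extension, assuming the module above is importable
# as 'link_to_block_ext' (hypothetical name):
extensions = ['link_to_block_ext']

# page.rst -- emits a ':ref:' link to an existing label; the html writer wraps
# it in <div class='buttonNext'> (or 'buttonPrevious' when right-side is false):
#
#   .. link-to-block:: my-target-label
#      :label: Next chapter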
|
flexible
|
{
"blob_id": "63cce356b792949b90b215e0a5826f7b33d2d375",
"index": 8064,
"step-1": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n <mask token>\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n<mask token>\n\n\ndef setup(app):\n app.add_directive('link-to-block', LinkToBlock)\n app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))\n",
"step-4": "<mask token>\n\n\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {'right-side': bool, 'label': str}\n\n def run(self):\n new_content = ViewList()\n ref = u':ref:`{0} <{1}>`'.format(self.options.get('label',\n 'Link To'), ''.join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\ndef visit_ltb_html(self, node):\n \"\"\" Visit link to block\"\"\"\n position = node.get('right-side', True)\n self.body.append(\"<div class='{0}'>\".format('buttonNext' if position else\n 'buttonPrevious'))\n\n\ndef depart_ltb_html(self, node):\n \"\"\" Depart link to block\"\"\"\n self.depart_admonition(node)\n\n\ndef setup(app):\n app.add_directive('link-to-block', LinkToBlock)\n app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))\n",
"step-5": "# System import\nimport os\n\n# Docutils import\nfrom docutils import nodes\nfrom docutils.parsers.rst.directives.admonitions import BaseAdmonition\nfrom docutils.statemachine import ViewList\n\n\n# Add node\nclass link_to_block(nodes.Admonition, nodes.Element):\n \"\"\" Node for inserting a link to button.\"\"\"\n pass\n\n\n# Add directive\nclass LinkToBlock(BaseAdmonition):\n \"\"\" Hidden technical block\"\"\"\n node_class = link_to_block\n has_content = False\n required_arguments = 1\n optional_arguments = 2\n final_argument_whitespace = True\n option_spec = {\n \"right-side\": bool,\n \"label\": str\n }\n\n def run(self):\n # Construct an empty node\n new_content = ViewList()\n ref = u\":ref:`{0} <{1}>`\".format(\n self.options.get(\"label\", \"Link To\"),\n \"\".join(self.arguments))\n new_content.append(ref, source=self.content)\n self.content = new_content\n return super(LinkToBlock, self).run()\n\n\n# Add html writer\ndef visit_ltb_html(self, node):\n \"\"\" Visit link to block\"\"\" \n # Generate the html div\n position = node.get(\"right-side\", True)\n self.body.append(\"<div class='{0}'>\".format(\n \"buttonNext\" if position else \"buttonPrevious\"))\n\n\ndef depart_ltb_html(self, node):\n \"\"\" Depart link to block\"\"\"\n # Add close div\n self.depart_admonition(node)\n\n\n# Register new directive\ndef setup(app):\n app.add_directive(\"link-to-block\", LinkToBlock)\n app.add_node(link_to_block, html=(visit_ltb_html, depart_ltb_html))\n",
"step-ids": [
5,
6,
7,
9,
11
]
}
|
[
5,
6,
7,
9,
11
] |
from liver_tumor_segmentation.CGBS_Net import *
from liver_tumor_segmentation.loss import *
from keras.optimizers import *
from liver_tumor_segmentation.CGBS_data_generator import *
from keras.callbacks import *
import os
from keras.callbacks import ReduceLROnPlateau
from keras import losses
from configuration import *
def get_lr_metric(optimizer):
def lr(y_true, y_pred):
return optimizer.lr
return lr
def train():
batch_size = 4 #4 for single GPU; 8 for two GPUs
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
    trainGene = trainGenerator(batch_size, data_path='/data',
                               folder='train', aug_dict=aug_args, seed=1, interaction='RECIST')
    devGene = trainGenerator(batch_size, data_path='/data',
                             folder='dev', aug_dict=no_aug_args, seed=1, interaction='RECIST')
    testGene = testGenerator(test_path='test_path', interaction='RECIST')
model = CGBS_Net(input_shape=(256, 256, 4),rate=3)
model.summary()
# GPU_COUNT = 2
# model = multi_gpu_model(original_model, GPU_COUNT)
opt=SGD(lr=4e-4, decay=1e-6, momentum=0.9, nesterov=True)
lr_metric = get_lr_metric(opt)
model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss, 'out_shape': losses.binary_crossentropy},
loss_weights={'out_seg': 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])
csv_logger = CSVLogger('./Models/'+'CGBS_Net.csv', append=True) # ss-0.01
# tensorboard = TensorBoard(log_dir='./tmp/graph', write_graph=True, write_images=True)
# earlystopping = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')
model_checkpoint = ModelCheckpoint(
'./Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5',
monitor='val_out_seg_loss',
verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1, patience=50, mode='auto')
model.fit_generator(generator=trainGene, steps_per_epoch=int(5000/batch_size),
epochs=500, validation_data=devGene,
validation_steps=int(5000/batch_size), verbose=2,
callbacks=[model_checkpoint, csv_logger, reduce_lr])
train()
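The get_lr_metric helper above is just a closure over the optimizer; a dependency-free sketch of the same pattern, with a stand-in optimizer object (illustrative only, not Keras):

class _FakeOptimizer:
    lr = 4e-4  # mimics the 'lr' attribute that Keras optimizers expose

def _get_lr_metric(optimizer):
    def lr(y_true, y_pred):   # Keras metrics receive (y_true, y_pred)...
        return optimizer.lr   # ...but this one simply reports the current learning rate
    return lr

metric = _get_lr_metric(_FakeOptimizer())
print(metric(None, None))     # -> 0.0004

Passing such a metric to model.compile makes the current learning rate appear in the per-epoch logs alongside the dice coefficient.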
|
normal
|
{
"blob_id": "8c17f2c770c24bbf8c73628c6740c0b866e6b1c0",
"index": 9047,
"step-1": "<mask token>\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_lr_metric(optimizer):\n\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_lr_metric(optimizer):\n\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\ntrain()\n",
"step-4": "from liver_tumor_segmentation.CGBS_Net import *\nfrom liver_tumor_segmentation.loss import *\nfrom keras.optimizers import *\nfrom liver_tumor_segmentation.CGBS_data_generator import *\nfrom keras.callbacks import *\nimport os\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras import losses\nfrom configuration import *\n\n\ndef get_lr_metric(optimizer):\n\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\n\n\ndef train():\n batch_size = 4\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n trainGene = trainGenerator(batch_size, data_path='/data', folder=\n 'train', aug_dict=aug_args, seed=1, interaction='RECIST')\n devGene = trainGenerator(batch_size, data_path='/data', folder='dev',\n aug_dict=no_aug_args, seed=1, interaction='RECIST')\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\n model = CGBS_Net(input_shape=(256, 256, 4), rate=3)\n model.summary()\n opt = SGD(lr=0.0004, decay=1e-06, momentum=0.9, nesterov=True)\n lr_metric = get_lr_metric(opt)\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss,\n 'out_shape': losses.binary_crossentropy}, loss_weights={'out_seg': \n 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\n csv_logger = CSVLogger('./Models/' + 'CGBS_Net.csv', append=True)\n model_checkpoint = ModelCheckpoint(\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5', monitor\n ='val_out_seg_loss', verbose=0, save_best_only=True,\n save_weights_only=True, mode='auto', period=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1,\n patience=50, mode='auto')\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000 /\n batch_size), epochs=500, validation_data=devGene, validation_steps=\n int(5000 / batch_size), verbose=2, callbacks=[model_checkpoint,\n csv_logger, reduce_lr])\n\n\ntrain()\n",
"step-5": "from liver_tumor_segmentation.CGBS_Net import *\r\nfrom liver_tumor_segmentation.loss import *\r\nfrom keras.optimizers import *\r\nfrom liver_tumor_segmentation.CGBS_data_generator import *\r\nfrom keras.callbacks import *\r\nimport os\r\nfrom keras.callbacks import ReduceLROnPlateau\r\nfrom keras import losses\r\nfrom configuration import *\r\n\r\ndef get_lr_metric(optimizer):\r\n def lr(y_true, y_pred):\r\n return optimizer.lr\r\n\r\n return lr\r\ndef train():\r\n batch_size = 4 #4 for single GPU; 8 for two GPUs\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\r\n\r\n trainGene = trainGenerator(batch_size, data_path='/data',\r\n folder='train', aug_dict=aug_args, seed = 1, interaction='RECIST')\r\n devGene = trainGenerator(batch_size, data_path='/data',\r\n folder='dev', aug_dict=no_aug_args, seed = 1, interaction='RECIST')\r\n testGene = testGenerator(test_path='test_path', interaction='RECIST')\r\n\r\n model = CGBS_Net(input_shape=(256, 256, 4),rate=3)\r\n model.summary()\r\n\r\n # GPU_COUNT = 2\r\n # model = multi_gpu_model(original_model, GPU_COUNT)\r\n\r\n opt=SGD(lr=4e-4, decay=1e-6, momentum=0.9, nesterov=True)\r\n lr_metric = get_lr_metric(opt)\r\n model.compile(optimizer=opt, loss={'out_seg': dice_coef_loss, 'out_shape': losses.binary_crossentropy},\r\n loss_weights={'out_seg': 1, 'out_shape': 1}, metrics=[dice_coef, lr_metric])\r\n\r\n csv_logger = CSVLogger('./Models/'+'CGBS_Net.csv', append=True) # ss-0.01\r\n # tensorboard = TensorBoard(log_dir='./tmp/graph', write_graph=True, write_images=True)\r\n # earlystopping = EarlyStopping(monitor='val_loss', patience=0, verbose=0, mode='auto')\r\n\r\n model_checkpoint = ModelCheckpoint(\r\n './Models/CGBS/{epoch:02d}-{val_out_seg_dice_coef:.4f}.h5',\r\n monitor='val_out_seg_loss',\r\n verbose=0, save_best_only=True, save_weights_only=True, mode='auto', period=1)\r\n reduce_lr = ReduceLROnPlateau(monitor='val_out_seg_loss', factor=0.1, patience=50, mode='auto')\r\n model.fit_generator(generator=trainGene, steps_per_epoch=int(5000/batch_size),\r\n epochs=500, validation_data=devGene,\r\n validation_steps=int(5000/batch_size), verbose=2,\r\n callbacks=[model_checkpoint, csv_logger, reduce_lr])\r\n\r\n\r\ntrain()\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['A1', 'A']
<|reserved_special_token_1|>
from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import A1, A
__all__ = ['A1', 'A']
<|reserved_special_token_1|>
from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (
A1,
A,
)
__all__ = [
"A1",
"A",
]
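The __all__ list above only constrains wildcard imports; names left out remain importable explicitly. A self-contained illustration of the rule, using a synthetic module rather than the actual package:

import sys, types

m = types.ModuleType('demo')           # stand-in for the re-export module
m.A1, m.A, m._hidden = 1, 2, 3
m.__all__ = ['A1', 'A']
sys.modules['demo'] = m

ns = {}
exec('from demo import *', ns)          # star-import honours __all__
print(sorted(k for k in ns if not k.startswith('__')))  # ['A', 'A1'] -- _hidden excluded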
|
flexible
|
{
"blob_id": "846a42a997539a45576d3ecbe0bd290e00b55935",
"index": 3258,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['A1', 'A']\n",
"step-3": "from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import A1, A\n__all__ = ['A1', 'A']\n",
"step-4": "from output.models.sun_data.ctype.content_type.content_type00401m.content_type00401m_xsd.content_type00401m import (\n A1,\n A,\n)\n\n__all__ = [\n \"A1\",\n \"A\",\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
fname = input('Enter the file name to open')
fh = open(fname)
lst1 = list()
data = dict()
for ln in fh :
if ln.startswith("From"):
if ln.startswith('From:'):
continue
else :
word = ln.split()
lst1.append(word[1])
for word in lst1:
data[word] = data.get(word,0)+1
bigcount = None
bigword = None
for word,count in data.items():
if bigcount is None or bigcount<count:
bigcount = count
bigword = word
print(bigword,bigcount)
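
# Hedged worked example (not part of the original script): given mbox-style
# input where sender lines look like "From cwen@iupui.edu Sat Jan  5 09:14:16 2008",
# the loop above skips "From:" header lines, collects the address after each
# bare "From", and tallies it. A file with two such lines for cwen@iupui.edu
# and one for louis@media.berkeley.edu would print:
# cwen@iupui.edu 2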
|
normal
|
{
"blob_id": "4fba13d051a3aceb393a4473cdbf6d4fc684c7ac",
"index": 9473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor ln in fh:\n if ln.startswith('From'):\n if ln.startswith('From:'):\n continue\n else:\n word = ln.split()\n lst1.append(word[1])\nfor word in lst1:\n data[word] = data.get(word, 0) + 1\n<mask token>\nfor word, count in data.items():\n if bigcount is None or bigcount < count:\n bigcount = count\n bigword = word\nprint(bigword, bigcount)\n",
"step-3": "fname = input('Enter the file name to open')\nfh = open(fname)\nlst1 = list()\ndata = dict()\nfor ln in fh:\n if ln.startswith('From'):\n if ln.startswith('From:'):\n continue\n else:\n word = ln.split()\n lst1.append(word[1])\nfor word in lst1:\n data[word] = data.get(word, 0) + 1\nbigcount = None\nbigword = None\nfor word, count in data.items():\n if bigcount is None or bigcount < count:\n bigcount = count\n bigword = word\nprint(bigword, bigcount)\n",
"step-4": "fname = input('Enter the file name to open')\r\nfh = open(fname)\r\nlst1 = list()\r\ndata = dict()\r\nfor ln in fh :\r\n if ln.startswith(\"From\"):\r\n if ln.startswith('From:'):\r\n continue\r\n else :\r\n word = ln.split()\r\n lst1.append(word[1])\r\nfor word in lst1:\r\n data[word] = data.get(word,0)+1\r\nbigcount = None\r\nbigword = None\r\nfor word,count in data.items():\r\n if bigcount is None or bigcount<count:\r\n bigcount = count\r\n bigword = word\r\nprint(bigword,bigcount)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_app(config_name):
app = Flask(__name__, static_folder=static_file_dir)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
db.init_app(app)
return app
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = SQLAlchemy()
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'static')
def create_app(config_name):
app = Flask(__name__, static_folder=static_file_dir)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
db.init_app(app)
return app
<|reserved_special_token_1|>
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
import os
db = SQLAlchemy()
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'static')
def create_app(config_name):
app = Flask(__name__, static_folder=static_file_dir)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
db.init_app(app)
return app
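
# Minimal usage sketch (an assumption, not shown in the original module): it
# presumes the imported `config` mapping has a 'default' entry and that the
# `.main` blueprint package sits next to this file.
# app = create_app('default')
# app.run(debug=True)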
|
flexible
|
{
"blob_id": "bee6ba1db608c1d9c8114f89d4b3abab795a6b86",
"index": 3843,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'static')\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config import config\nimport os\ndb = SQLAlchemy()\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'static')\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import time
while 1:
r = requests.put("http://localhost:3000/api/4", data={"temperature": 24, "led": 1})
    print(r.text)
time.sleep(1)
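
# Hedged variant (assumes the server also accepts a JSON body, which the
# original form-encoded call does not guarantee):
# r = requests.put("http://localhost:3000/api/4",
#                  json={"temperature": 24, "led": 1})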
|
normal
|
{
"blob_id": "23a560c5f5553fc32329121ea47f8a7ae1196889",
"index": 440,
"step-1": "import requests\nimport time\n\nwhile 1:\n r = requests.put(\"http://localhost:3000/api/4\", data={\"temperature\": 24, \"led\": 1})\n print r.text\n time.sleep(1)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.2.7 on 2021-10-01 06:43
from django.db import migrations
import django_resized.forms
import event.models.event
import event.models.event_agenda
class Migration(migrations.Migration):
dependencies = [
('event', '0009_auto_20211001_0406'),
]
operations = [
migrations.AlterField(
model_name='event',
name='map',
field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa del evento', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event.event_pictures, verbose_name='Mapa'),
),
migrations.AlterField(
model_name='eventagenda',
name='map',
field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa de la exposicion', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.event_pictures, verbose_name='Mapa'),
),
]
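
# Hedged usage note (not part of the generated file): a migration like this is
# applied with Django's standard management command, e.g.
#   python manage.py migrate event
# which runs the two AlterField operations above against the configured database.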
|
normal
|
{
"blob_id": "d0a053faccecddc84a9556aec3dff691b171df96",
"index": 9977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('event', '0009_auto_20211001_0406')]\n operations = [migrations.AlterField(model_name='event', name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None,\n force_format='JPEG', help_text='Mapa del evento', keep_meta=True,\n null=True, quality=90, size=[1920, 1080], upload_to=event.models.\n event.event_pictures, verbose_name='Mapa')), migrations.AlterField(\n model_name='eventagenda', name='map', field=django_resized.forms.\n ResizedImageField(blank=True, crop=None, force_format='JPEG',\n help_text='Mapa de la exposicion', keep_meta=True, null=True,\n quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.\n event_pictures, verbose_name='Mapa'))]\n",
"step-4": "from django.db import migrations\nimport django_resized.forms\nimport event.models.event\nimport event.models.event_agenda\n\n\nclass Migration(migrations.Migration):\n dependencies = [('event', '0009_auto_20211001_0406')]\n operations = [migrations.AlterField(model_name='event', name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None,\n force_format='JPEG', help_text='Mapa del evento', keep_meta=True,\n null=True, quality=90, size=[1920, 1080], upload_to=event.models.\n event.event_pictures, verbose_name='Mapa')), migrations.AlterField(\n model_name='eventagenda', name='map', field=django_resized.forms.\n ResizedImageField(blank=True, crop=None, force_format='JPEG',\n help_text='Mapa de la exposicion', keep_meta=True, null=True,\n quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.\n event_pictures, verbose_name='Mapa'))]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-10-01 06:43\n\nfrom django.db import migrations\nimport django_resized.forms\nimport event.models.event\nimport event.models.event_agenda\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('event', '0009_auto_20211001_0406'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa del evento', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event.event_pictures, verbose_name='Mapa'),\n ),\n migrations.AlterField(\n model_name='eventagenda',\n name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa de la exposicion', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.event_pictures, verbose_name='Mapa'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['CountEncoder', 'CombinCountEncoder', 'FrequencyEncoder',
'NullCounter', 'AutoCalcEncoder', 'extract_obj_cols']
<|reserved_special_token_1|>
from CategoryReplacer.CategoryReplcaers import CountEncoder
from CategoryReplacer.CategoryReplcaers import CombinCountEncoder
from CategoryReplacer.CategoryReplcaers import FrequencyEncoder
from CategoryReplacer.CategoryReplcaers import NullCounter
from CategoryReplacer.CategoryReplcaers import AutoCalcEncoder
from CategoryReplacer.CategoryReplcaers import extract_obj_cols
__all__ = ['CountEncoder', 'CombinCountEncoder', 'FrequencyEncoder',
'NullCounter', 'AutoCalcEncoder', 'extract_obj_cols']
<|reserved_special_token_1|>
from CategoryReplacer.CategoryReplcaers import CountEncoder
from CategoryReplacer.CategoryReplcaers import CombinCountEncoder
from CategoryReplacer.CategoryReplcaers import FrequencyEncoder
from CategoryReplacer.CategoryReplcaers import NullCounter
from CategoryReplacer.CategoryReplcaers import AutoCalcEncoder
from CategoryReplacer.CategoryReplcaers import extract_obj_cols
__all__ = [
"CountEncoder",
"CombinCountEncoder",
"FrequencyEncoder",
"NullCounter",
"AutoCalcEncoder",
"extract_obj_cols"
]
|
flexible
|
{
"blob_id": "d28e517e72c3689e973a5b1255d414648de418fb",
"index": 1658,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['CountEncoder', 'CombinCountEncoder', 'FrequencyEncoder',\n 'NullCounter', 'AutoCalcEncoder', 'extract_obj_cols']\n",
"step-3": "from CategoryReplacer.CategoryReplcaers import CountEncoder\nfrom CategoryReplacer.CategoryReplcaers import CombinCountEncoder\nfrom CategoryReplacer.CategoryReplcaers import FrequencyEncoder\nfrom CategoryReplacer.CategoryReplcaers import NullCounter\nfrom CategoryReplacer.CategoryReplcaers import AutoCalcEncoder\nfrom CategoryReplacer.CategoryReplcaers import extract_obj_cols\n__all__ = ['CountEncoder', 'CombinCountEncoder', 'FrequencyEncoder',\n 'NullCounter', 'AutoCalcEncoder', 'extract_obj_cols']\n",
"step-4": "from CategoryReplacer.CategoryReplcaers import CountEncoder\nfrom CategoryReplacer.CategoryReplcaers import CombinCountEncoder\nfrom CategoryReplacer.CategoryReplcaers import FrequencyEncoder\nfrom CategoryReplacer.CategoryReplcaers import NullCounter\nfrom CategoryReplacer.CategoryReplcaers import AutoCalcEncoder\nfrom CategoryReplacer.CategoryReplcaers import extract_obj_cols\n\n__all__ = [\n \"CountEncoder\",\n \"CombinCountEncoder\",\n \"FrequencyEncoder\",\n \"NullCounter\",\n \"AutoCalcEncoder\",\n \"extract_obj_cols\"\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from functools import reduce
from math import (log, sqrt)
import matplotlib.pyplot as plt
import matplotlib.pylab as mlab
import numpy
import random
import scipy.stats
class Node:
def __init__(
self,
name,
val=None,
observed=False,
candidate_standard_deviation=1,
save_samples=False
):
self.name = name
self.val = val
self.observed = observed
self.candidate_standard_deviation = candidate_standard_deviation
self.children = []
self.posteriors = []
self.rejected = 0
self.stayed = 0
self.accepted = 0
#if save_samples:
# self.file = open(self.name, 'w')
#else:
# self.file = None
def likelihood(self):
raise NotImplementedError
def complete_conditional(self, target):
return reduce(
lambda l, child: l + child.likelihood(),
self.children,
self.likelihood()
)
def save_sample(self, val):
if self.file:
self.file.write('{}\n'.format(self.val))
def sample(self, isBurn=False):
if self.observed:
#self.save_sample()
return self.val
# get a candidate value
cand = numpy.random.normal(self.val, self.candidate_standard_deviation)
cand = self.cleanse_val(cand)
#print(self.name, 'cand', cand)
if not self.in_support(cand):
#print('*****', self.name, 'reject', cand)
if not isBurn:
self.posteriors.append(self.val)
self.rejected = self.rejected + 1
self.stayed = self.stayed + 1
#self.save_sample()
return self.val
old_val = self.val
reject_likelihood = self.likelihood(old_val)
accept_likelihood = self.likelihood(cand)
        # factor in the children with the current value
for child in self.children:
reject_likelihood += child.likelihood()
# get the likelihood of the candidate value
self.val = cand
for child in self.children:
accept_likelihood += child.likelihood()
u = log(random.random())
#print(self.name, 'r', reject_likelihood)
#print(self.name, 'a', accept_likelihood)
#print(self.name, 'u', u)
# set it back if staying is more likely
if u >= accept_likelihood - reject_likelihood:
#print(self.name, 'set it back')
self.val = old_val
if not isBurn:
self.stayed = self.stayed + 1
else:
#print(self.name, 'keep the cand', cand)
if not isBurn:
self.accepted = self.accepted + 1
if not isBurn:
self.posteriors.append(self.val)
#self.save_sample()
return self.val
def cleanse_val(self, val):
return val
# Need a function to handle the Add node's value retrieval
def value(self):
return self.val
def add_child(self, child):
self.children.append(child)
def mixplot(self, write=False):
if (len(self.posteriors) == 0):
return
xs, ys = zip(*enumerate(self.posteriors))
plt.plot(xs, ys)
if write:
plt.savefig(self.name + '-mixplot.png')
plt.close()
else:
plt.show()
def plot_posterior(self, write=False):
if (len(self.posteriors) == 0):
return
#sample_min = min(self.posteriors)
#sample_max = max(self.posteriors)
#xs = mlab.frange(sample_min, sample_max, (sample_max - sample_min) / 100)
#ys = [self.pdf(x) for x in xs]
        #plt.plot(xs, ys, label='Prior Dist ' + self.name)
#plt.title('Prior Dist {}:{}'.format(self.name, self.candidate_standard_deviation))
plt.title('Posterior {}'.format(self.name))
        plt.hist(self.posteriors, bins=30, density=True, label="Posterior Dist " + self.name)
if write:
plt.savefig(self.name + '-posterior.png')
plt.close()
else:
plt.show()
def __add__(self, other):
return Add(self, other)
def __pow__(self, other):
return Power(self, other)
class Add(Node):
def __init__(self, *args):
def map_args(n):
if isinstance(n, Node):
return n
else:
return Fixed('Fixed ({})'.format(n), val=n)
self.parents = [ map_args(n) for n in list(args)]
Node.__init__(
self,
':'.join([ p.name for p in self.parents ]) + ' (Add)',
)
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return reduce(lambda total, p: total + p.value(), self.parents, 0)
# The purpose of this node is to just have something that gives a fixed value
# With a probability of 1. This is useful for priors.
class Fixed(Node):
def __init__(self, name, val=None):
Node.__init__(
self,
name + ' (Fixed)',
val=val
)
def likelihood(self):
# It's in log space, remember
return 0
class Power(Node):
def __init__(self, base, exponent):
if isinstance(base, Node):
self.base = base
else:
self.base = Fixed('base {}'.format(base), val=base)
if isinstance(exponent, Node):
self.exponent = exponent
else:
self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)
name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)
Node.__init__(
self,
name,
)
self.parents = [ self.base, self.exponent ]
def add_child(self, child):
for p in self.parents:
p.add_child(child)
def value(self):
return self.base.value() ** self.exponent.value()
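
# A minimal concrete node, included here as an illustrative sketch: the module
# above does not ship one, so the class name and wiring are assumptions. It
# shows the contract sample() relies on: likelihood() must accept an optional
# explicit value (sample() calls it with old_val and cand, children with no
# argument), and in_support() gates candidate proposals. scipy.stats is
# already imported above.
class NormalNode(Node):
    def __init__(self, name, mean, sd, **kwargs):
        Node.__init__(self, name, **kwargs)
        self.mean = mean  # a plain float or another Node
        self.sd = sd

    def in_support(self, val):
        # the normal density is positive on the whole real line
        return True

    def likelihood(self, val=None):
        v = self.val if val is None else val
        m = self.mean.value() if isinstance(self.mean, Node) else self.mean
        s = self.sd.value() if isinstance(self.sd, Node) else self.sd
        # log density, matching the log-space accept/reject test in sample()
        return scipy.stats.norm.logpdf(v, loc=m, scale=s)

# Hypothetical wiring: a prior on mu with one observed child, sampled 1000 times.
# mu = NormalNode('mu', mean=0., sd=10., val=0.)
# x1 = NormalNode('x1', mean=mu, sd=1., val=2.3, observed=True)
# mu.add_child(x1)
# for _ in range(1000):
#     mu.sample()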
|
normal
|
{
"blob_id": "4c5db1af9fd1c9b09f6e64a44d72351807c0f7a5",
"index": 8136,
"step-1": "<mask token>\n\n\nclass Node:\n <mask token>\n <mask token>\n <mask token>\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n <mask token>\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n <mask token>\n <mask token>\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-2": "<mask token>\n\n\nclass Node:\n <mask token>\n <mask token>\n\n def complete_conditional(self, target):\n return reduce(lambda l, child: l + child.likelihood(), self.\n children, self.likelihood())\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n <mask token>\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n <mask token>\n <mask token>\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, name, val=None, observed=False,\n candidate_standard_deviation=1, save_samples=False):\n self.name = name\n self.val = val\n self.observed = observed\n self.candidate_standard_deviation = candidate_standard_deviation\n self.children = []\n self.posteriors = []\n self.rejected = 0\n self.stayed = 0\n self.accepted = 0\n <mask token>\n\n def complete_conditional(self, target):\n return reduce(lambda l, child: l + child.likelihood(), self.\n children, self.likelihood())\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n\n def value(self):\n return self.val\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n\n def plot_posterior(self, write=False):\n if len(self.posteriors) == 0:\n return\n plt.title('Posterior {}'.format(self.name))\n plt.hist(self.posteriors, bins=30, normed=True, label=\n 'Posterior Dist ' + self.name)\n if write:\n plt.savefig(self.name + '-posterior.png')\n plt.close()\n else:\n plt.show()\n\n def __add__(self, other):\n return Add(self, other)\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-4": "<mask token>\n\n\nclass Node:\n\n def __init__(self, name, val=None, observed=False,\n candidate_standard_deviation=1, save_samples=False):\n self.name = name\n self.val = val\n self.observed = observed\n self.candidate_standard_deviation = candidate_standard_deviation\n self.children = []\n self.posteriors = []\n self.rejected = 0\n self.stayed = 0\n self.accepted = 0\n\n def likelihood(self):\n raise NotImplementedError\n\n def complete_conditional(self, target):\n return reduce(lambda l, child: l + child.likelihood(), self.\n children, self.likelihood())\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n return self.val\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n if not self.in_support(cand):\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n return self.val\n old_val = self.val\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n for child in self.children:\n reject_likelihood += child.likelihood()\n self.val = cand\n for child in self.children:\n accept_likelihood += child.likelihood()\n u = log(random.random())\n if u >= accept_likelihood - reject_likelihood:\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n elif not isBurn:\n self.accepted = self.accepted + 1\n if not isBurn:\n self.posteriors.append(self.val)\n return self.val\n\n def cleanse_val(self, val):\n return val\n\n def value(self):\n return self.val\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if len(self.posteriors) == 0:\n return\n xs, ys = zip(*enumerate(self.posteriors))\n plt.plot(xs, ys)\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n\n def plot_posterior(self, write=False):\n if len(self.posteriors) == 0:\n return\n plt.title('Posterior {}'.format(self.name))\n plt.hist(self.posteriors, bins=30, normed=True, label=\n 'Posterior Dist ' + self.name)\n if write:\n plt.savefig(self.name + '-posterior.png')\n plt.close()\n else:\n plt.show()\n\n def __add__(self, other):\n return Add(self, other)\n\n def __pow__(self, other):\n return Power(self, other)\n\n\nclass Add(Node):\n\n def __init__(self, *args):\n\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n self.parents = [map_args(n) for n in list(args)]\n Node.__init__(self, ':'.join([p.name for p in self.parents]) + ' (Add)'\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n\nclass Fixed(Node):\n\n def __init__(self, name, val=None):\n Node.__init__(self, name + ' (Fixed)', val=val)\n\n def likelihood(self):\n return 0\n\n\nclass Power(Node):\n\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n if isinstance(exponent, Node):\n self.exponent = exponent\n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n Node.__init__(self, name)\n self.parents = [self.base, self.exponent]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** 
self.exponent.value()\n",
"step-5": "from functools import reduce\nfrom math import (log, sqrt)\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as mlab\nimport numpy\nimport random\nimport scipy.stats\n\nclass Node:\n def __init__(\n self,\n name,\n val=None,\n observed=False,\n candidate_standard_deviation=1,\n save_samples=False\n ):\n self.name = name\n self.val = val\n self.observed = observed\n self.candidate_standard_deviation = candidate_standard_deviation\n self.children = []\n self.posteriors = []\n self.rejected = 0\n self.stayed = 0\n self.accepted = 0\n\n #if save_samples:\n # self.file = open(self.name, 'w')\n #else:\n # self.file = None\n\n def likelihood(self):\n raise NotImplementedError\n\n def complete_conditional(self, target):\n return reduce(\n lambda l, child: l + child.likelihood(),\n self.children,\n self.likelihood()\n )\n\n def save_sample(self, val):\n if self.file:\n self.file.write('{}\\n'.format(self.val))\n\n def sample(self, isBurn=False):\n if self.observed:\n #self.save_sample()\n return self.val\n\n # get a candidate value\n cand = numpy.random.normal(self.val, self.candidate_standard_deviation)\n cand = self.cleanse_val(cand)\n\n #print(self.name, 'cand', cand)\n\n if not self.in_support(cand):\n #print('*****', self.name, 'reject', cand)\n if not isBurn:\n self.posteriors.append(self.val)\n self.rejected = self.rejected + 1\n self.stayed = self.stayed + 1\n #self.save_sample()\n return self.val\n\n old_val = self.val\n\n reject_likelihood = self.likelihood(old_val)\n accept_likelihood = self.likelihood(cand)\n\n # factor in the children with the curernt value\n for child in self.children:\n reject_likelihood += child.likelihood()\n\n # get the likelihood of the candidate value\n self.val = cand\n\n for child in self.children:\n accept_likelihood += child.likelihood()\n\n u = log(random.random())\n\n #print(self.name, 'r', reject_likelihood)\n #print(self.name, 'a', accept_likelihood)\n #print(self.name, 'u', u)\n\n # set it back if staying is more likely\n if u >= accept_likelihood - reject_likelihood:\n #print(self.name, 'set it back')\n self.val = old_val\n if not isBurn:\n self.stayed = self.stayed + 1\n else:\n #print(self.name, 'keep the cand', cand)\n if not isBurn:\n self.accepted = self.accepted + 1\n\n if not isBurn:\n self.posteriors.append(self.val)\n #self.save_sample()\n\n return self.val\n\n def cleanse_val(self, val):\n return val\n\n # Need a function to handle the Add node's value retrieval\n def value(self):\n return self.val\n\n def add_child(self, child):\n self.children.append(child)\n\n def mixplot(self, write=False):\n if (len(self.posteriors) == 0):\n return\n\n xs, ys = zip(*enumerate(self.posteriors))\n\n plt.plot(xs, ys)\n\n if write:\n plt.savefig(self.name + '-mixplot.png')\n plt.close()\n else:\n plt.show()\n\n def plot_posterior(self, write=False):\n if (len(self.posteriors) == 0):\n return\n #sample_min = min(self.posteriors)\n #sample_max = max(self.posteriors)\n\n #xs = mlab.frange(sample_min, sample_max, (sample_max - sample_min) / 100)\n #ys = [self.pdf(x) for x in xs]\n #plt.plot(xs, ys, label='Priot Dist ' + self.name)\n\n #plt.title('Prior Dist {}:{}'.format(self.name, self.candidate_standard_deviation))\n plt.title('Posterior {}'.format(self.name))\n plt.hist(self.posteriors, bins=30, normed=True, label=\"Posterior Dist \" + self.name)\n\n if write:\n plt.savefig(self.name + '-posterior.png')\n plt.close()\n else:\n plt.show()\n\n def __add__(self, other):\n return Add(self, other)\n\n def __pow__(self, other):\n return 
Power(self, other)\n\nclass Add(Node):\n def __init__(self, *args):\n def map_args(n):\n if isinstance(n, Node):\n return n\n else:\n return Fixed('Fixed ({})'.format(n), val=n)\n\n self.parents = [ map_args(n) for n in list(args)]\n\n Node.__init__(\n self,\n ':'.join([ p.name for p in self.parents ]) + ' (Add)',\n )\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return reduce(lambda total, p: total + p.value(), self.parents, 0)\n\n# The purpose of this node is to just have something that gives a fixed value\n# With a probability of 1. This is useful for priors.\nclass Fixed(Node):\n def __init__(self, name, val=None):\n Node.__init__(\n self,\n name + ' (Fixed)',\n val=val\n )\n\n def likelihood(self):\n # It's in log space, remember\n return 0\n\nclass Power(Node):\n def __init__(self, base, exponent):\n if isinstance(base, Node):\n self.base = base\n else:\n self.base = Fixed('base {}'.format(base), val=base)\n\n if isinstance(exponent, Node):\n self.exponent = exponent \n else:\n self.exponent = Fixed('exponent {}'.format(exponent), val=exponent)\n\n name = '{}:{} (Pow)'.format(self.base.name, self.exponent.name)\n\n Node.__init__(\n self,\n name,\n )\n\n self.parents = [ self.base, self.exponent ]\n\n def add_child(self, child):\n for p in self.parents:\n p.add_child(child)\n\n def value(self):\n return self.base.value() ** self.exponent.value()\n",
"step-ids": [
18,
19,
23,
24,
26
]
}
|
[
18,
19,
23,
24,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in listings:
coins[str(i['id'])] = i['slug']
slugs[i['slug']] = str(i['id'])
for i in coins:
page = requests.get(
f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630'
)
soup = BeautifulSoup(page.content, 'html.parser')
data = soup.find('script', id='__NEXT_DATA__', type='application/json')
if data is not None:
historical_data = json.loads(data.contents[0])
if str(i) in historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical']:
quotes = historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical'][i]['quotes']
name = historical_data['props']['initialState']['cryptocurrency'][
'ohlcvHistorical'][i]['name']
symbol = historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical'][i]['symbol']
historical_list.append((quotes, name, symbol))
<|reserved_special_token_0|>
for data in historical_list:
quotes, curr_name, curr_symbol = data
for j in quotes:
market_cap.append(j['quote']['USD']['market_cap'])
volume.append(j['quote']['USD']['volume'])
high.append(j['quote']['USD']['high'])
low.append(j['quote']['USD']['low'])
open.append(j['quote']['USD']['open'])
timestamp.append(j['quote']['USD']['timestamp'])
name.append(curr_name)
symbol.append(curr_symbol)
<|reserved_special_token_0|>
df.to_csv('cryptos.csv', index=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cmc = requests.get('https://coinmarketcap.com/')
soup = BeautifulSoup(cmc.content, 'html.parser')
data = soup.find('script', id='__NEXT_DATA__', type='application/json')
coins = {}
slugs = {}
coin_data = json.loads(data.contents[0])
listings = coin_data['props']['initialState']['cryptocurrency']['listingLatest'
]['data']
historical_list = []
for i in listings:
coins[str(i['id'])] = i['slug']
slugs[i['slug']] = str(i['id'])
for i in coins:
page = requests.get(
f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630'
)
soup = BeautifulSoup(page.content, 'html.parser')
data = soup.find('script', id='__NEXT_DATA__', type='application/json')
if data is not None:
historical_data = json.loads(data.contents[0])
if str(i) in historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical']:
quotes = historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical'][i]['quotes']
name = historical_data['props']['initialState']['cryptocurrency'][
'ohlcvHistorical'][i]['name']
symbol = historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical'][i]['symbol']
historical_list.append((quotes, name, symbol))
market_cap = []
volume = []
high = []
low = []
open = []
timestamp = []
name = []
symbol = []
for data in historical_list:
quotes, curr_name, curr_symbol = data
for j in quotes:
market_cap.append(j['quote']['USD']['market_cap'])
volume.append(j['quote']['USD']['volume'])
high.append(j['quote']['USD']['high'])
low.append(j['quote']['USD']['low'])
open.append(j['quote']['USD']['open'])
timestamp.append(j['quote']['USD']['timestamp'])
name.append(curr_name)
symbol.append(curr_symbol)
df = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open',
'timestamp', 'name', 'symbol'])
df['marketcap'] = market_cap
df['volume'] = volume
df['high'] = high
df['low'] = low
df['open'] = open
df['timestamp'] = timestamp
df['name'] = name
df['symbol'] = symbol
df.to_csv('cryptos.csv', index=False)
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
cmc = requests.get('https://coinmarketcap.com/')
soup = BeautifulSoup(cmc.content, 'html.parser')
data = soup.find('script', id='__NEXT_DATA__', type='application/json')
coins = {}
slugs = {}
coin_data = json.loads(data.contents[0])
listings = coin_data['props']['initialState']['cryptocurrency']['listingLatest'
]['data']
historical_list = []
for i in listings:
coins[str(i['id'])] = i['slug']
slugs[i['slug']] = str(i['id'])
for i in coins:
page = requests.get(
f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630'
)
soup = BeautifulSoup(page.content, 'html.parser')
data = soup.find('script', id='__NEXT_DATA__', type='application/json')
if data is not None:
historical_data = json.loads(data.contents[0])
if str(i) in historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical']:
quotes = historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical'][i]['quotes']
name = historical_data['props']['initialState']['cryptocurrency'][
'ohlcvHistorical'][i]['name']
symbol = historical_data['props']['initialState']['cryptocurrency'
]['ohlcvHistorical'][i]['symbol']
historical_list.append((quotes, name, symbol))
market_cap = []
volume = []
high = []
low = []
open = []
timestamp = []
name = []
symbol = []
for data in historical_list:
quotes, curr_name, curr_symbol = data
for j in quotes:
market_cap.append(j['quote']['USD']['market_cap'])
volume.append(j['quote']['USD']['volume'])
high.append(j['quote']['USD']['high'])
low.append(j['quote']['USD']['low'])
open.append(j['quote']['USD']['open'])
timestamp.append(j['quote']['USD']['timestamp'])
name.append(curr_name)
symbol.append(curr_symbol)
df = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open',
'timestamp', 'name', 'symbol'])
df['marketcap'] = market_cap
df['volume'] = volume
df['high'] = high
df['low'] = low
df['open'] = open
df['timestamp'] = timestamp
df['name'] = name
df['symbol'] = symbol
df.to_csv('cryptos.csv', index=False)
<|reserved_special_token_1|>
from bs4 import BeautifulSoup
import requests
import pandas as pd
import json
cmc = requests.get('https://coinmarketcap.com/')
soup = BeautifulSoup(cmc.content, 'html.parser')
data = soup.find('script', id="__NEXT_DATA__", type="application/json")
coins = {}
slugs = {}
coin_data = json.loads(data.contents[0])
listings = coin_data['props']['initialState']['cryptocurrency']['listingLatest']['data']
historical_list = []
for i in listings:
coins[str(i['id'])] = i['slug']
slugs[i['slug']] = str(i['id'])
# https://coinmarketcap.com/currencies/[slug]/historical-data/?start=[YYYYMMDD]&end=[YYYYMMDD]
for i in coins:
page = requests.get(f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630')
soup = BeautifulSoup(page.content, 'html.parser')
data = soup.find('script', id="__NEXT_DATA__", type="application/json")
if data is not None:
historical_data = json.loads(data.contents[0])
if str(i) in historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical']:
quotes = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['quotes']
name = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['name']
symbol = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['symbol']
historical_list.append((quotes, name, symbol))
market_cap = []
volume = []
high = []
low = []
open = []
timestamp = []
name = []
symbol = []
# slug = []
for data in historical_list:
quotes, curr_name, curr_symbol = data
# curr_slug = slugs[curr_name.lower()]
for j in quotes:
market_cap.append(j['quote']['USD']['market_cap'])
volume.append(j['quote']['USD']['volume'])
high.append(j['quote']['USD']['high'])
low.append(j['quote']['USD']['low'])
open.append(j['quote']['USD']['open'])
timestamp.append(j['quote']['USD']['timestamp'])
name.append(curr_name)
symbol.append(curr_symbol)
# slug.append(curr_slug)
df = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open', 'timestamp', 'name', 'symbol'])
df['marketcap'] = market_cap
df['volume'] = volume
df['high'] = high
df['low'] = low
df['open'] = open
df['timestamp'] = timestamp
df['name'] = name
df['symbol'] = symbol
# df['slug'] = slug
df.to_csv('cryptos.csv', index=False)
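
# Hedged follow-up (not in the original script): the dumped CSV loads back with
# timestamps parsed as datetimes via
# df = pd.read_csv('cryptos.csv', parse_dates=['timestamp'])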
|
flexible
|
{
"blob_id": "925e1a1a99b70a8d56289b72fa0e16997e12d854",
"index": 4038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in listings:\n coins[str(i['id'])] = i['slug']\n slugs[i['slug']] = str(i['id'])\nfor i in coins:\n page = requests.get(\n f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630'\n )\n soup = BeautifulSoup(page.content, 'html.parser')\n data = soup.find('script', id='__NEXT_DATA__', type='application/json')\n if data is not None:\n historical_data = json.loads(data.contents[0])\n if str(i) in historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical']:\n quotes = historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical'][i]['quotes']\n name = historical_data['props']['initialState']['cryptocurrency'][\n 'ohlcvHistorical'][i]['name']\n symbol = historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical'][i]['symbol']\n historical_list.append((quotes, name, symbol))\n<mask token>\nfor data in historical_list:\n quotes, curr_name, curr_symbol = data\n for j in quotes:\n market_cap.append(j['quote']['USD']['market_cap'])\n volume.append(j['quote']['USD']['volume'])\n high.append(j['quote']['USD']['high'])\n low.append(j['quote']['USD']['low'])\n open.append(j['quote']['USD']['open'])\n timestamp.append(j['quote']['USD']['timestamp'])\n name.append(curr_name)\n symbol.append(curr_symbol)\n<mask token>\ndf.to_csv('cryptos.csv', index=False)\n",
"step-3": "<mask token>\ncmc = requests.get('https://coinmarketcap.com/')\nsoup = BeautifulSoup(cmc.content, 'html.parser')\ndata = soup.find('script', id='__NEXT_DATA__', type='application/json')\ncoins = {}\nslugs = {}\ncoin_data = json.loads(data.contents[0])\nlistings = coin_data['props']['initialState']['cryptocurrency']['listingLatest'\n ]['data']\nhistorical_list = []\nfor i in listings:\n coins[str(i['id'])] = i['slug']\n slugs[i['slug']] = str(i['id'])\nfor i in coins:\n page = requests.get(\n f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630'\n )\n soup = BeautifulSoup(page.content, 'html.parser')\n data = soup.find('script', id='__NEXT_DATA__', type='application/json')\n if data is not None:\n historical_data = json.loads(data.contents[0])\n if str(i) in historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical']:\n quotes = historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical'][i]['quotes']\n name = historical_data['props']['initialState']['cryptocurrency'][\n 'ohlcvHistorical'][i]['name']\n symbol = historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical'][i]['symbol']\n historical_list.append((quotes, name, symbol))\nmarket_cap = []\nvolume = []\nhigh = []\nlow = []\nopen = []\ntimestamp = []\nname = []\nsymbol = []\nfor data in historical_list:\n quotes, curr_name, curr_symbol = data\n for j in quotes:\n market_cap.append(j['quote']['USD']['market_cap'])\n volume.append(j['quote']['USD']['volume'])\n high.append(j['quote']['USD']['high'])\n low.append(j['quote']['USD']['low'])\n open.append(j['quote']['USD']['open'])\n timestamp.append(j['quote']['USD']['timestamp'])\n name.append(curr_name)\n symbol.append(curr_symbol)\ndf = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open',\n 'timestamp', 'name', 'symbol'])\ndf['marketcap'] = market_cap\ndf['volume'] = volume\ndf['high'] = high\ndf['low'] = low\ndf['open'] = open\ndf['timestamp'] = timestamp\ndf['name'] = name\ndf['symbol'] = symbol\ndf.to_csv('cryptos.csv', index=False)\n",
"step-4": "from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport json\ncmc = requests.get('https://coinmarketcap.com/')\nsoup = BeautifulSoup(cmc.content, 'html.parser')\ndata = soup.find('script', id='__NEXT_DATA__', type='application/json')\ncoins = {}\nslugs = {}\ncoin_data = json.loads(data.contents[0])\nlistings = coin_data['props']['initialState']['cryptocurrency']['listingLatest'\n ]['data']\nhistorical_list = []\nfor i in listings:\n coins[str(i['id'])] = i['slug']\n slugs[i['slug']] = str(i['id'])\nfor i in coins:\n page = requests.get(\n f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630'\n )\n soup = BeautifulSoup(page.content, 'html.parser')\n data = soup.find('script', id='__NEXT_DATA__', type='application/json')\n if data is not None:\n historical_data = json.loads(data.contents[0])\n if str(i) in historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical']:\n quotes = historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical'][i]['quotes']\n name = historical_data['props']['initialState']['cryptocurrency'][\n 'ohlcvHistorical'][i]['name']\n symbol = historical_data['props']['initialState']['cryptocurrency'\n ]['ohlcvHistorical'][i]['symbol']\n historical_list.append((quotes, name, symbol))\nmarket_cap = []\nvolume = []\nhigh = []\nlow = []\nopen = []\ntimestamp = []\nname = []\nsymbol = []\nfor data in historical_list:\n quotes, curr_name, curr_symbol = data\n for j in quotes:\n market_cap.append(j['quote']['USD']['market_cap'])\n volume.append(j['quote']['USD']['volume'])\n high.append(j['quote']['USD']['high'])\n low.append(j['quote']['USD']['low'])\n open.append(j['quote']['USD']['open'])\n timestamp.append(j['quote']['USD']['timestamp'])\n name.append(curr_name)\n symbol.append(curr_symbol)\ndf = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open',\n 'timestamp', 'name', 'symbol'])\ndf['marketcap'] = market_cap\ndf['volume'] = volume\ndf['high'] = high\ndf['low'] = low\ndf['open'] = open\ndf['timestamp'] = timestamp\ndf['name'] = name\ndf['symbol'] = symbol\ndf.to_csv('cryptos.csv', index=False)\n",
"step-5": "from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport json\n\ncmc = requests.get('https://coinmarketcap.com/')\nsoup = BeautifulSoup(cmc.content, 'html.parser')\n\ndata = soup.find('script', id=\"__NEXT_DATA__\", type=\"application/json\")\n\ncoins = {}\nslugs = {}\ncoin_data = json.loads(data.contents[0])\nlistings = coin_data['props']['initialState']['cryptocurrency']['listingLatest']['data']\n\nhistorical_list = []\n\nfor i in listings:\n coins[str(i['id'])] = i['slug']\n slugs[i['slug']] = str(i['id'])\n\n# https://coinmarketcap.com/currencies/[slug]/historical-data/?start=[YYYYMMDD]&end=[YYYYMMDD]\n\nfor i in coins:\n page = requests.get(f'https://coinmarketcap.com/currencies/{coins[i]}/historical-data/?start=20200101&end=20200630')\n soup = BeautifulSoup(page.content, 'html.parser')\n data = soup.find('script', id=\"__NEXT_DATA__\", type=\"application/json\")\n if data is not None:\n historical_data = json.loads(data.contents[0])\n if str(i) in historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical']:\n quotes = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['quotes']\n name = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['name']\n symbol = historical_data['props']['initialState']['cryptocurrency']['ohlcvHistorical'][i]['symbol']\n historical_list.append((quotes, name, symbol))\n\nmarket_cap = []\nvolume = []\nhigh = []\nlow = []\nopen = []\ntimestamp = []\nname = []\nsymbol = []\n# slug = []\n\nfor data in historical_list:\n quotes, curr_name, curr_symbol = data\n # curr_slug = slugs[curr_name.lower()]\n for j in quotes:\n market_cap.append(j['quote']['USD']['market_cap'])\n volume.append(j['quote']['USD']['volume'])\n high.append(j['quote']['USD']['high'])\n low.append(j['quote']['USD']['low'])\n open.append(j['quote']['USD']['open'])\n timestamp.append(j['quote']['USD']['timestamp'])\n name.append(curr_name)\n symbol.append(curr_symbol)\n # slug.append(curr_slug)\n\ndf = pd.DataFrame(columns=['marketcap', 'volume', 'high', 'low', 'open', 'timestamp', 'name', 'symbol'])\ndf['marketcap'] = market_cap\ndf['volume'] = volume\ndf['high'] = high\ndf['low'] = low\ndf['open'] = open\ndf['timestamp'] = timestamp\ndf['name'] = name\ndf['symbol'] = symbol\n# df['slug'] = slug\n\ndf.to_csv('cryptos.csv', index=False)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np, pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.base import BaseEstimator, TransformerMixin
from datetime import timedelta
import sys
DEBUG = False
class DailyAggregator(BaseEstimator, TransformerMixin):
''' Aggregates time-series values to daily level. '''
def __init__(self, id_columns, time_column, value_columns ):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
def fit(self, X, y=None): return self
def transform(self, X):
X = X.copy()
X[self.time_column] = X[self.time_column].dt.normalize()
X = X.groupby(by=self.id_columns + [self.time_column], as_index=False)[self.value_columns].sum()
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
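
# Hedged usage sketch for DailyAggregator (illustrative data, not part of the module):
# df = pd.DataFrame({'id': ['a', 'a'],
#                    'ts': pd.to_datetime(['2020-01-01 09:00', '2020-01-01 17:00']),
#                    'val': [1.0, 2.0]})
# DailyAggregator(id_columns='id', time_column='ts', value_columns='val').transform(df)
# -> a single row: id='a', ts=2020-01-01 00:00:00, val=3.0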
class MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):
''' Adds missing time intervals in a time-series dataframe. '''
DAYS = 'days'
MINUTES = 'minutes'
HOURS = 'hours'
def __init__(self, id_columns, time_column, value_columns, time_unit, step_size ):
super().__init__()
if not isinstance(id_columns, list):
self.id_columns = [id_columns]
else:
self.id_columns = id_columns
self.time_column = time_column
if not isinstance(value_columns, list):
self.value_columns = [value_columns]
else:
self.value_columns = value_columns
self.time_unit = time_unit
self.step_size = int(step_size)
def fit(self, X, y=None): return self # do nothing in fit
def transform(self, X):
min_time = X[self.time_column].min()
max_time = X[self.time_column].max()
# print(min_time, max_time)
if self.time_unit == MissingTimeIntervalFiller.DAYS:
num_steps = ( (max_time - min_time).days // self.step_size ) + 1
all_time_ints = [min_time + timedelta(days=x*self.step_size) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.HOURS:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1
all_time_ints = [min_time + timedelta(hours=x*self.step_size) for x in range(num_steps)]
elif self.time_unit == MissingTimeIntervalFiller.MINUTES:
time_diff_sec = (max_time - min_time).total_seconds()
num_steps = int(time_diff_sec // (60 * self.step_size)) + 1
# print('num_steps', num_steps)
all_time_ints = [min_time + timedelta(minutes=x*self.step_size) for x in range(num_steps)]
else:
raise Exception(f"Unrecognized time unit: {self.time_unit}. Must be one of ['days', 'hours', 'minutes'].")
# create df of all time intervals
full_intervals_df = pd.DataFrame(data = all_time_ints, columns = [self.time_column])
# get unique id-var values from original input data
id_cols_df = X[self.id_columns].drop_duplicates()
# get cross join of all time intervals and ids columns
        full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(foo=1)).drop(columns='foo')
# merge original data on to this full table
full_df = full_df.merge(X[self.id_columns + [self.time_column] + self.value_columns],
on=self.id_columns + [self.time_column], how='left')
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(full_df.head())
print(full_df.shape)
return full_df
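
# Hedged usage sketch for MissingTimeIntervalFiller: with daily rows present for
# 2020-01-01 and 2020-01-03 only, the cross join adds a 2020-01-02 row per id
# with NaN values.
# filler = MissingTimeIntervalFiller(id_columns='id', time_column='ts',
#                                    value_columns='val',
#                                    time_unit=MissingTimeIntervalFiller.DAYS,
#                                    step_size=1)
# full_df = filler.transform(df)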
class DataPivoter(BaseEstimator, TransformerMixin):
''' Pivots a dataframe with a given column '''
def __init__(self, non_pivoted_columns, pivoting_column, pivoted_columns, fill_na_val):
super().__init__()
self.non_pivoted_columns = \
[non_pivoted_columns] if not isinstance(non_pivoted_columns, list) else non_pivoted_columns
self.pivoted_columns = [pivoted_columns] if not isinstance(pivoted_columns, list) else pivoted_columns
self.pivoting_column = pivoting_column
self.fill_na_val = fill_na_val
def fit(self, X, y=None): return self # do nothing in fit
def transform(self, X):
processed_X = X.pivot_table(index = self.non_pivoted_columns,
aggfunc=sum,
columns=self.pivoting_column,
values=self.pivoted_columns,
fill_value = self.fill_na_val
).reset_index()
# pivot table will result in multi column index. To get a regular column names
processed_X.columns = [ col[0] if col[1] == '' else col[1] for col in processed_X.columns ]
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(processed_X.head())
print(processed_X.shape)
return processed_X
def inverse_transform(self, preds_df):
# unpivot given dataframe
preds_df2 = pd.melt(preds_df.reset_index(),
id_vars=self.non_pivoted_columns,
value_vars=preds_df.columns,
var_name = self.pivoting_column,
value_name = self.pivoted_columns[0]
)
return preds_df2
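
# Hedged usage sketch for DataPivoter: long-format (id, ts, val) rows become one
# column per ts value, and inverse_transform melts predictions back to long form.
# pivoter = DataPivoter(non_pivoted_columns='id', pivoting_column='ts',
#                       pivoted_columns='val', fill_na_val=0)
# wide = pivoter.transform(long_df)        # columns: 'id' plus one per ts
# long_again = pivoter.inverse_transform(wide.set_index('id'))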
class IndexSetter(BaseEstimator, TransformerMixin):
''' Set index '''
def __init__(self, index_cols, drop_existing):
self.index_cols = index_cols
self.drop_existing = drop_existing
def fit(self, X, y=None): return self # do nothing in fit
def transform(self, X):
X = X.copy()
X.reset_index(drop=self.drop_existing, inplace=True)
X.set_index(self.index_cols, inplace=True)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
class SubTimeSeriesSampler(BaseEstimator, TransformerMixin):
    ''' Samples a sub-series of length t <= the original series of length T. Assumes the series is in columns.
    Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len - 1>.
'''
def __init__(self, series_len, num_reps):
self.series_len = series_len
self.num_reps = num_reps
def fit(self, X, y=None): return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(f"Error sampling series. Target length {self.series_len} exceeds current length {curr_len}")
sampled_data = []
data_arr = X.values
for _ in range(self.num_reps):
for i in range(data_arr.shape[0]):
                # upper bound of randint is exclusive; +1 keeps curr_len == series_len valid
                rand_idx = np.random.randint(0, curr_len - self.series_len + 1)
sampled_data.append( data_arr[i, rand_idx: rand_idx + self.series_len] )
idx = list(X.index) * self.num_reps
col_names = [ f't_{i}' for i in range(self.series_len)]
sampled_data = pd.DataFrame(sampled_data, columns=col_names, index= idx)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(sampled_data.head())
print(sampled_data.shape)
return sampled_data
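
# Hedged usage sketch for SubTimeSeriesSampler: each row of wide_df is a series
# in columns; num_reps random windows of series_len points are drawn per row and
# relabeled t_0 .. t_<series_len - 1>.
# sampler = SubTimeSeriesSampler(series_len=50, num_reps=2)
# windows = sampler.transform(wide_df)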
class AddLeftRightFlipper(BaseEstimator, TransformerMixin):
'''
Adds left right flipped version of tensor
'''
def __init__(self): pass
def fit(self, X, y=None): return self
def transform(self, X):
X_flipped = pd.DataFrame( np.fliplr(X), columns=X.columns, index=X.index )
X = pd.concat([X, X_flipped], axis=0, ignore_index=True)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
class SeriesLengthTrimmer(BaseEstimator, TransformerMixin):
'''
Trims the length of a series to use latest data points
'''
def __init__(self, series_len):
self.series_len = series_len
def fit(self, X, y=None): return self
def transform(self, X):
curr_len = X.shape[1]
if curr_len < self.series_len:
raise Exception(f"Error trimming series. Target length {self.series_len} exceeds current length {curr_len}")
X_vals = X.values[:, -self.series_len:]
col_names = [ f't_{i}' for i in range(self.series_len)]
X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X_vals.head())
print(X_vals.shape)
return X_vals
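
# Example (hypothetical shapes): with series_len=365, an (n, 455) frame becomes
# (n, 365), keeping only the latest 365 columns relabelled t_0 ... t_364.
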
class DFShuffler(BaseEstimator, TransformerMixin):
def __init__(self, shuffle = True):
self.shuffle = shuffle
def fit(self, X, y=None): return self
def transform(self, X, y=None):
        if not self.shuffle: return X
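        # sample(frac=1) draws all rows without replacement, i.e. shuffles the frame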
X = X.sample(frac=1)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
class TSMinMaxScaler2(BaseEstimator, TransformerMixin):
'''Scales history and forecast parts of time-series based on history data'''
def __init__(self, scaling_len, upper_bound = 5.):
if scaling_len < 2: raise Exception("Min Max scaling length must be >= 2")
self.scaling_len = scaling_len
self.max_scaler = MinMaxScaler()
self.row_sums = None
self.upper_bound = upper_bound
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
curr_len = X.shape[1]
if curr_len < self.scaling_len:
            msg = f''' Error scaling series. 
                scaling_len {self.scaling_len} should not exceed series length {curr_len}. '''
raise Exception(msg)
df = X if curr_len == self.scaling_len else X[ X.columns[ : self.scaling_len ] ]
self.row_sums = df.sum(axis=1)
df = df[self.row_sums != 0]
self.max_scaler.fit(df.T)
# print(X.shape, self.row_sums.shape)
# sys.exit()
X_filtered = X[self.row_sums != 0].copy()
vals = self.max_scaler.transform(X_filtered.T).T
vals = np.where(vals > self.upper_bound, self.upper_bound, vals)
X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered.index)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
return self.max_scaler.inverse_transform(X.T).T
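
# Design note: TSMinMaxScaler2 refits its internal MinMaxScaler on every
# transform() call, using only the first scaling_len columns (the history
# window), so the forecast tail is scaled with history statistics; rows whose
# history sums to zero are dropped rather than scaled.
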
class TSMinMaxScaler(BaseEstimator, TransformerMixin):
'''Scales history and forecast parts of time-series based on history data'''
def __init__(self, scaling_len, upper_bound = 5.):
if scaling_len < 2: raise Exception("Min Max scaling length must be >= 2")
self.scaling_len = scaling_len
self.min_vals = None
self.max_vals = None
self.ranges = None
self.upper_bound = upper_bound
def fit(self, X, y=None): return self
def transform(self, X, y=None):
        if self.scaling_len < 2: 
            msg = f''' Error scaling series. 
                scaling_len needs to be at least 2. Given length is {self.scaling_len}. '''
raise Exception(msg)
X_vals = X.values
self.min_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].min(axis=1), axis = 1)
self.max_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].max(axis=1), axis = 1)
self.ranges = self.max_vals - self.min_vals
self.ranges = np.where(self.ranges == 0, 1e-5, self.ranges)
# print(self.min_vals.shape, self.ranges.shape)
# sys.exit()
X_vals = X_vals - self.min_vals
X_vals = np.divide(X_vals, self.ranges)
X_vals = np.where( X_vals < self.upper_bound, X_vals, self.upper_bound)
X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)
if DEBUG:
print(f'-------after {__class__.__name__ }------------')
print(X.head())
print(X.shape)
return X
def inverse_transform(self, X):
X = X * self.ranges
X = X + self.min_vals
return X
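
# Worked example (hypothetical numbers): a row [2, 4, 6, 8] with scaling_len=2
# stores min=2 and range=2, so transform() yields [0, 1, 2, 3];
# inverse_transform() multiplies by the range and adds the min back,
# recovering [2, 4, 6, 8].
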
class TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):
'''Splits the time series into X (history) and Y (forecast) series'''
def __init__(self, X_len, Y_len):
self.X_len = X_len
self.Y_len = Y_len
def fit(self, X, y=None): return self
def transform(self, X, y=None):
curr_len = X.shape[1]
encode_len = self.X_len
decode_len = (0 if self.Y_len == 'auto' else self.Y_len)
if curr_len < encode_len + decode_len:
            msg = f''' Error splitting series. 
                Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. '''
raise Exception(msg)
# bit of a hack but sklearn pipeline only allows one thing to be returned in transform()
cols = X.columns
if self.Y_len == 'auto': return { 'X': X[cols[-self.X_len :]], 'Y': X[cols[-self.X_len :]] }
if self.Y_len == 0: return { 'X': X[cols[-self.X_len :]], 'Y': pd.DataFrame() }
        return {
            'X': X[cols[-(self.X_len + self.Y_len): -self.Y_len]],
            'Y': X[cols[-self.Y_len:]]
        }
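
    # Example (hypothetical): with columns t_0 ... t_9, X_len=7 and Y_len=3,
    # transform() returns X = t_0 ... t_6 and Y = t_7 ... t_9; with
    # Y_len='auto' both keys hold the last X_len columns.
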
if __name__ == "__main__":
# data = pd.read_parquet("wfm_single_q_Internal_daily_history.parquet")
# data = pd.read_parquet("WFM_200q_Internal_daily_history.parquet")
# data.rename(columns={ 'queueid': 'seriesid', 'date': 'ts', 'callvolume': 'v',}, inplace=True)
data = pd.read_parquet("History_series_0028C91B.002795_filled.parquet")
data.rename(columns={ 'queueid': 'seriesid', 'time': 'ts', 'callvolume': 'v',}, inplace=True)
data['ts'] = pd.to_datetime(data['ts'])
data = data[['seriesid', 'ts', 'v']]
hist_len = 365
fcst_len = 90
print("-----------orig data -------------------")
# print(data.head()); print(data.shape)
print("-----------after daily agg -------------------")
agg = DailyAggregator('seriesid', 'ts', 'v')
data = agg.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after adding missing intervals -------------------")
filler = MissingTimeIntervalFiller('seriesid', 'ts', 'v', 'days', 1)
data = filler.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after pivoting -------------------")
    pivoter = DataPivoter('seriesid', 'ts', 'v', 0)  # pivot 'ts' into columns of 'v' values (arg order: non_pivoted, pivoting, pivoted)
data = pivoter.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after indexing -------------------")
indexer = IndexSetter('seriesid', drop_existing=True)
data = indexer.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after sampling -------------------")
sampler = SubTimeSeriesSampler(series_len=hist_len+fcst_len, num_reps=5)
data = sampler.fit_transform(data)
# print(data.head()); print(data.shape)
print("-----------after shuffling -------------------")
shuffler = DFShuffler()
data = shuffler.fit_transform(data)
print(data.head()); print(data.shape)
print("-----------after max scaling -------------------")
scaler = TSMinMaxScaler(scaling_len=hist_len)
data = scaler.fit_transform(data)
print(data.head()); print(data.shape)
print("-----------after X Y split -------------------")
splitter = TimeSeriesXYSplitter(hist_len, fcst_len)
data = splitter.fit_transform(data)
print(data.keys())
print(data['X'])
print(data['Y'])
|
normal
|
{
"blob_id": "9f7b1cfcc3c20910201fc67b5a641a5a89908bd1",
"index": 8980,
"step-1": "<mask token>\n\n\nclass IndexSetter(BaseEstimator, TransformerMixin):\n \"\"\" Set index \"\"\"\n\n def __init__(self, index_cols, drop_existing):\n self.index_cols = index_cols\n self.drop_existing = drop_existing\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X = X.copy()\n X.reset_index(drop=self.drop_existing, inplace=True)\n X.set_index(self.index_cols, inplace=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SubTimeSeriesSampler(BaseEstimator, TransformerMixin):\n \"\"\" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns \n Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.\n \"\"\"\n\n def __init__(self, series_len, num_reps):\n self.series_len = series_len\n self.num_reps = num_reps\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'\n )\n sampled_data = []\n data_arr = X.values\n for _ in range(self.num_reps):\n for i in range(data_arr.shape[0]):\n rand_idx = np.random.randint(0, curr_len - self.series_len)\n sampled_data.append(data_arr[i, rand_idx:rand_idx + self.\n series_len])\n idx = list(X.index) * self.num_reps\n col_names = [f't_{i}' for i in range(self.series_len)]\n sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(sampled_data.head())\n print(sampled_data.shape)\n return sampled_data\n\n\nclass AddLeftRightFlipper(BaseEstimator, TransformerMixin):\n \"\"\"\n Adds left right flipped version of tensor\n \"\"\"\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index\n )\n X = pd.concat([X, X_flipped], axis=0, ignore_index=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SeriesLengthTrimmer(BaseEstimator, TransformerMixin):\n \"\"\"\n Trims the length of a series to use latest data points \n \"\"\"\n\n def __init__(self, series_len):\n self.series_len = series_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error trimming series. 
Target length {self.series_len} exceeds current length {curr_len}'\n )\n X_vals = X.values[:, -self.series_len:]\n col_names = [f't_{i}' for i in range(self.series_len)]\n X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X_vals.head())\n print(X_vals.shape)\n return X_vals\n\n\nclass DFShuffler(BaseEstimator, TransformerMixin):\n\n def __init__(self, shuffle=True):\n self.shuffle = shuffle\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.shuffle == False:\n return X\n X = X.sample(frac=1)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass TSMinMaxScaler2(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.max_scaler = MinMaxScaler()\n self.row_sums = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n if curr_len < self.scaling_len:\n msg = f\"\"\" Error scaling series. \n Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n df = X if curr_len == self.scaling_len else X[X.columns[:self.\n scaling_len]]\n self.row_sums = df.sum(axis=1)\n df = df[self.row_sums != 0]\n self.max_scaler.fit(df.T)\n X_filtered = X[self.row_sums != 0].copy()\n vals = self.max_scaler.transform(X_filtered.T).T\n vals = np.where(vals > self.upper_bound, self.upper_bound, vals)\n X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered\n .index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n return self.max_scaler.inverse_transform(X.T).T\n\n\nclass TSMinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.min_vals = None\n self.max_vals = None\n self.ranges = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.scaling_len < 1:\n msg = f\"\"\" Error scaling series. \n scaling_len needs to be at least 2. Given length is {self.scaling_len}. 
\"\"\"\n raise Exception(msg)\n X_vals = X.values\n self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(\n axis=1), axis=1)\n self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(\n axis=1), axis=1)\n self.ranges = self.max_vals - self.min_vals\n self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)\n X_vals = X_vals - self.min_vals\n X_vals = np.divide(X_vals, self.ranges)\n X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)\n X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n X = X * self.ranges\n X = X + self.min_vals\n return X\n\n\nclass TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):\n \"\"\"Splits the time series into X (history) and Y (forecast) series\"\"\"\n\n def __init__(self, X_len, Y_len):\n self.X_len = X_len\n self.Y_len = Y_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n encode_len = self.X_len\n decode_len = 0 if self.Y_len == 'auto' else self.Y_len\n if curr_len < encode_len + decode_len:\n msg = f\"\"\" Error splitting series. \n Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n cols = X.columns\n if self.Y_len == 'auto':\n return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}\n if self.Y_len == 0:\n return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}\n return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':\n X[cols[-self.Y_len:]]}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataPivoter(BaseEstimator, TransformerMixin):\n <mask token>\n\n def __init__(self, non_pivoted_columns, pivoting_column,\n pivoted_columns, fill_na_val):\n super().__init__()\n self.non_pivoted_columns = [non_pivoted_columns] if not isinstance(\n non_pivoted_columns, list) else non_pivoted_columns\n self.pivoted_columns = [pivoted_columns] if not isinstance(\n pivoted_columns, list) else pivoted_columns\n self.pivoting_column = pivoting_column\n self.fill_na_val = fill_na_val\n\n def fit(self, X, y=None):\n return self\n <mask token>\n <mask token>\n\n\nclass IndexSetter(BaseEstimator, TransformerMixin):\n \"\"\" Set index \"\"\"\n\n def __init__(self, index_cols, drop_existing):\n self.index_cols = index_cols\n self.drop_existing = drop_existing\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X = X.copy()\n X.reset_index(drop=self.drop_existing, inplace=True)\n X.set_index(self.index_cols, inplace=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SubTimeSeriesSampler(BaseEstimator, TransformerMixin):\n \"\"\" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns \n Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.\n \"\"\"\n\n def __init__(self, series_len, num_reps):\n self.series_len = series_len\n self.num_reps = num_reps\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'\n )\n sampled_data = []\n data_arr = X.values\n for _ in range(self.num_reps):\n for i in range(data_arr.shape[0]):\n rand_idx = np.random.randint(0, curr_len - self.series_len)\n sampled_data.append(data_arr[i, rand_idx:rand_idx + self.\n series_len])\n idx = list(X.index) * self.num_reps\n col_names = [f't_{i}' for i in range(self.series_len)]\n sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(sampled_data.head())\n print(sampled_data.shape)\n return sampled_data\n\n\nclass AddLeftRightFlipper(BaseEstimator, TransformerMixin):\n \"\"\"\n Adds left right flipped version of tensor\n \"\"\"\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index\n )\n X = pd.concat([X, X_flipped], axis=0, ignore_index=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SeriesLengthTrimmer(BaseEstimator, TransformerMixin):\n \"\"\"\n Trims the length of a series to use latest data points \n \"\"\"\n\n def __init__(self, series_len):\n self.series_len = series_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error trimming series. 
Target length {self.series_len} exceeds current length {curr_len}'\n )\n X_vals = X.values[:, -self.series_len:]\n col_names = [f't_{i}' for i in range(self.series_len)]\n X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X_vals.head())\n print(X_vals.shape)\n return X_vals\n\n\nclass DFShuffler(BaseEstimator, TransformerMixin):\n\n def __init__(self, shuffle=True):\n self.shuffle = shuffle\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.shuffle == False:\n return X\n X = X.sample(frac=1)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass TSMinMaxScaler2(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.max_scaler = MinMaxScaler()\n self.row_sums = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n if curr_len < self.scaling_len:\n msg = f\"\"\" Error scaling series. \n Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n df = X if curr_len == self.scaling_len else X[X.columns[:self.\n scaling_len]]\n self.row_sums = df.sum(axis=1)\n df = df[self.row_sums != 0]\n self.max_scaler.fit(df.T)\n X_filtered = X[self.row_sums != 0].copy()\n vals = self.max_scaler.transform(X_filtered.T).T\n vals = np.where(vals > self.upper_bound, self.upper_bound, vals)\n X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered\n .index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n return self.max_scaler.inverse_transform(X.T).T\n\n\nclass TSMinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.min_vals = None\n self.max_vals = None\n self.ranges = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.scaling_len < 1:\n msg = f\"\"\" Error scaling series. \n scaling_len needs to be at least 2. Given length is {self.scaling_len}. 
\"\"\"\n raise Exception(msg)\n X_vals = X.values\n self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(\n axis=1), axis=1)\n self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(\n axis=1), axis=1)\n self.ranges = self.max_vals - self.min_vals\n self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)\n X_vals = X_vals - self.min_vals\n X_vals = np.divide(X_vals, self.ranges)\n X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)\n X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n X = X * self.ranges\n X = X + self.min_vals\n return X\n\n\nclass TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):\n \"\"\"Splits the time series into X (history) and Y (forecast) series\"\"\"\n\n def __init__(self, X_len, Y_len):\n self.X_len = X_len\n self.Y_len = Y_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n encode_len = self.X_len\n decode_len = 0 if self.Y_len == 'auto' else self.Y_len\n if curr_len < encode_len + decode_len:\n msg = f\"\"\" Error splitting series. \n Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n cols = X.columns\n if self.Y_len == 'auto':\n return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}\n if self.Y_len == 0:\n return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}\n return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':\n X[cols[-self.Y_len:]]}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):\n <mask token>\n DAYS = 'days'\n MINUTES = 'minutes'\n HOURS = 'hours'\n\n def __init__(self, id_columns, time_column, value_columns, time_unit,\n step_size):\n super().__init__()\n if not isinstance(id_columns, list):\n self.id_columns = [id_columns]\n else:\n self.id_columns = id_columns\n self.time_column = time_column\n if not isinstance(value_columns, list):\n self.value_columns = [value_columns]\n else:\n self.value_columns = value_columns\n self.time_unit = time_unit\n self.step_size = int(step_size)\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n min_time = X[self.time_column].min()\n max_time = X[self.time_column].max()\n if self.time_unit == MissingTimeIntervalFiller.DAYS:\n num_steps = (max_time - min_time).days // self.step_size + 1\n all_time_ints = [(min_time + timedelta(days=x * self.step_size)\n ) for x in range(num_steps)]\n elif self.time_unit == MissingTimeIntervalFiller.HOURS:\n time_diff_sec = (max_time - min_time).total_seconds()\n num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1\n num_steps = (max_time - min_time).days + 1\n all_time_ints = [(min_time + timedelta(hours=x * self.step_size\n )) for x in range(num_steps)]\n elif self.time_unit == MissingTimeIntervalFiller.MINUTES:\n time_diff_sec = (max_time - min_time).total_seconds()\n num_steps = int(time_diff_sec // (60 * self.step_size)) + 1\n all_time_ints = [(min_time + timedelta(minutes=x * self.\n step_size)) for x in range(num_steps)]\n else:\n raise Exception(\n f\"Unrecognized time unit: {self.time_unit}. Must be one of ['days', 'hours', 'minutes'].\"\n )\n full_intervals_df = pd.DataFrame(data=all_time_ints, columns=[self.\n time_column])\n id_cols_df = X[self.id_columns].drop_duplicates()\n full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(\n foo=1)).drop('foo', 1)\n full_df = full_df.merge(X[self.id_columns + [self.time_column] +\n self.value_columns], on=self.id_columns + [self.time_column],\n how='left')\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(full_df.head())\n print(full_df.shape)\n return full_df\n\n\nclass DataPivoter(BaseEstimator, TransformerMixin):\n \"\"\" Pivots a dataframe with a given column \"\"\"\n\n def __init__(self, non_pivoted_columns, pivoting_column,\n pivoted_columns, fill_na_val):\n super().__init__()\n self.non_pivoted_columns = [non_pivoted_columns] if not isinstance(\n non_pivoted_columns, list) else non_pivoted_columns\n self.pivoted_columns = [pivoted_columns] if not isinstance(\n pivoted_columns, list) else pivoted_columns\n self.pivoting_column = pivoting_column\n self.fill_na_val = fill_na_val\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n processed_X = X.pivot_table(index=self.non_pivoted_columns, aggfunc\n =sum, columns=self.pivoting_column, values=self.pivoted_columns,\n fill_value=self.fill_na_val).reset_index()\n processed_X.columns = [(col[0] if col[1] == '' else col[1]) for col in\n processed_X.columns]\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(processed_X.head())\n print(processed_X.shape)\n return processed_X\n\n def inverse_transform(self, preds_df):\n preds_df2 = pd.melt(preds_df.reset_index(), id_vars=self.\n non_pivoted_columns, value_vars=preds_df.columns, var_name=self\n .pivoting_column, value_name=self.pivoted_columns[0])\n return preds_df2\n\n\nclass IndexSetter(BaseEstimator, TransformerMixin):\n \"\"\" Set 
index \"\"\"\n\n def __init__(self, index_cols, drop_existing):\n self.index_cols = index_cols\n self.drop_existing = drop_existing\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X = X.copy()\n X.reset_index(drop=self.drop_existing, inplace=True)\n X.set_index(self.index_cols, inplace=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SubTimeSeriesSampler(BaseEstimator, TransformerMixin):\n \"\"\" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns \n Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.\n \"\"\"\n\n def __init__(self, series_len, num_reps):\n self.series_len = series_len\n self.num_reps = num_reps\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error sampling series. Target length {self.series_len} exceeds current length {curr_len}'\n )\n sampled_data = []\n data_arr = X.values\n for _ in range(self.num_reps):\n for i in range(data_arr.shape[0]):\n rand_idx = np.random.randint(0, curr_len - self.series_len)\n sampled_data.append(data_arr[i, rand_idx:rand_idx + self.\n series_len])\n idx = list(X.index) * self.num_reps\n col_names = [f't_{i}' for i in range(self.series_len)]\n sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(sampled_data.head())\n print(sampled_data.shape)\n return sampled_data\n\n\nclass AddLeftRightFlipper(BaseEstimator, TransformerMixin):\n \"\"\"\n Adds left right flipped version of tensor\n \"\"\"\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index\n )\n X = pd.concat([X, X_flipped], axis=0, ignore_index=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SeriesLengthTrimmer(BaseEstimator, TransformerMixin):\n \"\"\"\n Trims the length of a series to use latest data points \n \"\"\"\n\n def __init__(self, series_len):\n self.series_len = series_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error trimming series. 
Target length {self.series_len} exceeds current length {curr_len}'\n )\n X_vals = X.values[:, -self.series_len:]\n col_names = [f't_{i}' for i in range(self.series_len)]\n X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X_vals.head())\n print(X_vals.shape)\n return X_vals\n\n\nclass DFShuffler(BaseEstimator, TransformerMixin):\n\n def __init__(self, shuffle=True):\n self.shuffle = shuffle\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.shuffle == False:\n return X\n X = X.sample(frac=1)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass TSMinMaxScaler2(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.max_scaler = MinMaxScaler()\n self.row_sums = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n if curr_len < self.scaling_len:\n msg = f\"\"\" Error scaling series. \n Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n df = X if curr_len == self.scaling_len else X[X.columns[:self.\n scaling_len]]\n self.row_sums = df.sum(axis=1)\n df = df[self.row_sums != 0]\n self.max_scaler.fit(df.T)\n X_filtered = X[self.row_sums != 0].copy()\n vals = self.max_scaler.transform(X_filtered.T).T\n vals = np.where(vals > self.upper_bound, self.upper_bound, vals)\n X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered\n .index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n return self.max_scaler.inverse_transform(X.T).T\n\n\nclass TSMinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.min_vals = None\n self.max_vals = None\n self.ranges = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.scaling_len < 1:\n msg = f\"\"\" Error scaling series. \n scaling_len needs to be at least 2. Given length is {self.scaling_len}. 
\"\"\"\n raise Exception(msg)\n X_vals = X.values\n self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(\n axis=1), axis=1)\n self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(\n axis=1), axis=1)\n self.ranges = self.max_vals - self.min_vals\n self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)\n X_vals = X_vals - self.min_vals\n X_vals = np.divide(X_vals, self.ranges)\n X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)\n X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n X = X * self.ranges\n X = X + self.min_vals\n return X\n\n\nclass TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):\n \"\"\"Splits the time series into X (history) and Y (forecast) series\"\"\"\n\n def __init__(self, X_len, Y_len):\n self.X_len = X_len\n self.Y_len = Y_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n encode_len = self.X_len\n decode_len = 0 if self.Y_len == 'auto' else self.Y_len\n if curr_len < encode_len + decode_len:\n msg = f\"\"\" Error splitting series. \n Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n cols = X.columns\n if self.Y_len == 'auto':\n return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}\n if self.Y_len == 0:\n return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}\n return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':\n X[cols[-self.Y_len:]]}\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass DailyAggregator(BaseEstimator, TransformerMixin):\n <mask token>\n\n def __init__(self, id_columns, time_column, value_columns):\n super().__init__()\n if not isinstance(id_columns, list):\n self.id_columns = [id_columns]\n else:\n self.id_columns = id_columns\n self.time_column = time_column\n if not isinstance(value_columns, list):\n self.value_columns = [value_columns]\n else:\n self.value_columns = value_columns\n <mask token>\n\n def transform(self, X):\n X = X.copy()\n X[self.time_column] = X[self.time_column].dt.normalize()\n X = X.groupby(by=self.id_columns + [self.time_column], as_index=False)[\n self.value_columns].sum()\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):\n \"\"\" Adds missing time intervals in a time-series dataframe. \"\"\"\n DAYS = 'days'\n MINUTES = 'minutes'\n HOURS = 'hours'\n\n def __init__(self, id_columns, time_column, value_columns, time_unit,\n step_size):\n super().__init__()\n if not isinstance(id_columns, list):\n self.id_columns = [id_columns]\n else:\n self.id_columns = id_columns\n self.time_column = time_column\n if not isinstance(value_columns, list):\n self.value_columns = [value_columns]\n else:\n self.value_columns = value_columns\n self.time_unit = time_unit\n self.step_size = int(step_size)\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n min_time = X[self.time_column].min()\n max_time = X[self.time_column].max()\n if self.time_unit == MissingTimeIntervalFiller.DAYS:\n num_steps = (max_time - min_time).days // self.step_size + 1\n all_time_ints = [(min_time + timedelta(days=x * self.step_size)\n ) for x in range(num_steps)]\n elif self.time_unit == MissingTimeIntervalFiller.HOURS:\n time_diff_sec = (max_time - min_time).total_seconds()\n num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1\n num_steps = (max_time - min_time).days + 1\n all_time_ints = [(min_time + timedelta(hours=x * self.step_size\n )) for x in range(num_steps)]\n elif self.time_unit == MissingTimeIntervalFiller.MINUTES:\n time_diff_sec = (max_time - min_time).total_seconds()\n num_steps = int(time_diff_sec // (60 * self.step_size)) + 1\n all_time_ints = [(min_time + timedelta(minutes=x * self.\n step_size)) for x in range(num_steps)]\n else:\n raise Exception(\n f\"Unrecognized time unit: {self.time_unit}. 
Must be one of ['days', 'hours', 'minutes'].\"\n )\n full_intervals_df = pd.DataFrame(data=all_time_ints, columns=[self.\n time_column])\n id_cols_df = X[self.id_columns].drop_duplicates()\n full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(\n foo=1)).drop('foo', 1)\n full_df = full_df.merge(X[self.id_columns + [self.time_column] +\n self.value_columns], on=self.id_columns + [self.time_column],\n how='left')\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(full_df.head())\n print(full_df.shape)\n return full_df\n\n\nclass DataPivoter(BaseEstimator, TransformerMixin):\n \"\"\" Pivots a dataframe with a given column \"\"\"\n\n def __init__(self, non_pivoted_columns, pivoting_column,\n pivoted_columns, fill_na_val):\n super().__init__()\n self.non_pivoted_columns = [non_pivoted_columns] if not isinstance(\n non_pivoted_columns, list) else non_pivoted_columns\n self.pivoted_columns = [pivoted_columns] if not isinstance(\n pivoted_columns, list) else pivoted_columns\n self.pivoting_column = pivoting_column\n self.fill_na_val = fill_na_val\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n processed_X = X.pivot_table(index=self.non_pivoted_columns, aggfunc\n =sum, columns=self.pivoting_column, values=self.pivoted_columns,\n fill_value=self.fill_na_val).reset_index()\n processed_X.columns = [(col[0] if col[1] == '' else col[1]) for col in\n processed_X.columns]\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(processed_X.head())\n print(processed_X.shape)\n return processed_X\n\n def inverse_transform(self, preds_df):\n preds_df2 = pd.melt(preds_df.reset_index(), id_vars=self.\n non_pivoted_columns, value_vars=preds_df.columns, var_name=self\n .pivoting_column, value_name=self.pivoted_columns[0])\n return preds_df2\n\n\nclass IndexSetter(BaseEstimator, TransformerMixin):\n \"\"\" Set index \"\"\"\n\n def __init__(self, index_cols, drop_existing):\n self.index_cols = index_cols\n self.drop_existing = drop_existing\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X = X.copy()\n X.reset_index(drop=self.drop_existing, inplace=True)\n X.set_index(self.index_cols, inplace=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SubTimeSeriesSampler(BaseEstimator, TransformerMixin):\n \"\"\" Samples a sub-series of length t <= the original series of length T. Assumes series is in columns \n Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.\n \"\"\"\n\n def __init__(self, series_len, num_reps):\n self.series_len = series_len\n self.num_reps = num_reps\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error sampling series. 
Target length {self.series_len} exceeds current length {curr_len}'\n )\n sampled_data = []\n data_arr = X.values\n for _ in range(self.num_reps):\n for i in range(data_arr.shape[0]):\n rand_idx = np.random.randint(0, curr_len - self.series_len)\n sampled_data.append(data_arr[i, rand_idx:rand_idx + self.\n series_len])\n idx = list(X.index) * self.num_reps\n col_names = [f't_{i}' for i in range(self.series_len)]\n sampled_data = pd.DataFrame(sampled_data, columns=col_names, index=idx)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(sampled_data.head())\n print(sampled_data.shape)\n return sampled_data\n\n\nclass AddLeftRightFlipper(BaseEstimator, TransformerMixin):\n \"\"\"\n Adds left right flipped version of tensor\n \"\"\"\n\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n X_flipped = pd.DataFrame(np.fliplr(X), columns=X.columns, index=X.index\n )\n X = pd.concat([X, X_flipped], axis=0, ignore_index=True)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass SeriesLengthTrimmer(BaseEstimator, TransformerMixin):\n \"\"\"\n Trims the length of a series to use latest data points \n \"\"\"\n\n def __init__(self, series_len):\n self.series_len = series_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n if curr_len < self.series_len:\n raise Exception(\n f'Error trimming series. Target length {self.series_len} exceeds current length {curr_len}'\n )\n X_vals = X.values[:, -self.series_len:]\n col_names = [f't_{i}' for i in range(self.series_len)]\n X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X_vals.head())\n print(X_vals.shape)\n return X_vals\n\n\nclass DFShuffler(BaseEstimator, TransformerMixin):\n\n def __init__(self, shuffle=True):\n self.shuffle = shuffle\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.shuffle == False:\n return X\n X = X.sample(frac=1)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n\nclass TSMinMaxScaler2(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.max_scaler = MinMaxScaler()\n self.row_sums = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n if curr_len < self.scaling_len:\n msg = f\"\"\" Error scaling series. \n Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. 
\"\"\"\n raise Exception(msg)\n df = X if curr_len == self.scaling_len else X[X.columns[:self.\n scaling_len]]\n self.row_sums = df.sum(axis=1)\n df = df[self.row_sums != 0]\n self.max_scaler.fit(df.T)\n X_filtered = X[self.row_sums != 0].copy()\n vals = self.max_scaler.transform(X_filtered.T).T\n vals = np.where(vals > self.upper_bound, self.upper_bound, vals)\n X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered\n .index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n return self.max_scaler.inverse_transform(X.T).T\n\n\nclass TSMinMaxScaler(BaseEstimator, TransformerMixin):\n \"\"\"Scales history and forecast parts of time-series based on history data\"\"\"\n\n def __init__(self, scaling_len, upper_bound=5.0):\n if scaling_len < 2:\n raise Exception('Min Max scaling length must be >= 2')\n self.scaling_len = scaling_len\n self.min_vals = None\n self.max_vals = None\n self.ranges = None\n self.upper_bound = upper_bound\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n if self.scaling_len < 1:\n msg = f\"\"\" Error scaling series. \n scaling_len needs to be at least 2. Given length is {self.scaling_len}. \"\"\"\n raise Exception(msg)\n X_vals = X.values\n self.min_vals = np.expand_dims(X_vals[:, :self.scaling_len].min(\n axis=1), axis=1)\n self.max_vals = np.expand_dims(X_vals[:, :self.scaling_len].max(\n axis=1), axis=1)\n self.ranges = self.max_vals - self.min_vals\n self.ranges = np.where(self.ranges == 0, 1e-05, self.ranges)\n X_vals = X_vals - self.min_vals\n X_vals = np.divide(X_vals, self.ranges)\n X_vals = np.where(X_vals < self.upper_bound, X_vals, self.upper_bound)\n X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__}------------')\n print(X.head())\n print(X.shape)\n return X\n\n def inverse_transform(self, X):\n X = X * self.ranges\n X = X + self.min_vals\n return X\n\n\nclass TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):\n \"\"\"Splits the time series into X (history) and Y (forecast) series\"\"\"\n\n def __init__(self, X_len, Y_len):\n self.X_len = X_len\n self.Y_len = Y_len\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n curr_len = X.shape[1]\n encode_len = self.X_len\n decode_len = 0 if self.Y_len == 'auto' else self.Y_len\n if curr_len < encode_len + decode_len:\n msg = f\"\"\" Error splitting series. \n Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. \"\"\"\n raise Exception(msg)\n cols = X.columns\n if self.Y_len == 'auto':\n return {'X': X[cols[-self.X_len:]], 'Y': X[cols[-self.X_len:]]}\n if self.Y_len == 0:\n return {'X': X[cols[-self.X_len:]], 'Y': pd.DataFrame()}\n return {'X': X[cols[-(self.X_len + self.Y_len):-self.Y_len]], 'Y':\n X[cols[-self.Y_len:]]}\n\n\n<mask token>\n",
"step-5": "import numpy as np, pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom datetime import timedelta\nimport sys\n\nDEBUG = False\n\nclass DailyAggregator(BaseEstimator, TransformerMixin):\n ''' Aggregates time-series values to daily level. '''\n def __init__(self, id_columns, time_column, value_columns ):\n super().__init__()\n if not isinstance(id_columns, list):\n self.id_columns = [id_columns]\n else:\n self.id_columns = id_columns\n\n self.time_column = time_column\n\n if not isinstance(value_columns, list):\n self.value_columns = [value_columns]\n else:\n self.value_columns = value_columns\n\n\n def fit(self, X, y=None): return self\n\n\n def transform(self, X):\n X = X.copy()\n X[self.time_column] = X[self.time_column].dt.normalize()\n X = X.groupby(by=self.id_columns + [self.time_column], as_index=False)[self.value_columns].sum()\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape)\n return X\n\n\n\nclass MissingTimeIntervalFiller(BaseEstimator, TransformerMixin):\n ''' Adds missing time intervals in a time-series dataframe. '''\n DAYS = 'days'\n MINUTES = 'minutes'\n HOURS = 'hours'\n\n def __init__(self, id_columns, time_column, value_columns, time_unit, step_size ):\n super().__init__()\n if not isinstance(id_columns, list):\n self.id_columns = [id_columns]\n else:\n self.id_columns = id_columns\n\n self.time_column = time_column\n\n if not isinstance(value_columns, list):\n self.value_columns = [value_columns]\n else:\n self.value_columns = value_columns\n\n self.time_unit = time_unit\n self.step_size = int(step_size)\n\n \n def fit(self, X, y=None): return self # do nothing in fit\n \n\n def transform(self, X):\n min_time = X[self.time_column].min()\n max_time = X[self.time_column].max() \n # print(min_time, max_time) \n\n if self.time_unit == MissingTimeIntervalFiller.DAYS:\n num_steps = ( (max_time - min_time).days // self.step_size ) + 1\n all_time_ints = [min_time + timedelta(days=x*self.step_size) for x in range(num_steps)]\n\n elif self.time_unit == MissingTimeIntervalFiller.HOURS:\n time_diff_sec = (max_time - min_time).total_seconds()\n num_steps = int(time_diff_sec // (3600 * self.step_size)) + 1\n num_steps = (max_time - min_time).days + 1\n all_time_ints = [min_time + timedelta(hours=x*self.step_size) for x in range(num_steps)]\n\n elif self.time_unit == MissingTimeIntervalFiller.MINUTES:\n time_diff_sec = (max_time - min_time).total_seconds()\n num_steps = int(time_diff_sec // (60 * self.step_size)) + 1\n # print('num_steps', num_steps)\n all_time_ints = [min_time + timedelta(minutes=x*self.step_size) for x in range(num_steps)]\n else: \n raise Exception(f\"Unrecognized time unit: {self.time_unit}. 
Must be one of ['days', 'hours', 'minutes'].\")\n\n # create df of all time intervals\n full_intervals_df = pd.DataFrame(data = all_time_ints, columns = [self.time_column]) \n\n # get unique id-var values from original input data\n id_cols_df = X[self.id_columns].drop_duplicates()\n \n # get cross join of all time intervals and ids columns\n full_df = id_cols_df.assign(foo=1).merge(full_intervals_df.assign(foo=1)).drop('foo', 1)\n\n # merge original data on to this full table\n full_df = full_df.merge(X[self.id_columns + [self.time_column] + self.value_columns], \n on=self.id_columns + [self.time_column], how='left')\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(full_df.head())\n print(full_df.shape)\n return full_df\n\n\n\nclass DataPivoter(BaseEstimator, TransformerMixin):\n ''' Pivots a dataframe with a given column '''\n\n def __init__(self, non_pivoted_columns, pivoting_column, pivoted_columns, fill_na_val):\n super().__init__() \n self.non_pivoted_columns = \\\n [non_pivoted_columns] if not isinstance(non_pivoted_columns, list) else non_pivoted_columns\n self.pivoted_columns = [pivoted_columns] if not isinstance(pivoted_columns, list) else pivoted_columns\n self.pivoting_column = pivoting_column\n self.fill_na_val = fill_na_val\n\n\n def fit(self, X, y=None): return self # do nothing in fit\n\n\n def transform(self, X):\n processed_X = X.pivot_table(index = self.non_pivoted_columns, \n aggfunc=sum,\n columns=self.pivoting_column, \n values=self.pivoted_columns, \n fill_value = self.fill_na_val\n ).reset_index()\n\n \n # pivot table will result in multi column index. To get a regular column names\n processed_X.columns = [ col[0] if col[1] == '' else col[1] for col in processed_X.columns ] \n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(processed_X.head())\n print(processed_X.shape) \n return processed_X\n\n \n def inverse_transform(self, preds_df):\n # unpivot given dataframe\n preds_df2 = pd.melt(preds_df.reset_index(), \n id_vars=self.non_pivoted_columns,\n value_vars=preds_df.columns,\n var_name = self.pivoting_column,\n value_name = self.pivoted_columns[0]\n )\n return preds_df2\n\n\n\nclass IndexSetter(BaseEstimator, TransformerMixin):\n ''' Set index '''\n def __init__(self, index_cols, drop_existing):\n self.index_cols = index_cols\n self.drop_existing = drop_existing\n \n def fit(self, X, y=None): return self # do nothing in fit\n\n\n def transform(self, X):\n X = X.copy()\n X.reset_index(drop=self.drop_existing, inplace=True)\n X.set_index(self.index_cols, inplace=True)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n\n\nclass SubTimeSeriesSampler(BaseEstimator, TransformerMixin):\n ''' Samples a sub-series of length t <= the original series of length T. Assumes series is in columns \n Original time-series time labels (column headers) are replaced with t_0, t_1, ... t_<series_len>.\n '''\n def __init__(self, series_len, num_reps): \n self.series_len = series_len\n self.num_reps = num_reps\n\n\n def fit(self, X, y=None): return self\n\n\n def transform(self, X):\n curr_len = X.shape[1]\n\n if curr_len < self.series_len: \n raise Exception(f\"Error sampling series. 
Target length {self.series_len} exceeds current length {curr_len}\")\n\n sampled_data = []\n data_arr = X.values\n for _ in range(self.num_reps):\n for i in range(data_arr.shape[0]):\n rand_idx = np.random.randint(0, curr_len - self.series_len)\n sampled_data.append( data_arr[i, rand_idx: rand_idx + self.series_len] )\n \n idx = list(X.index) * self.num_reps\n col_names = [ f't_{i}' for i in range(self.series_len)]\n sampled_data = pd.DataFrame(sampled_data, columns=col_names, index= idx)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(sampled_data.head())\n print(sampled_data.shape) \n return sampled_data\n\n\n\nclass AddLeftRightFlipper(BaseEstimator, TransformerMixin):\n '''\n Adds left right flipped version of tensor\n '''\n def __init__(self): pass\n def fit(self, X, y=None): return self\n\n def transform(self, X):\n X_flipped = pd.DataFrame( np.fliplr(X), columns=X.columns, index=X.index )\n X = pd.concat([X, X_flipped], axis=0, ignore_index=True)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n\n\nclass SeriesLengthTrimmer(BaseEstimator, TransformerMixin):\n '''\n Trims the length of a series to use latest data points \n '''\n def __init__(self, series_len): \n self.series_len = series_len\n\n def fit(self, X, y=None): return self\n\n def transform(self, X):\n curr_len = X.shape[1]\n\n if curr_len < self.series_len: \n raise Exception(f\"Error trimming series. Target length {self.series_len} exceeds current length {curr_len}\")\n \n X_vals = X.values[:, -self.series_len:]\n col_names = [ f't_{i}' for i in range(self.series_len)]\n X_vals = pd.DataFrame(X_vals, columns=col_names, index=X.index) \n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X_vals.head())\n print(X_vals.shape) \n return X_vals\n\n\n\nclass DFShuffler(BaseEstimator, TransformerMixin):\n def __init__(self, shuffle = True): \n self.shuffle = shuffle\n\n def fit(self, X, y=None): return self\n\n def transform(self, X, y=None): \n if self.shuffle == False: return X \n X = X.sample(frac=1) \n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n\n\nclass TSMinMaxScaler2(BaseEstimator, TransformerMixin):\n '''Scales history and forecast parts of time-series based on history data'''\n def __init__(self, scaling_len, upper_bound = 5.): \n if scaling_len < 2: raise Exception(\"Min Max scaling length must be >= 2\")\n self.scaling_len = scaling_len\n self.max_scaler = MinMaxScaler()\n self.row_sums = None\n self.upper_bound = upper_bound\n \n\n def fit(self, X, y=None): \n return self\n \n def transform(self, X, y=None): \n curr_len = X.shape[1]\n if curr_len < self.scaling_len: \n msg = f''' Error scaling series. \n Sum of scaling_len {self.scaling_len} should not exceed series length {curr_len}. 
'''\n raise Exception(msg)\n \n df = X if curr_len == self.scaling_len else X[ X.columns[ : self.scaling_len ] ] \n self.row_sums = df.sum(axis=1)\n df = df[self.row_sums != 0]\n self.max_scaler.fit(df.T)\n \n # print(X.shape, self.row_sums.shape)\n # sys.exit()\n X_filtered = X[self.row_sums != 0].copy()\n vals = self.max_scaler.transform(X_filtered.T).T\n vals = np.where(vals > self.upper_bound, self.upper_bound, vals)\n\n X = pd.DataFrame(vals, columns=X_filtered.columns, index=X_filtered.index)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n\n def inverse_transform(self, X):\n return self.max_scaler.inverse_transform(X.T).T\n\n\n\nclass TSMinMaxScaler(BaseEstimator, TransformerMixin):\n '''Scales history and forecast parts of time-series based on history data'''\n def __init__(self, scaling_len, upper_bound = 5.): \n if scaling_len < 2: raise Exception(\"Min Max scaling length must be >= 2\")\n self.scaling_len = scaling_len\n self.min_vals = None \n self.max_vals = None \n self.ranges = None \n self.upper_bound = upper_bound\n \n\n def fit(self, X, y=None): return self\n\n \n def transform(self, X, y=None): \n\n if self.scaling_len < 1: \n msg = f''' Error scaling series. \n scaling_len needs to be at least 2. Given length is {self.scaling_len}. '''\n raise Exception(msg)\n \n\n X_vals = X.values\n self.min_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].min(axis=1), axis = 1)\n self.max_vals = np.expand_dims( X_vals[ :, : self.scaling_len ].max(axis=1), axis = 1)\n\n self.ranges = self.max_vals - self.min_vals\n self.ranges = np.where(self.ranges == 0, 1e-5, self.ranges)\n # print(self.min_vals.shape, self.ranges.shape)\n\n # sys.exit()\n X_vals = X_vals - self.min_vals\n X_vals = np.divide(X_vals, self.ranges) \n X_vals = np.where( X_vals < self.upper_bound, X_vals, self.upper_bound)\n\n X = pd.DataFrame(X_vals, columns=X.columns, index=X.index)\n if DEBUG:\n print(f'-------after {__class__.__name__ }------------')\n print(X.head())\n print(X.shape) \n return X\n \n\n def inverse_transform(self, X):\n X = X * self.ranges\n X = X + self.min_vals\n return X\n\n\n\nclass TimeSeriesXYSplitter(BaseEstimator, TransformerMixin):\n '''Splits the time series into X (history) and Y (forecast) series'''\n def __init__(self, X_len, Y_len): \n self.X_len = X_len\n self.Y_len = Y_len\n \n\n def fit(self, X, y=None): return self\n\n def transform(self, X, y=None): \n curr_len = X.shape[1]\n encode_len = self.X_len\n decode_len = (0 if self.Y_len == 'auto' else self.Y_len)\n\n if curr_len < encode_len + decode_len: \n msg = f''' Error splitting series. \n Sum of X_len {self.X_len} and Y_len {self.Y_len} should not exceed series length {curr_len}. 
'''\n raise Exception(msg)\n\n # bit of a hack but sklearn pipeline only allows one thing to be returned in transform()\n cols = X.columns \n if self.Y_len == 'auto': return { 'X': X[cols[-self.X_len :]], 'Y': X[cols[-self.X_len :]] }\n if self.Y_len == 0: return { 'X': X[cols[-self.X_len :]], 'Y': pd.DataFrame() }\n return {\n 'X': X[cols[-( self.X_len + self.Y_len) : -self.Y_len] ], \n 'Y':X[cols[ -self.Y_len : ] ] \n }\n\n\n\nif __name__ == \"__main__\": \n\n # data = pd.read_parquet(\"wfm_single_q_Internal_daily_history.parquet\")\n # data = pd.read_parquet(\"WFM_200q_Internal_daily_history.parquet\")\n # data.rename(columns={ 'queueid': 'seriesid', 'date': 'ts', 'callvolume': 'v',}, inplace=True)\n \n data = pd.read_parquet(\"History_series_0028C91B.002795_filled.parquet\")\n data.rename(columns={ 'queueid': 'seriesid', 'time': 'ts', 'callvolume': 'v',}, inplace=True)\n\n \n data['ts'] = pd.to_datetime(data['ts'])\n data = data[['seriesid', 'ts', 'v']]\n\n hist_len = 365\n fcst_len = 90\n\n print(\"-----------orig data -------------------\")\n # print(data.head()); print(data.shape) \n \n print(\"-----------after daily agg -------------------\")\n agg = DailyAggregator('seriesid', 'ts', 'v')\n data = agg.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after adding missing intervals -------------------\")\n filler = MissingTimeIntervalFiller('seriesid', 'ts', 'v', 'days', 1)\n data = filler.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after pivoting -------------------\")\n pivoter = DataPivoter('seriesid', 'v', 'ts', 0)\n data = pivoter.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after indexing -------------------\")\n indexer = IndexSetter('seriesid', drop_existing=True)\n data = indexer.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after sampling -------------------\")\n sampler = SubTimeSeriesSampler(series_len=hist_len+fcst_len, num_reps=5)\n data = sampler.fit_transform(data)\n # print(data.head()); print(data.shape) \n\n print(\"-----------after shuffling -------------------\")\n shuffler = DFShuffler()\n data = shuffler.fit_transform(data)\n print(data.head()); print(data.shape) \n\n print(\"-----------after max scaling -------------------\")\n scaler = TSMinMaxScaler(scaling_len=hist_len)\n data = scaler.fit_transform(data)\n print(data.head()); print(data.shape) \n\n print(\"-----------after X Y split -------------------\")\n splitter = TimeSeriesXYSplitter(hist_len, fcst_len)\n data = splitter.fit_transform(data)\n print(data.keys())\n print(data['X'])\n print(data['Y'])\n\n\n\n",
"step-ids": [
41,
44,
52,
56,
62
]
}
|
[
41,
44,
52,
56,
62
] |
<|reserved_special_token_0|>
#!/usr/bin/env python
# coding: utf-8
"""
Dakara Online protocol generator, by Alejandro Santos
"""
from genpackets import *
from gendefs_js import *
BUILDERS = []
HANDLERS = []
DECODE_DISPATCH = []
ARGS_HANDLER = []
def write_packets_from(f, fph, base_name, namespace, P):
# Enum with IDs
if base_name != "ServerPacket" :
f.write("""var {base_name}ID = {{ \n""".format(base_name=base_name))
for i, x in enumerate(P):
if x:
f.write(" {name} : {packet_id}".format(base_name=base_name, name=x.name, packet_id=i))
f.write(",\n")
f.write(""" {base_name}ID_PACKET_COUNT : {packet_id}\n}};\n""".format(base_name=base_name, packet_id=len(P)))
# Factory
'''
f.write("""
function {base_name}Factory(buffer) {{
if (buffer.length() < 1) return 0;
var p;
PacketID = buffer.PeekByte();
switch (PacketID) {{
""".format(base_name=base_name))
for i, x in enumerate(P):
if not x: continue
f.write("""
case {i}:
p = new {name}(buffer);
break;
""".format(i=i, name=x.name))
f.write("""
}}
return p;
}}
""".format())
'''
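    # The commented-out factory above is kept for reference only; decoding goes
    # through the {base_name}DecodeAndDispatch function emitted further below.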
for i, x in enumerate(P):
if not x: continue
header_fields = []
header_fields_signature = []
items_assign_e = []
items_assign_build = []
ctor_fields = ""
min_byte_count = 0
ctor_fields_bytequeue = ""
parametros_fields = ""
parametros_args = ""
serialize_fields = ""
if x.name == "MultiMessage":
escribir_multimessage(f)
continue
for y in x.args:
arg_name = y[0]
arg_type = y[1] & 0xff
arg_type_str = TYPE_TO_STR[arg_type]
arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]
arg_is_array = ((y[1] & TYPE_ARRAY) == TYPE_ARRAY)
type_reader_name = TYPE_TO_READER_NAME[arg_type]
type_writer_name = TYPE_TO_WRITER_NAME[arg_type]
ctor_fields += ", " + arg_name + "()"
items_assign_e.append(" {arg_name}: {arg_name},".format(arg_name=arg_name))
items_assign_build.append(" e.{arg_name}= {arg_name};".format(arg_name=arg_name))
if arg_is_array:
array_size=y[2]
min_byte_count += TYPE_SIZE[arg_type] * array_size
header_fields.append(" {arg_name}; ".format(arg_type_str=arg_type_str, arg_name=arg_name, array_size=array_size))
header_fields_signature.append("{arg_name} ".format(arg_type_str=arg_type_sig_str, arg_name=arg_name, array_size=array_size))
ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)
parametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)
parametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)
serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name, array_size=array_size)
else:
min_byte_count += TYPE_SIZE[arg_type]
header_fields.append(" {arg_type_str} {arg_name}; ".format(arg_type_str=arg_type_str, arg_name=arg_name))
header_fields_signature.append("{arg_type_str} {arg_name}".format(arg_type_str=arg_type_sig_str, arg_name=arg_name))
ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)
parametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)
parametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)
serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name)
format_args = {
'base_name': base_name,
'name': x.name,
'header_fields': '\n'.join(header_fields),
'header_fields_signature': ', '.join(header_fields_signature),
'items_assign_e': '\n'.join(items_assign_e),
'items_assign_build': '\n'.join(items_assign_build),
'ctor_fields': ctor_fields,
'packet_id': i,
'min_byte_count': min_byte_count,
'ctor_fields_bytequeue': ctor_fields_bytequeue,
'serialize_fields': serialize_fields,
'parametros_fields' : parametros_fields,
'parametros_args' : parametros_args
}
# Individual packet header
if base_name != "ServerPacket" :
f.write(x.get_header_fmt().format(**format_args))
BUILDERS.append(x.get_builder_fmt().format(**format_args))
if base_name == "ServerPacket" :
HANDLERS.append(x.get_handler_fmt().format(**format_args))
        # For ServerPacketDecodeAndDispatch (without having to build packet objects).
        if base_name == "ServerPacket" :
            dec_dispatch = x.get_parametros_fmt().format(**format_args)
            # Strip the trailing comma, if there is one (e.g. "a, b," -> "a, b"):
            pos = dec_dispatch.rfind(",")
            if pos > 0:
                dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos+1:]
            DECODE_DISPATCH.append(dec_dispatch)
if base_name == "ServerPacket" :
args_handler = x.get_argumentosHandler_fmt().format(**format_args);
#le saco la ultima coma si es que tiene:
pos = args_handler.rfind(",")
if pos > 0:
args_handler = args_handler[:pos] + args_handler[pos+1:]
#le saco fin de linea
pos = args_handler.rfind("\n")
args_handler = args_handler[:pos] + args_handler[pos+1:]
ARGS_HANDLER.append(args_handler)
    # Decode and dispatch, keeping the packet on the stack
# Suggested by hmk
if base_name == "ServerPacket" :
f.write("""
function {base_name}DecodeAndDispatch(buffer, handler) {{
if (buffer.length() < 1) return;
var PacketID = buffer.ReadByte();
switch (PacketID) {{
""".format(base_name=base_name))
for i, x in enumerate(P):
if not x: continue
f.write("""
case {i}:
{{
{decode_dispatch}
break;
}}
""".format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))
f.write("""
default:
{{
msg = "error decoding packet id: " + PacketID;
throw new Error(msg);
}}
}}
}}
""".format())
fph.write("""
/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/
""".format(base_name=base_name))
for i, x in enumerate(P):
if not x: continue
fph.write("""\n\thandle{name}: function ({arg_handler}){{ \n""".format(base_name=base_name, name=x.name, arg_handler = ARGS_HANDLER.pop(0)))
#fph.write(HANDLERS.pop(0))
fph.write("""\t\tlog.network("TODO: handle{name} ");\n\t}},\n""".format(base_name=base_name, name=x.name))
        for i, x in enumerate(P):
            if not x: continue
            #fph.write("""\n\thandle{name}: function (p){{ \n""".format(base_name=base_name, name=x.name))
            #fph.write(HANDLERS.pop(0))
            #fph.write("""\t\talert("TODO: handle{name} ");\n\t}},\n""".format(base_name=base_name, name=x.name))
            # With the stub-writing lines above commented out, this loop only
            # re-emits the banner once per packet.
            fph.write("""
/** THIS FILE EXISTS ONLY TO MAKE WRITING THE HANDLERS FOR THE FIRST TIME EASIER; IT IS NOT USED ANYWHERE **************************************************************************************************************************************************/
""")
def write_packets():
f = open("protocol.js", "w")
fph = open("protocolhandlerAux.js", "w")
f.write("""
/* Automatically generated file */
define(['enums'], function (Enums) {
""")
write_packets_from(f,fph, "ClientPacket", "client", CLIENT_PACKETS)
write_packets_from(f,fph, "ClientGMPacket", "clientgm", CLIENT_GM_PACKETS)
write_packets_from(f,fph, "ServerPacket", "server", SERVER_PACKETS)
    # Multimessages are hardcoded here. // TODO: do this properly
f.write("""
class Protocolo{
""")
for builder in BUILDERS:
f.write(builder)
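    # Note: in the emitted method below, the bare identifier
    # ServerPacketDecodeAndDispatch resolves to the module-level function
    # generated earlier in protocol.js, not to the method itself, so it is a
    # thin forwarding wrapper rather than infinite recursion.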
f.write("""
ServerPacketDecodeAndDispatch(buffer, handler){
ServerPacketDecodeAndDispatch(buffer, handler);
}
""")
f.write("""
}
return Protocolo;
}); """)
f.close()
fph.close()
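
# Usage sketch: running this script directly (it expects the star-imported
# genpackets/gendefs_js modules to provide CLIENT_PACKETS, CLIENT_GM_PACKETS,
# SERVER_PACKETS and the TYPE_* tables) writes protocol.js (the wire protocol)
# and protocolhandlerAux.js (first-time handler stubs) into the current
# working directory.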
def escribir_multimessage(f):
DECODE_DISPATCH.append('''
var msgIdx = buffer.ReadByte();
switch (msgIdx) {
case Enums.eMessage.NPCHitUser:
handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());
break;
case Enums.eMessage.UserHitNPC:
handler.handleUserHitNPC(buffer.ReadLong());
break;
case Enums.eMessage.UserAttackedSwing:
handler.handleUserAttackedSwing(buffer.ReadInteger());
break;
case Enums.eMessage.UserHittedByUser:
handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());
break;
case Enums.eMessage.UserHittedUser:
handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());
break;
case Enums.eMessage.WorkRequestTarget:
handler.handleWorkRequestTarget(buffer.ReadByte());
break;
case Enums.eMessage.HaveKilledUser:
handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());
break;
case Enums.eMessage.UserKill:
handler.handleUserKill(buffer.ReadInteger());
break;
case Enums.eMessage.Home:
handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());
break;
case Enums.eMessage.DontSeeAnything:
handler.handleDontSeeAnything();
break;
case Enums.eMessage.NPCSwing:
handler.handleNPCSwing();
break;
case Enums.eMessage.NPCKillUser:
handler.handleNPCKillUser();
break;
case Enums.eMessage.BlockedWithShieldUser:
handler.handleBlockedWithShieldUser();
break;
case Enums.eMessage.BlockedWithShieldOther:
handler.handleBlockedWithShieldOther();
break;
case Enums.eMessage.UserSwing:
handler.handleUserSwing();
break;
case Enums.eMessage.SafeModeOn:
handler.handleSafeModeOn();
break;
case Enums.eMessage.SafeModeOff:
handler.handleSafeModeOff();
break;
case Enums.eMessage.ResuscitationSafeOff:
handler.handleResuscitationSafeOff();
break;
case Enums.eMessage.ResuscitationSafeOn:
handler.handleResuscitationSafeOn();
break;
case Enums.eMessage.NobilityLost:
handler.handleNobilityLost();
break;
case Enums.eMessage.CantUseWhileMeditating:
handler.handleCantUseWhileMeditating();
break;
case Enums.eMessage.EarnExp:
handler.handleEarnExp();
break;
case Enums.eMessage.FinishHome:
handler.handleFinishHome();
break;
case Enums.eMessage.CancelHome:
handler.handleCancelHome();
break;
default:
throw new Error("Multimessage: " + msgIdx + " no reconocido por el protocolo");
}
''')
ARGS_HANDLER.append("msgIdx,args")
def main():
write_packets()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "22dccf6bb76dab735f373089d0772f475b2d5a5d",
"index": 6849,
"step-1": "<mask token>\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = 
{'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return 
Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = 
{'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return 
Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nBUILDERS = []\nHANDLERS = []\nDECODE_DISPATCH = []\nARGS_HANDLER = []\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n 
).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = {'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n 
ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom genpackets import *\nfrom gendefs_js import *\nBUILDERS = []\nHANDLERS = []\nDECODE_DISPATCH = []\nARGS_HANDLER = []\n\n\ndef write_packets_from(f, fph, base_name, namespace, P):\n if base_name != 'ServerPacket':\n f.write('var {base_name}ID = {{ \\n'.format(base_name=base_name))\n for i, x in enumerate(P):\n if x:\n f.write(' {name} : {packet_id}'.format(base_name=\n base_name, name=x.name, packet_id=i))\n f.write(',\\n')\n f.write(' {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n'.\n format(base_name=base_name, packet_id=len(P)))\n \"\"\"\n f.write(\"\"\\\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\\\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\\\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\\\".format(i=i, name=x.name))\n\n f.write(\"\"\\\"\n }}\n return p;\n}}\n\"\"\\\".format())\n \"\"\"\n for i, x in enumerate(P):\n if not x:\n continue\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = ''\n min_byte_count = 0\n ctor_fields_bytequeue = ''\n parametros_fields = ''\n parametros_args = ''\n serialize_fields = ''\n if x.name == 'MultiMessage':\n escribir_multimessage(f)\n continue\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 255\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = y[1] & TYPE_ARRAY == TYPE_ARRAY\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n ctor_fields += ', ' + arg_name + '()'\n items_assign_e.append(' {arg_name}: {arg_name},'.\n format(arg_name=arg_name))\n items_assign_build.append(' e.{arg_name}= {arg_name};'.\n format(arg_name=arg_name))\n if arg_is_array:\n array_size = y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(' {arg_name}; '.format(arg_type_str\n =arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append('{arg_name} '.format(\n arg_type_str=arg_type_sig_str, arg_name=arg_name,\n array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name, array_size=array_size)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(' {arg_type_str} {arg_name}; '.\n format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append('{arg_type_str} {arg_name}'.\n format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(\n arg_is_array).format(arg_name=arg_name,\n type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n parametros_args += x.get_parametros_args_fmt(arg_is_array\n ).format(arg_name=arg_name, type_reader_name=\n type_reader_name)\n serialize_fields += 
x.get_serialize_fields_fmt(arg_is_array\n ).format(arg_name=arg_name, type_writer_name=\n type_writer_name)\n format_args = {'base_name': base_name, 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields, 'packet_id': i, 'min_byte_count':\n min_byte_count, 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields, 'parametros_fields':\n parametros_fields, 'parametros_args': parametros_args}\n if base_name != 'ServerPacket':\n f.write(x.get_header_fmt().format(**format_args))\n BUILDERS.append(x.get_builder_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n if base_name == 'ServerPacket':\n dec_dispatch = x.get_parametros_fmt().format(**format_args)\n pos = dec_dispatch.rfind(',')\n if pos > 0:\n dec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos + 1:]\n DECODE_DISPATCH.append(dec_dispatch)\n if base_name == 'ServerPacket':\n args_handler = x.get_argumentosHandler_fmt().format(**format_args)\n pos = args_handler.rfind(',')\n if pos > 0:\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n pos = args_handler.rfind('\\n')\n args_handler = args_handler[:pos] + args_handler[pos + 1:]\n ARGS_HANDLER.append(args_handler)\n if base_name == 'ServerPacket':\n f.write(\n \"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n f.write(\n \"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\"\n .format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n f.write(\n \"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\"\n .format())\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n .format(base_name=base_name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write('\\n\\thandle{name}: function ({arg_handler}){{ \\n'.\n format(base_name=base_name, name=x.name, arg_handler=\n ARGS_HANDLER.pop(0)))\n fph.write('\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n'.\n format(base_name=base_name, name=x.name))\n for i, x in enumerate(P):\n if not x:\n continue\n fph.write(\n \"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\"\n )\n\n\ndef write_packets():\n f = open('protocol.js', 'w')\n fph = open('protocolhandlerAux.js', 'w')\n f.write(\n \"\\n/* Automatically generated file */\\n\\ndefine(['enums'], function (Enums) {\\n\"\n )\n write_packets_from(f, fph, 'ClientPacket', 'client', CLIENT_PACKETS)\n write_packets_from(f, fph, 'ClientGMPacket', 'clientgm', CLIENT_GM_PACKETS)\n write_packets_from(f, fph, 'ServerPacket', 'server', SERVER_PACKETS)\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n f.write(\n \"\"\"\n 
ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\"\n )\n f.write('\\n }\\n\\n return Protocolo;\\n}); ')\n f.close()\n fph.close()\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append(\n \"\"\"\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido por el protocolo\");\n }\n\"\"\"\n )\n ARGS_HANDLER.append('msgIdx,args')\n\n\ndef main():\n write_packets()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/bin/env python\n# coding: utf-8\n\n\"\"\"\nDakara Online protocol generator, by Alejandro Santos\n\"\"\"\n\nfrom genpackets import *\nfrom gendefs_js import *\n\nBUILDERS = []\nHANDLERS = []\nDECODE_DISPATCH = []\nARGS_HANDLER = []\ndef write_packets_from(f, fph, base_name, namespace, P):\n\n\n # Enum with IDs\n if base_name != \"ServerPacket\" :\n \tf.write(\"\"\"var {base_name}ID = {{ \\n\"\"\".format(base_name=base_name))\n \tfor i, x in enumerate(P):\n \t\tif x:\n \t\t\tf.write(\" {name} : {packet_id}\".format(base_name=base_name, name=x.name, packet_id=i))\n \t\t\tf.write(\",\\n\")\n \tf.write(\"\"\" {base_name}ID_PACKET_COUNT : {packet_id}\\n}};\\n\"\"\".format(base_name=base_name, packet_id=len(P)))\n\n# Factory\n '''\n f.write(\"\"\"\nfunction {base_name}Factory(buffer) {{\n if (buffer.length() < 1) return 0;\n var p;\n PacketID = buffer.PeekByte();\n\n switch (PacketID) {{\n\"\"\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\"\n case {i}:\n p = new {name}(buffer);\n break;\n\"\"\".format(i=i, name=x.name))\n\n f.write(\"\"\"\n }}\n return p;\n}}\n\"\"\".format())\n '''\n \n for i, x in enumerate(P):\n if not x: continue\n\n header_fields = []\n header_fields_signature = []\n items_assign_e = []\n items_assign_build = []\n ctor_fields = \"\"\n min_byte_count = 0\n ctor_fields_bytequeue = \"\"\n parametros_fields = \"\"\n parametros_args = \"\"\n serialize_fields = \"\"\n\n if x.name == \"MultiMessage\":\n escribir_multimessage(f)\n continue\n\n for y in x.args:\n arg_name = y[0]\n arg_type = y[1] & 0xff\n arg_type_str = TYPE_TO_STR[arg_type]\n arg_type_sig_str = TYPE_TO_SIGNATURE_STR[arg_type]\n arg_is_array = ((y[1] & TYPE_ARRAY) == TYPE_ARRAY)\n type_reader_name = TYPE_TO_READER_NAME[arg_type]\n type_writer_name = TYPE_TO_WRITER_NAME[arg_type]\n\n ctor_fields += \", \" + arg_name + \"()\"\n\n items_assign_e.append(\" {arg_name}: {arg_name},\".format(arg_name=arg_name))\n items_assign_build.append(\" e.{arg_name}= {arg_name};\".format(arg_name=arg_name))\n\n if arg_is_array:\n array_size=y[2]\n min_byte_count += TYPE_SIZE[arg_type] * array_size\n header_fields.append(\" {arg_name}; \".format(arg_type_str=arg_type_str, arg_name=arg_name, array_size=array_size))\n header_fields_signature.append(\"{arg_name} \".format(arg_type_str=arg_type_sig_str, arg_name=arg_name, array_size=array_size))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)\n \tparametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)\n \tparametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name, array_size=array_size)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name, array_size=array_size)\n else:\n min_byte_count += TYPE_SIZE[arg_type]\n header_fields.append(\" {arg_type_str} {arg_name}; \".format(arg_type_str=arg_type_str, arg_name=arg_name))\n header_fields_signature.append(\"{arg_type_str} {arg_name}\".format(arg_type_str=arg_type_sig_str, arg_name=arg_name))\n ctor_fields_bytequeue += x.get_ctor_fields_bytequeue_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)\n parametros_fields += x.get_parametros_fields_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)\n 
parametros_args += x.get_parametros_args_fmt(arg_is_array).format(arg_name=arg_name, type_reader_name=type_reader_name)\n serialize_fields += x.get_serialize_fields_fmt(arg_is_array).format(arg_name=arg_name, type_writer_name=type_writer_name)\n\n format_args = {\n 'base_name': base_name,\n 'name': x.name,\n 'header_fields': '\\n'.join(header_fields),\n 'header_fields_signature': ', '.join(header_fields_signature),\n 'items_assign_e': '\\n'.join(items_assign_e),\n 'items_assign_build': '\\n'.join(items_assign_build),\n 'ctor_fields': ctor_fields,\n 'packet_id': i,\n 'min_byte_count': min_byte_count,\n 'ctor_fields_bytequeue': ctor_fields_bytequeue,\n 'serialize_fields': serialize_fields,\n 'parametros_fields' : parametros_fields,\n 'parametros_args' : parametros_args\n }\n\n # Individual packet header\n if base_name != \"ServerPacket\" :\n \tf.write(x.get_header_fmt().format(**format_args))\n \tBUILDERS.append(x.get_builder_fmt().format(**format_args))\n\n if base_name == \"ServerPacket\" :\n HANDLERS.append(x.get_handler_fmt().format(**format_args))\n\n #para el serverpacketdecodeanddispatch (sin tener que crear packetes)\n if base_name == \"ServerPacket\" :\n \tdec_dispatch = x.get_parametros_fmt().format(**format_args);\n \t#le saco la ultima coma si es que tiene:\n \tpos = dec_dispatch.rfind(\",\")\n \tif pos > 0:\n \t\tdec_dispatch = dec_dispatch[:pos] + dec_dispatch[pos+1:]\n \tDECODE_DISPATCH.append(dec_dispatch)\n\n if base_name == \"ServerPacket\" :\n args_handler = x.get_argumentosHandler_fmt().format(**format_args);\n #le saco la ultima coma si es que tiene:\n pos = args_handler.rfind(\",\")\n if pos > 0:\n \targs_handler = args_handler[:pos] + args_handler[pos+1:]\n #le saco fin de linea\n pos = args_handler.rfind(\"\\n\")\n args_handler = args_handler[:pos] + args_handler[pos+1:]\n ARGS_HANDLER.append(args_handler)\n\n\n\n\n\n \n # Decode and Dispatch, keeping the Packet in the stack\n # Suggested by hmk\n if base_name == \"ServerPacket\" :\n f.write(\"\"\"\nfunction {base_name}DecodeAndDispatch(buffer, handler) {{\n if (buffer.length() < 1) return;\n var PacketID = buffer.ReadByte();\n\n switch (PacketID) {{\n\"\"\".format(base_name=base_name))\n\n for i, x in enumerate(P):\n if not x: continue\n f.write(\"\"\"\n case {i}:\n {{\n {decode_dispatch}\n break;\n }}\n\"\"\".format(i=i, decode_dispatch=DECODE_DISPATCH.pop(0)))\n\n f.write(\"\"\"\n default:\n {{\n msg = \"error decoding packet id: \" + PacketID;\n throw new Error(msg);\n }}\n }}\n}}\n\"\"\".format())\n\n fph.write(\"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\".format(base_name=base_name))\n for i, x in enumerate(P):\n if not x: continue\n fph.write(\"\"\"\\n\\thandle{name}: function ({arg_handler}){{ \\n\"\"\".format(base_name=base_name, name=x.name, arg_handler = ARGS_HANDLER.pop(0)))\n #fph.write(HANDLERS.pop(0))\n fph.write(\"\"\"\\t\\tlog.network(\"TODO: handle{name} \");\\n\\t}},\\n\"\"\".format(base_name=base_name, name=x.name))\n\n for i, x in enumerate(P):\n if not x: continue\n #fph.write(\"\"\"\\n\\thandle{name}: function (p){{ \\n\"\"\".format(base_name=base_name, name=x.name))\n #fph.write(HANDLERS.pop(0))\n #fph.write(\"\"\"\\t\\talert(\"TODO: handle{name} \");\\n\\t}},\\n\"\"\".format(base_name=base_name, name=x.name))\n\n fph.write(\"\"\"\n/** ESTE ARCHIVO SOLO ESTA PARA FACILITAR 
ESCRIBIR LOS HANLDLES POR PRIMERA VEZ, NO TINENE NINGUN USO ***************************************************************************************************************************************************/\n\"\"\")\n\n\ndef write_packets():\n f = open(\"protocol.js\", \"w\")\n fph = open(\"protocolhandlerAux.js\", \"w\")\n\n f.write(\"\"\"\n/* Automatically generated file */\n\ndefine(['enums'], function (Enums) {\n\"\"\")\n\n write_packets_from(f,fph, \"ClientPacket\", \"client\", CLIENT_PACKETS)\n write_packets_from(f,fph, \"ClientGMPacket\", \"clientgm\", CLIENT_GM_PACKETS)\n write_packets_from(f,fph, \"ServerPacket\", \"server\", SERVER_PACKETS)\n\n #Multimessages hardcodeado: // TODO ; hacerlo bien\n f.write(\"\"\"\n class Protocolo{\n\"\"\")\n for builder in BUILDERS:\n f.write(builder)\n\n f.write(\"\"\"\n ServerPacketDecodeAndDispatch(buffer, handler){\n ServerPacketDecodeAndDispatch(buffer, handler);\n }\n \"\"\")\n f.write(\"\"\"\n }\n\n return Protocolo;\n}); \"\"\")\n\n\n\n\n\n f.close()\n fph.close()\n\n\n\ndef escribir_multimessage(f):\n DECODE_DISPATCH.append('''\n\n var msgIdx = buffer.ReadByte();\n switch (msgIdx) {\n\n case Enums.eMessage.NPCHitUser:\n handler.handleNPCHitUser(buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHitNPC:\n handler.handleUserHitNPC(buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserAttackedSwing:\n handler.handleUserAttackedSwing(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedByUser:\n handler.handleUserHittedByUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.UserHittedUser:\n handler.handleUserHittedUser(buffer.ReadInteger(), buffer.ReadByte(), buffer.ReadInteger());\n break;\n\n case Enums.eMessage.WorkRequestTarget:\n handler.handleWorkRequestTarget(buffer.ReadByte());\n break;\n\n case Enums.eMessage.HaveKilledUser:\n handler.handleHaveKilledUser(buffer.ReadInteger(),buffer.ReadLong());\n break;\n\n case Enums.eMessage.UserKill:\n handler.handleUserKill(buffer.ReadInteger());\n break;\n\n case Enums.eMessage.Home:\n handler.handleHome(buffer.ReadByte(),buffer.ReadInteger(),buffer.ReadUnicodeString());\n break;\n\n case Enums.eMessage.DontSeeAnything:\n handler.handleDontSeeAnything();\n break;\n\n case Enums.eMessage.NPCSwing:\n\n handler.handleNPCSwing();\n break;\n\n case Enums.eMessage.NPCKillUser:\n\n handler.handleNPCKillUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldUser:\n\n handler.handleBlockedWithShieldUser();\n break;\n\n case Enums.eMessage.BlockedWithShieldOther:\n\n handler.handleBlockedWithShieldOther();\n break;\n\n case Enums.eMessage.UserSwing:\n\n handler.handleUserSwing();\n break;\n\n case Enums.eMessage.SafeModeOn:\n\n handler.handleSafeModeOn();\n break;\n\n case Enums.eMessage.SafeModeOff:\n\n handler.handleSafeModeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOff:\n\n handler.handleResuscitationSafeOff();\n break;\n\n case Enums.eMessage.ResuscitationSafeOn:\n\n handler.handleResuscitationSafeOn();\n break;\n\n case Enums.eMessage.NobilityLost:\n\n handler.handleNobilityLost();\n break;\n\n case Enums.eMessage.CantUseWhileMeditating:\n\n handler.handleCantUseWhileMeditating();\n break;\n\n case Enums.eMessage.EarnExp:\n\n handler.handleEarnExp();\n break;\n\n case Enums.eMessage.FinishHome:\n\n handler.handleFinishHome();\n break;\n\n case Enums.eMessage.CancelHome:\n\n handler.handleCancelHome();\n break;\n\n default:\n throw new Error(\"Multimessage: \" + msgIdx + \" no reconocido 
por el protocolo\");\n }\n''')\n ARGS_HANDLER.append(\"msgIdx,args\")\n\n\ndef main():\n write_packets()\n\nif __name__ == '__main__':\n main()",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_visual_coding_2p_analysis
----------------------------------

Tests for `visual_coding_2p_analysis` module.
"""
import pytest


@pytest.fixture
def decorated_example():
    """Sample pytest fixture.
    See more at: http://doc.pytest.org/en/latest/fixture.html
    """

def test_example(decorated_example):
    """Sample pytest test function with the pytest fixture as an argument.
    """
    import visual_coding_2p_analysis
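

# Illustrative only (not part of the generated template): a fixture that
# returns a value, in the style described at the pytest docs linked above.
@pytest.fixture
def sample_data():
    """Hypothetical fixture returning data for a test."""
    return {"answer": 42}

def test_sample_data(sample_data):
    """Hypothetical test consuming the fixture's return value."""
    assert sample_data["answer"] == 42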
|
flexible
|
{
"blob_id": "ae3198e68d9479605327b729c01fb15eae87ab98",
"index": 3282,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n\ndef test_example(decorated_example):\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\n \"\"\"\n import visual_coding_2p_analysis\n",
"step-4": "<mask token>\nimport pytest\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\n\ndef test_example(decorated_example):\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\n \"\"\"\n import visual_coding_2p_analysis\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_visual_coding_2p_analysis\n----------------------------------\n\nTests for `visual_coding_2p_analysis` module.\n\"\"\"\nimport pytest\n\n\n@pytest.fixture\ndef decorated_example():\n \"\"\"Sample pytest fixture.\n See more at: http://doc.pytest.org/en/latest/fixture.html\n \"\"\"\n\ndef test_example(decorated_example):\n \"\"\"Sample pytest test function with the pytest fixture as an argument.\n \"\"\"\n import visual_coding_2p_analysis\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/bin/python3
# TODO: implement the stack O(N) version

'''
Naive: O(N^3), i.e. sum_{k=1..N} O(k * (N - k + 1))
    for each window size k
        for each window of size k in the array
            traverse the window to find its min; keep the max of these mins

Naive with heap: O(N^2 log N)
    for each window size k                                     O(N)
        traverse the array, keeping a heap over the window     O(N log N)
        read each window's min off the heap, track their max   O(1) each

DP:
Notice that min(W, p), the minimum over the window of size W at position p,
is equal to min(min(W - 1, p), min(W - 1, p + 1)). Therefore, DP over these
tables computes each entry in O(1), but there are O(N^2) entries, so the
total work is O(N^2). Is this good enough? No.

Domination windows:
Let us say that i dominates a contiguous range of n values if it's lower than
all n of its neighboring values. This means that i will show up as a window
minimum for every window size up to n. We want, for each n, the largest i
that dominates a range of size n. Now how to find this efficiently? If we
iterate through each i and compare it to its n neighbors, that will also be
O(N^2) time.

Start with the lowest number and 1-dimensional flood fill. This will take
O(N^2) time in the worst case though.

However, you don't actually have to perform the flood fill. Instead, we can
just use the coordinates of the lower numbers already processed and perform
binary search to find the closest such coordinates to a given coordinate in
O(log N) time.

Overall this means that we iterate through each number, starting from the
lowest, and perform O(log N) time binary searches to find the boundaries over
which this element i dominates. Total time is O(N log N).
'''

import os

from bisect import insort_left
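

# Illustrative reference implementation (not part of the submitted solution):
# the "Naive" O(N^3) approach from the docstring above, handy as an oracle for
# checking the faster versions on small inputs.
def riddle_bruteforce(arr):
    n = len(arr)
    return [max(min(arr[i:i + w]) for i in range(n - w + 1))
            for w in range(1, n + 1)]
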
# Complete the riddle function below.
def riddle(lst):
    '''
    Better summary than the module docstring of what's happening:

    Define a value `v` in the list to dominate a range of size `n`, including
    `v` itself, if `v` is smaller than all other numbers in this contiguous
    range. Define `v`'s "dominating window" to be the largest such range. If
    `v` has a dominating window of size `n`, then it must show up as a window
    minimum for every window size `w` <= `n`. Therefore, to find the maximum
    of all such window minimums, we only need to find, for each `w` between 1
    and `N`, the maximum `v` which dominates a range of size `w` or greater.

    To do this, the naive algorithm is to, for each number, flood fill in
    each direction until you hit a number smaller than itself. However, we
    can instead start with the smallest number, and keep a list of indices we
    have already processed, which we know hold numbers smaller than the one
    we're processing. Using binary search, we can find the pair of processed
    indices bounding the current index in O(log N) time. Repeat for each of
    `N` numbers for a total time complexity of O(N log N).

    Finally, for each window size `w`, take the maximum `v` that dominates a
    range of size `w` or larger (the suffix max at the end of this function).

    This is still not optimal: there is an O(N) solution using stacks (see
    the sketch after `bsearch` below).
    '''
max_by_w_size = { w: -float('inf') for w in range(1, len(lst) + 1) }
# note that bounding_indices are indexes into len(lst), not values themselves
bounding_indices = [-1, len(lst)]
sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])
for i, value in sorted_lst:
# note that l_index and r_index are indices to the bounding indices
r_index = bsearch(bounding_indices, i)
l_index = r_index - 1
l_point = bounding_indices[l_index]
r_point = bounding_indices[r_index]
# (l_point + 1, r_point) defines a "dominating window" for `value`
w = r_point - (l_point + 1)
assert w > 0
max_by_w_size[w] = max(max_by_w_size[w], value)
insort_left(bounding_indices, i)
m = -float('inf')
maxes = []
for w in reversed(range(1, len(lst) + 1)):
m = max(m, max_by_w_size[w])
maxes.append(m)
return reversed(maxes)
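
# Worked trace of riddle() on [2, 6, 1, 12] (illustrative, computed by hand).
# Values are processed in increasing order 1, 2, 6, 12:
#    1 (index 2): bounded by (-1, 4) -> dominates w = 4; indices -> [-1, 2, 4]
#    2 (index 0): bounded by (-1, 2) -> w = 2; indices -> [-1, 0, 2, 4]
#    6 (index 1): bounded by (0, 2)  -> w = 1; indices -> [-1, 0, 1, 2, 4]
#   12 (index 3): bounded by (2, 4)  -> w = 1
# The suffix max over window sizes then yields [12, 2, 1, 1].
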
def bsearch(lst, target):
    # Rightmost insertion point for `target` in the sorted list `lst`
    # (bisect.bisect_right, for the distinct values used here). In riddle()
    # the target index is never already present in bounding_indices, so the
    # equality branch is only there for safety.
    i, j = 0, len(lst)
    while i < j:
        mid = (i + j) // 2
        if lst[mid] == target:
            return mid + 1  # insert on the right side of the equal element
        elif lst[mid] < target:
            i = mid + 1
        else:
            j = mid
    return i
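

# Sketch of the O(N) stack-based solution mentioned in the docstrings above
# (my illustration, not the author's submission). A monotonic stack finds, for
# each element, the nearest strictly smaller element on each side, which gives
# that element's dominating window directly; a suffix max over window sizes
# then finishes the job.
def riddle_stack(arr):
    n = len(arr)
    left = [-1] * n   # index of nearest strictly smaller element on the left
    right = [n] * n   # index of nearest strictly smaller element on the right
    stack = []
    for i in range(n):
        while stack and arr[stack[-1]] >= arr[i]:
            stack.pop()
        left[i] = stack[-1] if stack else -1
        stack.append(i)
    stack = []
    for i in reversed(range(n)):
        while stack and arr[stack[-1]] >= arr[i]:
            stack.pop()
        right[i] = stack[-1] if stack else n
        stack.append(i)
    best = [-float('inf')] * (n + 1)  # best[w]: max value dominating exactly w
    for i in range(n):
        w = right[i] - left[i] - 1
        best[w] = max(best[w], arr[i])
    for w in reversed(range(1, n)):
        best[w] = max(best[w], best[w + 1])  # size w inherits larger windows
    return best[1:]
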
def riddle_dp(arr):
    '''
    Too slow to pass large test cases (O(N^2) time and memory). See `riddle`.
    '''
    N = len(arr)
    min_w = {}  # maps (window_size, window_position) -> window minimum
    for i, el in enumerate(arr):
        min_w[(1, i)] = el
    for w in range(2, len(arr) + 1):
        for i in range(N - w + 1):
            min_w[(w, i)] = min(min_w[(w - 1, i)], min_w[(w - 1, i + 1)])
    return [max(min_w[(w, i)] for i in range(N - w + 1)) for w in range(1, N + 1)]
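

# Quick cross-check of the implementations above on a small hand-computed
# example (illustrative only; not part of the submitted file).
def _sanity_check():
    sample = [2, 6, 1, 12]
    expected = [12, 2, 1, 1]
    assert list(riddle(sample)) == expected
    assert riddle_dp(sample) == expected
    assert riddle_bruteforce(sample) == expected
    assert riddle_stack(sample) == expected
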
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
arr = list(map(int, input().rstrip().split()))
res = riddle(arr)
fptr.write(' '.join(map(str, res)))
fptr.write('\n')
fptr.close()
|
flexible
|
{
"blob_id": "dce7fd0c9ed8e1d433f9131a8d137c8dcca4ac56",
"index": 8307,
"step-1": "<mask token>\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\ndef riddle_dp(arr):\n \"\"\"\n Too slow to pass large test cases. See `riddle`.\n \"\"\"\n N = len(arr)\n min_w = {}\n for i, el in enumerate(arr):\n min_w[1, i] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n min_w[w, i] = min(min_w[w - 1, i], min_w[w - 1, i + 1])\n return [max(min_w[w, i] for i in range(N - w + 1)) for w in range(1, \n len(arr) + 1)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\ndef riddle_dp(arr):\n \"\"\"\n Too slow to pass large test cases. See `riddle`.\n \"\"\"\n N = len(arr)\n min_w = {}\n for i, el in enumerate(arr):\n min_w[1, i] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n min_w[w, i] = min(min_w[w - 1, i], min_w[w - 1, i + 1])\n return [max(min_w[w, i] for i in range(N - w + 1)) for w in range(1, \n len(arr) + 1)]\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input())\n arr = list(map(int, input().rstrip().split()))\n res = riddle(arr)\n fptr.write(' '.join(map(str, res)))\n fptr.write('\\n')\n fptr.close()\n",
"step-4": "<mask token>\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\nfrom bisect import insort_left\n\n\ndef riddle(lst):\n \"\"\"\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. There is a O(N) solution\n using stacks.\n \"\"\"\n max_by_w_size = {w: (-float('inf')) for w in range(1, len(lst) + 1)}\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\n\ndef riddle_dp(arr):\n \"\"\"\n Too slow to pass large test cases. See `riddle`.\n \"\"\"\n N = len(arr)\n min_w = {}\n for i, el in enumerate(arr):\n min_w[1, i] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n min_w[w, i] = min(min_w[w - 1, i], min_w[w - 1, i + 1])\n return [max(min_w[w, i] for i in range(N - w + 1)) for w in range(1, \n len(arr) + 1)]\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n n = int(input())\n arr = list(map(int, input().rstrip().split()))\n res = riddle(arr)\n fptr.write(' '.join(map(str, res)))\n fptr.write('\\n')\n fptr.close()\n",
"step-5": "#!/bin/python3\n\n# TODO: implement the stack O(N) version\n\n'''\nNaive: O(N^3) or sum_{k=1...N}( O(N^2 (N-K)) )\n for each size N\n for each window of size N in the array\n traverse the window to find the max\n\nNaive with heap: O(N^2 log N)\n for each size N O(N)\n traverse array and accumulate window of size N O(N log N)\n find max O(1)\n\nDP:\nNotice that min(W, p), the min size for window of size W and at position p, is\nequal to min(min(W - 1, p), min(W - 1, p + 1)). Therefore, DP with these\ntables can reduce the size of the problem to O(W^2) ~= O(N^2). Is this good\nenough? No.\n\nDomination windows:\nLet us say that i dominates a contiguous range of n values if it's lower than\nall n of its neighboring values. This means that i will show up as a min window\nwhen considering window sizes of up to size n. We want to find the largest i\nsuch that it domaintes other numbers in a window of size n. Now how to find this\nefficiently? If we iterate through each i and compare it to its n neighbors,\nthat will also be O(N^2) time.\n\nStart with lowest number and 1-dimensional flood fill. This will take O(N^2)\ntime in the worst case though.\n\nHowever, you don't actually have to perform the flood fill. Instead, we can just\nuse the coordinates of lower numbers and perform something like binary search\nto find the closest coordinates to a given coordinate in O(log N) time.\n\nOverall this means that we iterate through each number, starting from the\nlowest, and perform O(log N) time binary searches to find the boundaries over\nwhich this element i dominates. Total time is O(N log N).\n'''\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\nfrom collections import defaultdict\nfrom heapq import heappush, heappop\nfrom bisect import insort_left\n\n# Complete the riddle function below.\ndef riddle(lst):\n '''\n Holy fuck.\n\n Better summary than above of what's happening:\n\n Define an value `v` in the list to dominate a range of size `n`, including `v`\n itself, if `v` is smaller than all other numbers in this contiguous range.\n Define `v`'s \"dominating window\" to be the largest such range. If `v` has a\n dominating window of size `n`, then it must show up as a value when we take\n minimums of size `w`. Therefore, to find the maximum of all such minimum\n windows, we only need to find the maximum `v` which dominates a range of size\n `n` or greater, for each `n` between 1 and `N`.\n\n To do this, the naive algorithm is to, for each number, flood fill in each\n direction until you hit a number smaller than itself. However, we can instead\n start with the smallest number, and keep a list of indices which we have\n already processed, that we know is smaller than the number we're processing.\n Using binary search, we can find the interval indices in which the current\n index lies, and find the bounding interval in O(log N) time. Repeat for each\n of `n` numbers for a total time complexity of O(N log N).\n\n Finally, for each window size `w`, find the maximum `v` that dominates a range\n of size `n` or larger.\n\n It seems like this is not the best solution though. 
There is a O(N) solution\n using stacks.\n '''\n max_by_w_size = { w: -float('inf') for w in range(1, len(lst) + 1) }\n # note that bounding_indices are indexes into len(lst), not values themselves\n bounding_indices = [-1, len(lst)]\n sorted_lst = sorted(enumerate(lst), key=lambda x: x[1])\n for i, value in sorted_lst:\n # note that l_index and r_index are indices to the bounding indices\n r_index = bsearch(bounding_indices, i)\n l_index = r_index - 1\n l_point = bounding_indices[l_index]\n r_point = bounding_indices[r_index]\n # (l_point + 1, r_point) defines a \"dominating window\" for `value`\n w = r_point - (l_point + 1)\n assert w > 0\n max_by_w_size[w] = max(max_by_w_size[w], value)\n insort_left(bounding_indices, i)\n\n m = -float('inf')\n maxes = []\n for w in reversed(range(1, len(lst) + 1)):\n m = max(m, max_by_w_size[w])\n maxes.append(m)\n return reversed(maxes)\n\ndef bsearch(lst, target):\n i, j = 0, len(lst)\n while i < j:\n mid = (i + j) // 2\n if lst[mid] == target:\n return mid + 1 # insert on the right side of the same number, not that it should matter?\n elif lst[mid] < target:\n i = mid + 1\n else:\n j = mid\n return i\n\ndef riddle_dp(arr):\n '''\n Too slow to pass large test cases. See `riddle`.\n '''\n N = len(arr)\n min_w = {} # dict of (win_size, win_position) to minimum\n for i, el in enumerate(arr):\n min_w[(1, i)] = el\n for w in range(2, len(arr) + 1):\n for i in range(N - w + 1):\n # print('w, i', w, i)\n min_w[(w, i)] = min(min_w[(w - 1, i)], min_w[(w - 1, i + 1)])\n # print('min_w', min_w)\n return [max(min_w[(w, i)] for i in range(N - w + 1)) for w in range(1, len(arr) + 1)]\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n res = riddle(arr)\n\n fptr.write(' '.join(map(str, res)))\n fptr.write('\\n')\n\n fptr.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def get_level_diff(word, only_common=False):
if only_common:
word_df = df[(df['word'] == word) & (df['common'] == 1)]
else:
word_df = df[df['word'] == word]
return (word_df.values[0][3], word_df.values[0][8]) if len(word_df
) > 0 else (None, None)
<|reserved_special_token_0|>
def translate_words(words, target):
key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'
words_string = ''
for word in words:
words_string += '&q='
words_string += word
url = (
f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'
)
res = json.loads(requests.get(url).content)['data']['translations']
return [s['translatedText'] for s in res]
def hello_http(request):
request_args = request.args
if request_args and 'words' in request_args:
words = json.loads(request_args['words'])
if isinstance(words, list) and len(words) > 0:
target = request_args.get('target', 'es')
by_str = request_args.get('by', 'level')
by = 1 if by_str == 'freq' else 0
reverse = request_args.get('reverse', 'false') == 'true'
only_common = request_args.get('only-common', 'false') == 'true'
results = order_words(words, by=by, reverse=reverse,
only_common=only_common)
translated = translate_words([result[0] for result in results],
target)
return json.dumps([[results[i][0], results[i][1], translated[i]
] for i in range(len(results))])
else:
return 'not list'
else:
return 'error'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_level_diff(word, only_common=False):
if only_common:
word_df = df[(df['word'] == word) & (df['common'] == 1)]
else:
word_df = df[df['word'] == word]
return (word_df.values[0][3], word_df.values[0][8]) if len(word_df
) > 0 else (None, None)
def order_words(words, by=0, reverse=False, only_common=False):
if by not in {0, 1}:
raise Exception('by is either 0 (by level), 1 (by frequency)')
if by == 1:
reverse = not reverse
word_results = []
for word in words:
level, freq = get_level_diff(word, only_common=only_common)
if level != None:
if by == 0:
word_results.append((word, level))
else:
word_results.append((word, freq))
word_results.sort(key=lambda x: x[1], reverse=reverse)
return word_results
def translate_words(words, target):
key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'
words_string = ''
for word in words:
words_string += '&q='
words_string += word
url = (
f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'
)
res = json.loads(requests.get(url).content)['data']['translations']
return [s['translatedText'] for s in res]
def hello_http(request):
request_args = request.args
if request_args and 'words' in request_args:
words = json.loads(request_args['words'])
if isinstance(words, list) and len(words) > 0:
target = request_args.get('target', 'es')
by_str = request_args.get('by', 'level')
by = 1 if by_str == 'freq' else 0
reverse = request_args.get('reverse', 'false') == 'true'
only_common = request_args.get('only-common', 'false') == 'true'
results = order_words(words, by=by, reverse=reverse,
only_common=only_common)
translated = translate_words([result[0] for result in results],
target)
return json.dumps([[results[i][0], results[i][1], translated[i]
] for i in range(len(results))])
else:
return 'not list'
else:
return 'error'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('result.csv', newline='') as f:
df = pd.read_csv(f)
def get_level_diff(word, only_common=False):
if only_common:
word_df = df[(df['word'] == word) & (df['common'] == 1)]
else:
word_df = df[df['word'] == word]
return (word_df.values[0][3], word_df.values[0][8]) if len(word_df
) > 0 else (None, None)
def order_words(words, by=0, reverse=False, only_common=False):
if by not in {0, 1}:
raise Exception('by is either 0 (by level), 1 (by frequency)')
if by == 1:
reverse = not reverse
word_results = []
for word in words:
level, freq = get_level_diff(word, only_common=only_common)
if level != None:
if by == 0:
word_results.append((word, level))
else:
word_results.append((word, freq))
word_results.sort(key=lambda x: x[1], reverse=reverse)
return word_results
def translate_words(words, target):
key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'
words_string = ''
for word in words:
words_string += '&q='
words_string += word
url = (
f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'
)
res = json.loads(requests.get(url).content)['data']['translations']
return [s['translatedText'] for s in res]
def hello_http(request):
request_args = request.args
if request_args and 'words' in request_args:
words = json.loads(request_args['words'])
if isinstance(words, list) and len(words) > 0:
target = request_args.get('target', 'es')
by_str = request_args.get('by', 'level')
by = 1 if by_str == 'freq' else 0
reverse = request_args.get('reverse', 'false') == 'true'
only_common = request_args.get('only-common', 'false') == 'true'
results = order_words(words, by=by, reverse=reverse,
only_common=only_common)
translated = translate_words([result[0] for result in results],
target)
return json.dumps([[results[i][0], results[i][1], translated[i]
] for i in range(len(results))])
else:
return 'not list'
else:
return 'error'
<|reserved_special_token_1|>
from flask import escape
import pandas as pd
import json
import requests
with open('result.csv', newline='') as f:
df = pd.read_csv(f)
def get_level_diff(word, only_common=False):
if only_common:
word_df = df[(df['word'] == word) & (df['common'] == 1)]
else:
word_df = df[df['word'] == word]
return (word_df.values[0][3], word_df.values[0][8]) if len(word_df
) > 0 else (None, None)
def order_words(words, by=0, reverse=False, only_common=False):
if by not in {0, 1}:
raise Exception('by is either 0 (by level), 1 (by frequency)')
if by == 1:
reverse = not reverse
word_results = []
for word in words:
level, freq = get_level_diff(word, only_common=only_common)
if level != None:
if by == 0:
word_results.append((word, level))
else:
word_results.append((word, freq))
word_results.sort(key=lambda x: x[1], reverse=reverse)
return word_results
def translate_words(words, target):
key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'
words_string = ''
for word in words:
words_string += '&q='
words_string += word
url = (
f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'
)
res = json.loads(requests.get(url).content)['data']['translations']
return [s['translatedText'] for s in res]
def hello_http(request):
request_args = request.args
if request_args and 'words' in request_args:
words = json.loads(request_args['words'])
if isinstance(words, list) and len(words) > 0:
target = request_args.get('target', 'es')
by_str = request_args.get('by', 'level')
by = 1 if by_str == 'freq' else 0
reverse = request_args.get('reverse', 'false') == 'true'
only_common = request_args.get('only-common', 'false') == 'true'
results = order_words(words, by=by, reverse=reverse,
only_common=only_common)
translated = translate_words([result[0] for result in results],
target)
return json.dumps([[results[i][0], results[i][1], translated[i]
] for i in range(len(results))])
else:
return 'not list'
else:
return 'error'
<|reserved_special_token_1|>
from flask import escape
import pandas as pd
import json
import requests
with open('result.csv', newline='') as f:
df = pd.read_csv(f)
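# Assumed layout of result.csv (inferred from order_words below): column 3
# holds a word's difficulty level and column 8 its frequency.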
def get_level_diff(word, only_common=False):
if only_common:
word_df = df[(df['word']==word) & (df['common']==1)]
else:
word_df = df[df['word']==word]
return (word_df.values[0][3], word_df.values[0][8]) if len(word_df) > 0 else (None, None)
# order words based on either level or frequency.
def order_words(words, by=0, reverse=False, only_common=False):
    if by not in {0, 1}:
        raise ValueError("by is either 0 (by level) or 1 (by frequency)")
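    # The direction is presumably flipped so the default order reads "easiest
    # first" in both modes (low level roughly corresponds to high frequency).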
    if by == 1:
        reverse = not reverse
word_results = []
for word in words:
level, freq = get_level_diff(word, only_common=only_common)
        if level is not None:
if by == 0:
word_results.append((word, level))
else:
word_results.append((word, freq))
    word_results.sort(key=lambda x: x[1], reverse=reverse)
return word_results
def translate_words(words, target):
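    # NOTE: a hard-coded API key belongs in configuration (e.g. an environment
    # variable), not in source control.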
key = "AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w"
words_string = ""
for word in words:
words_string += "&q="
words_string += word
url = f"https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}"
res = json.loads(requests.get(url).content)['data']['translations']
return [s['translatedText'] for s in res]
def hello_http(request):
request_args = request.args
    # expected query args: 'words', 'target', 'by', 'reverse', 'only-common'
if request_args and 'words' in request_args:
words = json.loads(request_args['words'])
if isinstance(words, list) and len(words) > 0:
target = request_args.get('target', 'es')
by_str = request_args.get('by', 'level')
by = 1 if by_str == 'freq' else 0
reverse = request_args.get('reverse', 'false') == 'true'
only_common = request_args.get('only-common', 'false') == 'true'
results = order_words(words, by=by, reverse=reverse, only_common=only_common)
translated = translate_words([result[0] for result in results], target)
return json.dumps([[results[i][0], results[i][1], translated[i]] for i in range(len(results))])
else:
return "not list"
else:
return "error"
|
flexible
|
{
"blob_id": "2f489a87e40bea979000dd429cc4cb0150ff4c3b",
"index": 908,
"step-1": "<mask token>\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\n<mask token>\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"step-2": "<mask token>\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"step-3": "<mask token>\nwith open('result.csv', newline='') as f:\n df = pd.read_csv(f)\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"step-4": "from flask import escape\nimport pandas as pd\nimport json\nimport requests\nwith open('result.csv', newline='') as f:\n df = pd.read_csv(f)\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"step-5": "from flask import escape\nimport pandas as pd\nimport json\nimport requests\n\nwith open('result.csv', newline='') as f:\n df = pd.read_csv(f)\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word']==word) & (df['common']==1)]\n else:\n word_df = df[df['word']==word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df) > 0 else (None, None)\n\n# order words based on either level or frequency. \ndef order_words(words, by=0, reverse=False, only_common=False):\n if (by not in {0, 1}): raise Exception(\"by is either 0 (by level), 1 (by frequency)\")\n if (by == 1): reverse = not reverse\n \n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x : x[1], reverse=reverse)\n return word_results\n\ndef translate_words(words, target):\n key = \"AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w\"\n words_string = \"\"\n for word in words:\n words_string += \"&q=\"\n words_string += word\n url = f\"https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}\"\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\ndef hello_http(request):\n request_args = request.args\n\n #'words', 'lang-from', 'lang-to', 'by', 'reverse'\n\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n \n results = order_words(words, by=by, reverse=reverse, only_common=only_common)\n translated = translate_words([result[0] for result in results], target)\n return json.dumps([[results[i][0], results[i][1], translated[i]] for i in range(len(results))])\n else:\n return \"not list\"\n else:\n return \"error\"",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Author: Loren Matilsky
# Date created: 03/02/2019
import matplotlib.pyplot as plt
import numpy as np
import sys, os
sys.path.append(os.environ['raco'])
sys.path.append(os.environ['rapl'])
sys.path.append(os.environ['rapl'] + '/timetrace')
from common import *
from cla_util import *
from plotcommon import *
from timey_util import *
# Set fontsize
fontsize = default_titlesize
# Read command-line arguments (CLAs)
args = sys.argv
clas0, clas = read_clas(args)
dirname = clas0['dirname']
dirname_stripped = strip_dirname(dirname)
# See if magnetism is "on"
magnetism = clas0['magnetism']
# defaults
kwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False, 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None, 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})
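# Flag meanings, as inferred from their use below: 'rad' puts radius rather
# than latitude on the sample axis, 'mval' selects the azimuthal wavenumber
# to plot, 'imag' plots the imaginary instead of the real part, and
# 'mtimerad' reads the m-decomposed time-radius dataset.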
kwargs_default.update(plot_timey_kwargs_default)
# check for bad keys
find_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)
# overwrite defaults
kw = update_dict(kwargs_default, clas)
# add in groupname keys
kw.update(get_quantity_group(kw.groupname, magnetism))
# user may have wanted to change some groupname keys
kw = update_dict(kw, clas)
kw_plot_timey = update_dict(plot_timey_kwargs_default, clas)
# check if we want the real or imaginary vals
if kw.imag:
take_real = False
else:
take_real = True
# baseline time unit
time_unit, time_label, rotation, simple_label = get_time_unit(dirname)
# get grid info
di_grid = get_grid_info(dirname)
datatype = 'mertimelat'
dataname = 'mertimelat'
sampleaxis = di_grid['tt_lat']
if kw.rad:
datatype = 'mertimerad'
dataname = 'mertimerad'
sampleaxis = di_grid['rr']/rsun
if kw.mtimerad:
    kw.rad = True
    datatype = 'mtimerad'
    dataname = 'mtimerad'
    radlevs = get_slice_levels(dirname)
    sampleaxis = radlevs.radius/rsun
datatype += '_mval%03i' %kw.mval
if 'groupname' in kw:
dataname += '_' + kw.groupname
if kw.rcut is not None:
dataname += '_rcut%0.3f' %kw.rcut
#dataname += clas0['tag']
# get data
if kw.the_file is None:
kw.the_file = get_widest_range_file(clas0['datadir'] +\
datatype + '/', dataname)
# Read in the data
print ('reading ' + kw.the_file)
di = get_dict(kw.the_file)
vals = di['vals']
times = di['times']
iters = di['iters']
qvals_avail = np.array(di['qvals'])
if kw.mtimerad:
samplevals_avail = di['latvals']
else:
samplevals_avail = di['samplevals']
iter1, iter2 = get_iters_from_file(kw.the_file)
times /= time_unit
# maybe thin data
if kw.ntot != 'full':
print ("ntot = %i" %kw.ntot)
print ("before thin_data: len(times) = %i" %len(times))
times = thin_data(times, kw.ntot)
iters = thin_data(iters, kw.ntot)
vals = thin_data(vals, kw.ntot)
print ("after thin_data: len(times) = %i" %len(times))
# these all need to be arrays
kw.qvals = make_array(kw.qvals)
kw.isamplevals = make_array(kw.isamplevals)
if not isall(kw.samplevals):
kw.samplevals = make_array(kw.samplevals)
# get raw traces of desired variables
terms = []
for qval in kw.qvals:
qind = np.argmin(np.abs(qvals_avail - qval))
if take_real:
the_term = np.real(vals[:, :, :, qind])
else:
the_term = np.imag(vals[:, :, :, qind])
terms.append(the_term)
# set figure dimensions
sub_width_inches = 7.5
sub_height_inches = 2.0
margin_bottom_inches = 3/8 # space for x-axis label
margin_top_inches = 1
margin_left_inches = 5/8 # space for latitude label
margin_right_inches = 7/8 # space for colorbar
if 'ycut' in clas:
margin_right_inches *= 2
nplots = len(terms)
# determine desired levels to plot
if kw.samplevals is not None: # isamplevals being set indirectly
# check for special 'all' option
if isall(kw.samplevals):
kw.isamplevals = np.arange(len(samplevals_avail))
else:
kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')
for i in range(len(kw.samplevals)):
kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.samplevals[i]))
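# (each requested sampleval is snapped to the nearest available value; e.g.
# samplevals=[0.95] picks the grid radius closest to 0.95 rsun in rad mode)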
# Loop over the desired levels and save plots
for isampleval in kw.isamplevals:
    sampleval = samplevals_avail[isampleval]
# set some labels
axislabel = 'latitude (deg)'
samplelabel = r'$r/R_\odot$' + ' = %.3f' %sampleval
position_tag = '_rval%.3f' %sampleval
if kw.rad:
axislabel = r'$r/R_\odot$'
samplelabel = 'lat = ' + lat_format(sampleval)
position_tag = '_lat' + lat_format(sampleval)
# Put some useful information on the title
maintitle = dirname_stripped
maintitle += '\n' + samplelabel
maintitle += '\nmval = %03i' %kw.mval
if kw.navg is None:
maintitle += '\nt_avg = none'
else:
averaging_time = (times[-1] - times[0])/len(times)*kw.navg
maintitle += '\n' + ('t_avg = %.1f Prot' %averaging_time)
print('plotting sampleval = %0.3f (i = %02i)' %(sampleval, isampleval))
# make plot
fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=sub_width_inches, sub_height_inches=sub_height_inches, margin_left_inches=margin_left_inches, margin_right_inches=margin_right_inches, margin_top_inches=margin_top_inches, margin_bottom_inches=margin_bottom_inches)
for iplot in range(nplots):
ax = axs[iplot, 0]
if kw.rad:
field = terms[iplot][:, isampleval, :]
else:
field = terms[iplot][:, :, isampleval]
plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)
# title the plot
ax.set_title(kw.titles[iplot], fontsize=fontsize)
# Turn the x tick labels off for the top strips
#if iplot < nplots - 1:
# ax.set_xticklabels([])
# Put time label on bottom strip
if iplot == nplots - 1:
ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)
# Put ylabel on middle strip
if iplot == nplots//2:
ax.set_ylabel(axislabel, fontsize=fontsize)
fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle, fontsize=fontsize, ha='left', va='bottom')
# Save the plot
if clas0['saveplot']:
# Make appropriate file name to save
# save the figure
basename = dataname + '_%08i_%08i' %(iter1, iter2)
plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])
if take_real:
realtag = '_real'
else:
realtag = '_imag'
savename = basename + position_tag + realtag + '.png'
print ("saving", plotdir + '/' + savename)
plt.savefig(plotdir + '/' + savename, dpi=200)
# Show the plot if only plotting at one latitude
if clas0['showplot'] and len(kw.isamplevals) == 1:
plt.show()
else:
plt.close()
print ("=======================================")
|
normal
|
{
"blob_id": "97a059d6d34b924a0512ebe6ff5ab1d5ccc072d5",
"index": 8966,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\n<mask token>\nkwargs_default.update(plot_timey_kwargs_default)\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\n<mask token>\nkw.update(get_quantity_group(kw.groupname, magnetism))\n<mask token>\nif kw.imag:\n take_real = False\nelse:\n take_real = True\n<mask token>\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr'] / rsun\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius / rsun\ndatatype += '_mval%03i' % kw.mval\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' % kw.rcut\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',\n dataname)\nprint('reading ' + kw.the_file)\n<mask token>\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals']\n<mask token>\ntimes /= time_unit\nif not kw.ntot == 'full':\n print('ntot = %i' % kw.ntot)\n print('before thin_data: len(times) = %i' % len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print('after thin_data: len(times) = %i' % len(times))\n<mask token>\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\n<mask token>\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\n<mask token>\nif 'ycut' in clas:\n margin_right_inches *= 2\n<mask token>\nif not kw.samplevals is None:\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.\n samplevals[i]))\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n axislabel = 'latitude (deg)'\n samplelabel = '$r/R_\\\\odot$' + ' = %.3f' % sampleval\n position_tag = '_rval%.3f' % sampleval\n if kw.rad:\n axislabel = '$r/R_\\\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n maintitle = dirname_stripped\n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' % kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0]) / len(times) * kw.navg\n maintitle += '\\n' + 't_avg = %.1f Prot' % averaging_time\n print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=\n sub_width_inches, sub_height_inches=sub_height_inches,\n margin_left_inches=margin_left_inches, margin_right_inches=\n margin_right_inches, margin_top_inches=margin_top_inches,\n margin_bottom_inches=margin_bottom_inches)\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n if iplot == nplots // 
2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,\n fontsize=fontsize, ha='left', va='bottom')\n if clas0['saveplot']:\n basename = dataname + '_%08i_%08i' % (iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print('saving', plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print('=======================================')\n",
"step-3": "<mask token>\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\n<mask token>\nfontsize = default_titlesize\nargs = sys.argv\nclas0, clas = read_clas(args)\ndirname = clas0['dirname']\ndirname_stripped = strip_dirname(dirname)\nmagnetism = clas0['magnetism']\nkwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False,\n 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None,\n 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})\nkwargs_default.update(plot_timey_kwargs_default)\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\nkw = update_dict(kwargs_default, clas)\nkw.update(get_quantity_group(kw.groupname, magnetism))\nkw = update_dict(kw, clas)\nkw_plot_timey = update_dict(plot_timey_kwargs_default, clas)\nif kw.imag:\n take_real = False\nelse:\n take_real = True\ntime_unit, time_label, rotation, simple_label = get_time_unit(dirname)\ndi_grid = get_grid_info(dirname)\ndatatype = 'mertimelat'\ndataname = 'mertimelat'\nsampleaxis = di_grid['tt_lat']\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr'] / rsun\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius / rsun\ndatatype += '_mval%03i' % kw.mval\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' % kw.rcut\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',\n dataname)\nprint('reading ' + kw.the_file)\ndi = get_dict(kw.the_file)\nvals = di['vals']\ntimes = di['times']\niters = di['iters']\nqvals_avail = np.array(di['qvals'])\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals']\niter1, iter2 = get_iters_from_file(kw.the_file)\ntimes /= time_unit\nif not kw.ntot == 'full':\n print('ntot = %i' % kw.ntot)\n print('before thin_data: len(times) = %i' % len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print('after thin_data: len(times) = %i' % len(times))\nkw.qvals = make_array(kw.qvals)\nkw.isamplevals = make_array(kw.isamplevals)\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\nterms = []\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\nsub_width_inches = 7.5\nsub_height_inches = 2.0\nmargin_bottom_inches = 3 / 8\nmargin_top_inches = 1\nmargin_left_inches = 5 / 8\nmargin_right_inches = 7 / 8\nif 'ycut' in clas:\n margin_right_inches *= 2\nnplots = len(terms)\nif not kw.samplevals is None:\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.\n samplevals[i]))\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n axislabel = 'latitude (deg)'\n samplelabel = '$r/R_\\\\odot$' + ' = %.3f' % sampleval\n position_tag = '_rval%.3f' % sampleval\n if kw.rad:\n axislabel = '$r/R_\\\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n maintitle = dirname_stripped\n 
maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' % kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0]) / len(times) * kw.navg\n maintitle += '\\n' + 't_avg = %.1f Prot' % averaging_time\n print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=\n sub_width_inches, sub_height_inches=sub_height_inches,\n margin_left_inches=margin_left_inches, margin_right_inches=\n margin_right_inches, margin_top_inches=margin_top_inches,\n margin_bottom_inches=margin_bottom_inches)\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n if iplot == nplots // 2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,\n fontsize=fontsize, ha='left', va='bottom')\n if clas0['saveplot']:\n basename = dataname + '_%08i_%08i' % (iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print('saving', plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print('=======================================')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport sys, os\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\nfrom common import *\nfrom cla_util import *\nfrom plotcommon import *\nfrom timey_util import *\nfontsize = default_titlesize\nargs = sys.argv\nclas0, clas = read_clas(args)\ndirname = clas0['dirname']\ndirname_stripped = strip_dirname(dirname)\nmagnetism = clas0['magnetism']\nkwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False,\n 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None,\n 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})\nkwargs_default.update(plot_timey_kwargs_default)\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\nkw = update_dict(kwargs_default, clas)\nkw.update(get_quantity_group(kw.groupname, magnetism))\nkw = update_dict(kw, clas)\nkw_plot_timey = update_dict(plot_timey_kwargs_default, clas)\nif kw.imag:\n take_real = False\nelse:\n take_real = True\ntime_unit, time_label, rotation, simple_label = get_time_unit(dirname)\ndi_grid = get_grid_info(dirname)\ndatatype = 'mertimelat'\ndataname = 'mertimelat'\nsampleaxis = di_grid['tt_lat']\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr'] / rsun\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius / rsun\ndatatype += '_mval%03i' % kw.mval\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' % kw.rcut\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] + datatype + '/',\n dataname)\nprint('reading ' + kw.the_file)\ndi = get_dict(kw.the_file)\nvals = di['vals']\ntimes = di['times']\niters = di['iters']\nqvals_avail = np.array(di['qvals'])\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals']\niter1, iter2 = get_iters_from_file(kw.the_file)\ntimes /= time_unit\nif not kw.ntot == 'full':\n print('ntot = %i' % kw.ntot)\n print('before thin_data: len(times) = %i' % len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print('after thin_data: len(times) = %i' % len(times))\nkw.qvals = make_array(kw.qvals)\nkw.isamplevals = make_array(kw.isamplevals)\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\nterms = []\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\nsub_width_inches = 7.5\nsub_height_inches = 2.0\nmargin_bottom_inches = 3 / 8\nmargin_top_inches = 1\nmargin_left_inches = 5 / 8\nmargin_right_inches = 7 / 8\nif 'ycut' in clas:\n margin_right_inches *= 2\nnplots = len(terms)\nif not kw.samplevals is None:\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.\n samplevals[i]))\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n axislabel = 'latitude (deg)'\n samplelabel = '$r/R_\\\\odot$' + ' = %.3f' % sampleval\n position_tag = '_rval%.3f' % sampleval\n if kw.rad:\n axislabel = 
'$r/R_\\\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n maintitle = dirname_stripped\n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' % kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0]) / len(times) * kw.navg\n maintitle += '\\n' + 't_avg = %.1f Prot' % averaging_time\n print('plotting sampleval = %0.3f (i = %02i)' % (sampleval, isampleval))\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=\n sub_width_inches, sub_height_inches=sub_height_inches,\n margin_left_inches=margin_left_inches, margin_right_inches=\n margin_right_inches, margin_top_inches=margin_top_inches,\n margin_bottom_inches=margin_bottom_inches)\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n if iplot == nplots // 2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle,\n fontsize=fontsize, ha='left', va='bottom')\n if clas0['saveplot']:\n basename = dataname + '_%08i_%08i' % (iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print('saving', plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print('=======================================')\n",
"step-5": "# Author: Loren Matilsky\n# Date created: 03/02/2019\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys, os\nsys.path.append(os.environ['raco'])\nsys.path.append(os.environ['rapl'])\nsys.path.append(os.environ['rapl'] + '/timetrace')\nfrom common import *\nfrom cla_util import *\nfrom plotcommon import *\nfrom timey_util import *\n\n# Set fontsize\nfontsize = default_titlesize\n\n# Read command-line arguments (CLAs)\nargs = sys.argv\nclas0, clas = read_clas(args)\ndirname = clas0['dirname']\ndirname_stripped = strip_dirname(dirname)\n# See if magnetism is \"on\"\nmagnetism = clas0['magnetism']\n\n# defaults\nkwargs_default = dict({'the_file': None, 'ntot': 500, 'rad': False, 'isamplevals': np.array([0]), 'samplevals': None, 'rcut': None, 'groupname': 'b', 'mval': 1, 'imag': False, 'mtimerad': False})\nkwargs_default.update(plot_timey_kwargs_default)\n\n# check for bad keys\nfind_bad_keys(kwargs_default, clas, clas0['routinename'], justwarn=True)\n\n# overwrite defaults\nkw = update_dict(kwargs_default, clas)\n# add in groupname keys\nkw.update(get_quantity_group(kw.groupname, magnetism))\n# user may have wanted to change some groupname keys\nkw = update_dict(kw, clas)\nkw_plot_timey = update_dict(plot_timey_kwargs_default, clas)\n\n# check if we want the real or imaginary vals\nif kw.imag:\n take_real = False\nelse:\n take_real = True\n\n# baseline time unit\ntime_unit, time_label, rotation, simple_label = get_time_unit(dirname)\n\n# get grid info\ndi_grid = get_grid_info(dirname)\n\ndatatype = 'mertimelat'\ndataname = 'mertimelat'\nsampleaxis = di_grid['tt_lat']\nif kw.rad:\n datatype = 'mertimerad'\n dataname = 'mertimerad'\n sampleaxis = di_grid['rr']/rsun\n\nif kw.mtimerad:\n kw.rad = True\n radlevs = get_slice_levels(dirname)\n datatype = 'mtimerad'\n dataname = 'mtimerad'\n radlevs = get_slice_levels(dirname)\n sampleaxis = radlevs.radius/rsun\n\ndatatype += '_mval%03i' %kw.mval\n\nif 'groupname' in kw:\n dataname += '_' + kw.groupname\nif not kw.rcut is None:\n dataname += '_rcut%0.3f' %kw.rcut\n\n#dataname += clas0['tag']\n# get data\nif kw.the_file is None:\n kw.the_file = get_widest_range_file(clas0['datadir'] +\\\n datatype + '/', dataname)\n\n# Read in the data\nprint ('reading ' + kw.the_file)\ndi = get_dict(kw.the_file)\nvals = di['vals']\ntimes = di['times']\niters = di['iters']\nqvals_avail = np.array(di['qvals'])\nif kw.mtimerad:\n samplevals_avail = di['latvals']\nelse:\n samplevals_avail = di['samplevals'] \n\niter1, iter2 = get_iters_from_file(kw.the_file)\ntimes /= time_unit\n\n# maybe thin data\nif not kw.ntot == 'full':\n print (\"ntot = %i\" %kw.ntot)\n print (\"before thin_data: len(times) = %i\" %len(times))\n times = thin_data(times, kw.ntot)\n iters = thin_data(iters, kw.ntot)\n vals = thin_data(vals, kw.ntot)\n print (\"after thin_data: len(times) = %i\" %len(times))\n\n# these all need to be arrays\nkw.qvals = make_array(kw.qvals)\nkw.isamplevals = make_array(kw.isamplevals)\nif not isall(kw.samplevals):\n kw.samplevals = make_array(kw.samplevals)\n\n# get raw traces of desired variables\nterms = []\nfor qval in kw.qvals:\n qind = np.argmin(np.abs(qvals_avail - qval))\n if take_real:\n the_term = np.real(vals[:, :, :, qind])\n else:\n the_term = np.imag(vals[:, :, :, qind])\n terms.append(the_term)\n\n# set figure dimensions\nsub_width_inches = 7.5\nsub_height_inches = 2.0\nmargin_bottom_inches = 3/8 # space for x-axis label\nmargin_top_inches = 1\nmargin_left_inches = 5/8 # space for latitude label\nmargin_right_inches = 7/8 # space 
for colorbar\nif 'ycut' in clas:\n margin_right_inches *= 2\nnplots = len(terms)\n\n# determine desired levels to plot\nif not kw.samplevals is None: # isamplevals being set indirectly\n # check for special 'all' option\n if isall(kw.samplevals):\n kw.isamplevals = np.arange(len(samplevals_avail))\n else:\n kw.isamplevals = np.zeros_like(kw.samplevals, dtype='int')\n for i in range(len(kw.samplevals)):\n kw.isamplevals[i] = np.argmin(np.abs(samplevals_avail - kw.samplevals[i]))\n\n# Loop over the desired levels and save plots\nfor isampleval in kw.isamplevals:\n if not kw.shav:\n sampleval = samplevals_avail[isampleval]\n\n # set some labels \n axislabel = 'latitude (deg)'\n samplelabel = r'$r/R_\\odot$' + ' = %.3f' %sampleval\n position_tag = '_rval%.3f' %sampleval\n if kw.rad:\n axislabel = r'$r/R_\\odot$'\n samplelabel = 'lat = ' + lat_format(sampleval)\n position_tag = '_lat' + lat_format(sampleval)\n\n # Put some useful information on the title\n maintitle = dirname_stripped \n maintitle += '\\n' + samplelabel\n maintitle += '\\nmval = %03i' %kw.mval\n if kw.navg is None:\n maintitle += '\\nt_avg = none'\n else:\n averaging_time = (times[-1] - times[0])/len(times)*kw.navg\n maintitle += '\\n' + ('t_avg = %.1f Prot' %averaging_time)\n\n print('plotting sampleval = %0.3f (i = %02i)' %(sampleval, isampleval))\n \n # make plot\n fig, axs, fpar = make_figure(nplots=nplots, ncol=1, sub_width_inches=sub_width_inches, sub_height_inches=sub_height_inches, margin_left_inches=margin_left_inches, margin_right_inches=margin_right_inches, margin_top_inches=margin_top_inches, margin_bottom_inches=margin_bottom_inches)\n\n for iplot in range(nplots):\n ax = axs[iplot, 0]\n if kw.rad:\n field = terms[iplot][:, isampleval, :]\n else:\n field = terms[iplot][:, :, isampleval]\n plot_timey(field, times, sampleaxis, fig, ax, **kw_plot_timey)\n \n # title the plot\n ax.set_title(kw.titles[iplot], fontsize=fontsize)\n\n # Turn the x tick labels off for the top strips\n #if iplot < nplots - 1:\n # ax.set_xticklabels([])\n # Put time label on bottom strip \n if iplot == nplots - 1:\n ax.set_xlabel('time (' + time_label + ')', fontsize=fontsize)\n # Put ylabel on middle strip\n if iplot == nplots//2:\n ax.set_ylabel(axislabel, fontsize=fontsize)\n\n fig.text(fpar['margin_left'], 1 - fpar['margin_top'], maintitle, fontsize=fontsize, ha='left', va='bottom')\n\n # Save the plot\n if clas0['saveplot']:\n # Make appropriate file name to save\n\n # save the figure\n basename = dataname + '_%08i_%08i' %(iter1, iter2)\n plotdir = my_mkdir(clas0['plotdir'] + '/' + datatype + clas0['tag'])\n if take_real:\n realtag = '_real'\n else:\n realtag = '_imag'\n savename = basename + position_tag + realtag + '.png'\n print (\"saving\", plotdir + '/' + savename)\n plt.savefig(plotdir + '/' + savename, dpi=200)\n\n # Show the plot if only plotting at one latitude\n if clas0['showplot'] and len(kw.isamplevals) == 1:\n plt.show()\n else:\n plt.close()\n print (\"=======================================\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.signals import post_save
from apps.common.constants import NOTIFICATION_TYPE_CHOICES, INFO
from apps.core.models import BaseModel
from apps.core.utils.helpers import get_upload_path
from apps.core.utils.push_notification import send_push_message
User = get_user_model()
class City(BaseModel):
name = models.CharField(max_length=255, db_index=True)
def __str__(self):
return self.name
class Article(BaseModel):
created_by = models.ForeignKey(User, related_name='articles', on_delete=models.SET_NULL, null=True)
title = models.CharField(max_length=200)
description = models.TextField()
# Below fields are optional
image = models.ImageField(
upload_to=get_upload_path,
blank=True
)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.title
class UserNotification(BaseModel):
title = models.CharField(max_length=150)
sent_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name='sent_notifications')
sent_to = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name='notifications')
content = models.TextField(blank=True)
is_read = models.BooleanField(default=False) # To mark notification as read
notification_type = models.CharField(
max_length=15,
choices=NOTIFICATION_TYPE_CHOICES,
default=INFO
)
def __str__(self):
if self.sent_by:
return f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'
return f'{str(self.sent_to)} content {self.content}'
class Meta:
ordering = ('is_read', '-created_at')
def send_push_notification(sender, instance, created, **kwargs):
if created:
receiver = instance.sent_to
receiver_device = receiver.devices.filter(is_active=True).first()
if receiver_device:
send_push_message(
receiver_device.registration_id,
title=instance.title,
body=instance.content
)
def send_article_notifications(sender, instance, created, **kwargs):
if created:
UserNotification.objects.bulk_create([
UserNotification(**{
'title': instance.title,
'sent_to': user,
'notification_type': INFO,
'content': instance.description
}) for user in User.objects.all()
])
post_save.connect(send_push_notification, sender=UserNotification)
post_save.connect(send_article_notifications, sender=Article)
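# Caveat: Django's bulk_create() does not emit post_save signals, so the
# UserNotification rows created in send_article_notifications will NOT
# trigger send_push_notification; only individually saved notifications do.
# A minimal sketch of the direct path, using a hypothetical `some_user`
# taken from an existing queryset:
#
#   note = UserNotification.objects.create(
#       title='Ping', sent_to=some_user, content='Hello')
#   # post_save now fires send_push_notification, pushing to some_user's
#   # first active device (if any).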
|
normal
|
{
"blob_id": "c2260278c8dfb353f55ee9ea3495049b08169447",
"index": 4115,
"step-1": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\n<mask token>\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([UserNotification(**{'title':\n instance.title, 'sent_to': user, 'notification_type': INFO,\n 'content': instance.description}) for user in User.objects.all()])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\ndef send_push_notification(sender, instance, created, **kwargs):\n if created:\n receiver = instance.sent_to\n receiver_device = receiver.devices.filter(is_active=True).first()\n if receiver_device:\n send_push_message(receiver_device.registration_id, title=\n instance.title, body=instance.content)\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([UserNotification(**{'title':\n instance.title, 'sent_to': user, 'notification_type': INFO,\n 'content': instance.description}) for user in User.objects.all()])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\ndef send_push_notification(sender, instance, created, **kwargs):\n if created:\n receiver = instance.sent_to\n receiver_device = receiver.devices.filter(is_active=True).first()\n if receiver_device:\n send_push_message(receiver_device.registration_id, title=\n instance.title, body=instance.content)\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([UserNotification(**{'title':\n instance.title, 'sent_to': user, 'notification_type': INFO,\n 'content': instance.description}) for user in User.objects.all()])\n\n\npost_save.connect(send_push_notification, sender=UserNotification)\npost_save.connect(send_article_notifications, sender=Article)\n",
"step-5": "from django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.db.models.signals import post_save\n\nfrom apps.common.constants import NOTIFICATION_TYPE_CHOICES, INFO\nfrom apps.core.models import BaseModel\nfrom apps.core.utils.helpers import get_upload_path\nfrom apps.core.utils.push_notification import send_push_message\n\nUser = get_user_model()\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete=models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n # Below fields are optional\n image = models.ImageField(\n upload_to=get_upload_path,\n blank=True\n )\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True, related_name='sent_notifications')\n sent_to = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True, related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False) # To mark notification as read\n notification_type = models.CharField(\n max_length=15,\n choices=NOTIFICATION_TYPE_CHOICES,\n default=INFO\n )\n\n def __str__(self):\n if self.sent_by:\n return f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n return f'{str(self.sent_to)} content {self.content}'\n\n class Meta:\n ordering = ('is_read', '-created_at')\n\n\ndef send_push_notification(sender, instance, created, **kwargs):\n if created:\n receiver = instance.sent_to\n receiver_device = receiver.devices.filter(is_active=True).first()\n if receiver_device:\n send_push_message(\n receiver_device.registration_id,\n title=instance.title,\n body=instance.content\n )\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([\n UserNotification(**{\n 'title': instance.title,\n 'sent_to': user,\n 'notification_type': INFO,\n 'content': instance.description\n }) for user in User.objects.all()\n ])\n\n\npost_save.connect(send_push_notification, sender=UserNotification)\npost_save.connect(send_article_notifications, sender=Article)\n",
"step-ids": [
9,
10,
11,
12,
15
]
}
|
[
9,
10,
11,
12,
15
] |
<|reserved_special_token_0|>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout keep probability (used as output_keep_prob)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level LSTM
        :param hidden_size_word: hidden size of the word-level LSTM
        :param num_layers: number of stacked bidirectional LSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
<|reserved_special_token_0|>
def process_string(string):
"""
    Keep only letters, digits, '-', '/' and spaces, then title-case
    any all-caps words.
    :param string: raw input string
    :return: the cleaned string
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
<|reserved_special_token_0|>
def generate_char_seq(batch):
"""
    The input arrives as blocks of 50 words each; convert every word
    of every block into its character-id sequence.
    :param batch: [n_blocks, seq_len] array of word ids
    :return: [n_blocks, seq_len, maxlen] int32 array of character ids
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout keep probability (used as output_keep_prob)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level LSTM
        :param hidden_size_word: hidden size of the word-level LSTM
        :param num_layers: number of stacked bidirectional LSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
"""
    Load a tagged corpus file and parse it.
    :param file: file name
    :return: parallel lists of words and their tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
    Keep only letters, digits, '-', '/' and spaces, then title-case
    any all-caps words.
    :param string: raw input string
    :return: the cleaned string
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
<|reserved_special_token_0|>
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
<|reserved_special_token_0|>
def generate_char_seq(batch):
"""
    The input arrives as blocks of 50 words each; convert every word
    of every block into its character-id sequence.
    :param batch: [n_blocks, seq_len] array of word ids
    :return: [n_blocks, seq_len, maxlen] int32 array of character ids
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout keep probability (used as output_keep_prob)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level LSTM
        :param hidden_size_word: hidden size of the word-level LSTM
        :param num_layers: number of stacked bidirectional LSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
"""
    Load a tagged corpus file and parse it.
    :param file: file name
    :return: parallel lists of words and their tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
    Keep only letters, digits, '-', '/' and spaces, then title-case
    any all-caps words.
    :param string: raw input string
    :return: the cleaned string
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
"""
    Build the tag, word and character vocabularies, and convert the
    text into the corresponding id sequences.
    :param texts: list of words
    :param labels: list of tags (one per word)
    :return: word-id sequence, tag-id sequence
"""
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower()
tag = labels[no]
for c in text:
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag])
if text not in word2idx:
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text])
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
def to_train_seq(*args):
"""
    :param args: the word-id sequence and the tag-id sequence
    :return: each argument cut into overlapping windows of length seq_len
"""
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
"""
    The input arrives as blocks of 50 words each; convert every word
    of every block into its character-id sequence.
    :param batch: [n_blocks, seq_len] array of word ids
    :return: [n_blocks, seq_len, maxlen] int32 array of character ids
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
"""
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout keep probability (used as output_keep_prob)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level LSTM
        :param hidden_size_word: hidden size of the word-level LSTM
        :param num_layers: number of stacked bidirectional LSTM layers
"""
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(
size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
self.word_embeddings = tf.Variable(tf.truncated_normal([len(
word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(
char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.
word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.
char_ids)
s = tf.shape(char_embedded)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],
dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_char), cell_bw=cells(hidden_size_char), inputs=
char_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_char_%d' % n)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *
hidden_size_char])
word_embedded = tf.concat([word_embedded, output], axis=-1)
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw
) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(
hidden_size_word), cell_bw=cells(hidden_size_word), inputs=
word_embedded, dtype=tf.float32, scope=
'bidirectional_rnn_word_%d' % n)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate
).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,
transition_params, self.lengths)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def parse(file):
"""
    Load a tagged corpus file and parse it.
    :param file: file name
    :return: parallel lists of words and their tags
"""
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
"""
    Keep only letters, digits, '-', '/' and spaces, then title-case
    any all-caps words.
    :param string: raw input string
    :return: the cleaned string
"""
string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
def parse_XY(texts, labels):
"""
    Build the tag, word and character vocabularies, and convert the
    text into the corresponding id sequences.
    :param texts: list of words
    :param labels: list of tags (one per word)
    :return: word-id sequence, tag-id sequence
"""
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
text = text.lower()
tag = labels[no]
for c in text:
if c not in char2idx:
char2idx[c] = char_idx
char_idx += 1
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
Y.append(tag2idx[tag])
if text not in word2idx:
word2idx[text] = word_idx
word_idx += 1
X.append(word2idx[text])
return X, np.array(Y)
def iter_seq(x):
return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])
def to_train_seq(*args):
"""
    :param args: the word-id sequence and the tag-id sequence
    :return: each argument cut into overlapping windows of length seq_len
"""
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
"""
    The input arrives as blocks of 50 words each; convert every word
    of every block into its character-id sequence.
    :param batch: [n_blocks, seq_len] array of word ids
    :return: [n_blocks, seq_len, maxlen] int32 array of character ids
"""
x = [[len(idx2word[i]) for i in k] for k in batch]
maxlen = max([j for i in x for j in i])
temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
for i in range(batch.shape[0]):
for k in range(batch.shape[1]):
for no, c in enumerate(idx2word[batch[i, k]]):
temp[i, k, -1 - no] = char2idx[c]
return temp
def pred2label(pred):
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}
tag2idx = {'PAD': 0}
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
X_seq, Y_seq = to_train_seq(train_X, train_Y)
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape)
print(X_char_seq.shape)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape)
print(X_char_seq_test.shape)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 0.001
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i:min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run([model.accuracy, model.cost, model.
optimizer], feed_dict={model.word_ids: batch_x, model.
char_ids: batch_char, model.labels: batch_y})
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.
format(e, i // batch_size + 1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run([model.accuracy, model.cost], feed_dict={
model.word_ids: batch_x, model.char_ids: batch_char, model.
labels: batch_y})
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.
format(e, i // batch_size + 1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print(
'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]
predicted = pred2label(sess.run(model.tags_seq, feed_dict={model.
word_ids: batch_x, model.char_ids: batch_char}))
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(
predict_Y).ravel()))
<|reserved_special_token_1|>
"""
@file : 001-rnn+lstm+crf.py
@author: xiaolu
@time : 2019-09-06
"""
import re
import numpy as np
import tensorflow as tf
from sklearn.metrics import classification_report
class Model:
def __init__(self, dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers):
'''
        :param dim_word: word embedding dimension
        :param dim_char: character embedding dimension
        :param dropout: dropout keep probability (used as output_keep_prob)
        :param learning_rate: learning rate
        :param hidden_size_char: hidden size of the character-level LSTM
        :param hidden_size_word: hidden size of the word-level LSTM
        :param num_layers: number of stacked bidirectional LSTM layers
'''
def cells(size, reuse=False):
return tf.contrib.rnn.DropoutWrapper(
tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),
output_keep_prob=dropout
)
# 1. define input
self.word_ids = tf.placeholder(tf.int32, shape=[None, None])
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])
self.labels = tf.placeholder(tf.int32, shape=[None, None])
self.maxlen = tf.shape(self.word_ids)[1]
self.lengths = tf.count_nonzero(self.word_ids, 1)
# 2. embedding
self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))
self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))
word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)
char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_char),
cell_bw=cells(hidden_size_char),
inputs=char_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_char_%d' % n
)
char_embedded = tf.concat((out_fw, out_bw), 2)
output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])
        word_embedded = tf.concat([word_embedded, output], axis=-1)  # concatenate the word embeddings with the char-BiLSTM outputs
for n in range(num_layers):
(out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cells(hidden_size_word),
cell_bw=cells(hidden_size_word),
inputs=word_embedded,
dtype=tf.float32,
scope='bidirectional_rnn_word_%d' % n
)
word_embedded = tf.concat((out_fw, out_bw), 2)
        logits = tf.layers.dense(word_embedded, len(idx2tag))  # per-token tag logits (tf.layers.Dense is a class; the functional form takes inputs first)
y_t = self.labels
log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(
logits, y_t, self.lengths
)
self.cost = tf.reduce_mean(-log_likelihood)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)
self.tags_seq, tags_score = tf.contrib.crf.crf_decode(
logits, transition_params, self.lengths
)
self.tags_seq = tf.identity(self.tags_seq, name='logits')
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(self.tags_seq, mask)
mask_label = tf.boolean_mask(y_t, mask)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
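
# A minimal inference sketch (assumes a trained `sess`; this mirrors the
# evaluation loop at the bottom of this file):
#
#   seq = sess.run(model.tags_seq,
#                  feed_dict={model.word_ids: batch_x,
#                             model.char_ids: batch_char})
#   # seq: [batch, seq_len] int32 tag ids, mapped back via idx2tag
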
def parse(file):
'''
    Load a tagged corpus file and parse it.
    :param file: file name
    :return: parallel lists of words and their tags
'''
with open(file) as fopen:
texts = fopen.read().split('\n')
left, right = [], []
for text in texts:
if '-DOCSTART' in text or not len(text):
continue
splitted = text.split()
left.append(splitted[0])
right.append(splitted[-1])
return left, right
def process_string(string):
'''
    Keep only letters, digits, '-', '/' and spaces, then title-case
    any all-caps words.
    :param string: raw input string
    :return: the cleaned string
'''
    string = re.sub(r'[^A-Za-z0-9\-/ ]+', ' ', string).split()
return ' '.join([to_title(y.strip()) for y in string])
def to_title(string):
if string.isupper():
string = string.title()
return string
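
# Worked example of the two helpers above (hand-checked against the regex):
#   process_string('U.N. OFFICIAL heads for Baghdad')
#       -> 'U N Official heads for Baghdad'
#   ('.' is stripped; the all-caps 'OFFICIAL' is title-cased)
#   to_title('BAGHDAD') -> 'Baghdad'; to_title('Baghdad') -> 'Baghdad'
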
def parse_XY(texts, labels):
'''
    Build the tag, word and character vocabularies, and convert the
    text into the corresponding id sequences.
    :param texts: list of words
    :param labels: list of tags (one per word)
    :return: word-id sequence, tag-id sequence
'''
global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx
X, Y = [], []
for no, text in enumerate(texts):
        text = text.lower()             # lowercase the current word
        tag = labels[no]                # its corresponding tag
        for c in text:                  # grow the character vocabulary
            if c not in char2idx:
                char2idx[c] = char_idx
                char_idx += 1
        if tag not in tag2idx:          # grow the tag vocabulary
            tag2idx[tag] = tag_idx
            tag_idx += 1
        Y.append(tag2idx[tag])          # tag id of the current word
        if text not in word2idx:        # grow the word vocabulary
            word2idx[text] = word_idx
            word_idx += 1
        X.append(word2idx[text])        # word id of the current word
return X, np.array(Y)
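
# Illustrative run with the fresh tables defined in __main__ below:
#   parse_XY(['EU', 'rejects'], ['B-ORG', 'O'])
# returns X = [3, 4] (newly assigned word ids) and Y = array([1, 2])
# (newly assigned tag ids), growing word2idx/tag2idx/char2idx in place.
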
def iter_seq(x):
return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])
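
# e.g. with seq_len = 3: iter_seq([1, 2, 3, 4, 5]) -> array([[1, 2, 3],
# [2, 3, 4]]): stride-1 overlapping windows; note the window ending exactly
# at len(x) is excluded by the range bound.
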
def to_train_seq(*args):
'''
    :param args: the word-id sequence and the tag-id sequence
    :return: each argument cut into overlapping windows of length seq_len
'''
return [iter_seq(x) for x in args]
def generate_char_seq(batch):
'''
    The input arrives as blocks of 50 words each; convert every word
    of every block into its character-id sequence.
    :param batch: [n_blocks, seq_len] array of word ids
    :return: [n_blocks, seq_len, maxlen] int32 array of character ids
'''
    x = [[len(idx2word[i]) for i in k] for k in batch]  # length of every word
    maxlen = max([j for i in x for j in i])             # longest word in the batch
    temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)
    for i in range(batch.shape[0]):
        for k in range(batch.shape[1]):
            for no, c in enumerate(idx2word[batch[i, k]]):
                temp[i, k, -1 - no] = char2idx[c]
    return temp  # [n_blocks, words per block, maxlen] of character ids
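
# Layout note: characters are written from the last slot backwards
# (index -1 - no), so each word ends up reversed and left-padded with
# PAD (0) up to the longest word in the batch.
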
def pred2label(pred):
    # convert id predictions back to tag labels
out = []
for pred_i in pred:
out_i = []
for p in pred_i:
out_i.append(idx2tag[p])
out.append(out_i)
return out
if __name__ == '__main__':
left_train, right_train = parse('./data/eng.train')
left_test, right_test = parse('./data/eng.testa')
# print(left_train[:10])
# print(right_train[:10])
    word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}  # word vocabulary
    tag2idx = {'PAD': 0}  # tag vocabulary
char2idx = {'PAD': 0}
word_idx = 3
tag_idx = 1
char_idx = 1
train_X, train_Y = parse_XY(left_train, right_train)
test_X, test_Y = parse_XY(left_test, right_test)
# print(train_X[:20])
# print(train_Y[:20])
idx2word = {idx: tag for tag, idx in word2idx.items()}
idx2tag = {i: w for w, i in tag2idx.items()}
seq_len = 50
    X_seq, Y_seq = to_train_seq(train_X, train_Y)  # overlapping windows of length 50
X_char_seq = generate_char_seq(X_seq)
print(X_seq.shape) # (203571, 50)
print(X_char_seq.shape) # (203571, 50, 61)
X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)
X_char_seq_test = generate_char_seq(X_seq_test)
print(X_seq_test.shape) # (51312, 50)
print(X_char_seq_test.shape) # (51312, 50, 27)
train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq
test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test
tf.reset_default_graph()
sess = tf.Session()
dim_word = 64
dim_char = 128
dropout = 0.8
learning_rate = 1e-3
hidden_size_char = 128
hidden_size_word = 128
num_layers = 2
batch_size = 32
model = Model(dim_word, dim_char, dropout, learning_rate,
hidden_size_char, hidden_size_word, num_layers)
sess.run(tf.global_variables_initializer())
for e in range(3):
train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
for i in range(0, len(train_X), batch_size):
batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]
batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]
batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]
acc, cost, _ = sess.run(
[model.accuracy, model.cost, model.optimizer],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
train_loss += cost
train_acc += acc
print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
acc, cost = sess.run(
[model.accuracy, model.cost],
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
model.labels: batch_y
},
)
test_loss += cost
test_acc += acc
print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))
train_loss /= len(train_X) / batch_size
train_acc /= len(train_X) / batch_size
test_loss /= len(test_X) / batch_size
test_acc /= len(test_X) / batch_size
print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
% (e, train_loss, train_acc, test_loss, test_acc))
real_Y, predict_Y = [], []
for i in range(0, len(test_X), batch_size):
batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]
batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]
batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]
predicted = pred2label(
sess.run(model.tags_seq,
feed_dict={
model.word_ids: batch_x,
model.char_ids: batch_char,
},
)
)
real = pred2label(batch_y)
predict_Y.extend(predicted)
real_Y.extend(real)
print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))
|
flexible
|
{
"blob_id": "5d9c8e235385ff53c7510994826ff3a04e4a5888",
"index": 10,
"step-1": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\n<mask token>\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\n<mask token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in 
range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef 
to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\n<mask token>\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\n<mask token>\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef 
to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表 词表 字符表 并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列 词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Model:\n\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n \"\"\"\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n \"\"\"\n\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(tf.nn.rnn_cell.LSTMCell(\n size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(\n word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(\n char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.\n word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.\n char_ids)\n s = tf.shape(char_embedded)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2],\n dim_char])\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_char), cell_bw=cells(hidden_size_char), inputs=\n char_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_char_%d' % n)\n char_embedded = tf.concat((out_fw, out_bw), 2)\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2 *\n hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1)\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw\n ) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cells(\n hidden_size_word), cell_bw=cells(hidden_size_word), inputs=\n word_embedded, dtype=tf.float32, scope=\n 'bidirectional_rnn_word_%d' % n)\n word_embedded = tf.concat((out_fw, out_bw), 2)\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths)\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate\n ).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(logits,\n transition_params, self.lengths)\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n y_t = tf.cast(y_t, tf.int32)\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n \"\"\"\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n \"\"\"\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n \"\"\"\n :param string:\n :return:\n \"\"\"\n string = re.sub('[^A-Za-z0-9\\\\-\\\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef 
to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n \"\"\"\n 整理词性表 词表 字符表 并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n \"\"\"\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower()\n tag = labels[no]\n for c in text:\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx:\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag])\n if text not in word2idx:\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text])\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i:i + seq_len] for i in range(0, len(x) - seq_len, 1)])\n\n\ndef to_train_seq(*args):\n \"\"\"\n :param args: 词转为的id的序列 词性转为id的序列\n :return:\n \"\"\"\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n \"\"\"\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n \"\"\"\n x = [[len(idx2word[i]) for i in k] for k in batch]\n maxlen = max([j for i in x for j in i])\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1 - no] = char2idx[c]\n return temp\n\n\ndef pred2label(pred):\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}\n tag2idx = {'PAD': 0}\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n seq_len = 50\n X_seq, Y_seq = to_train_seq(train_X, train_Y)\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape)\n print(X_char_seq.shape)\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape)\n print(X_char_seq_test.shape)\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n tf.reset_default_graph()\n sess = tf.Session()\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 0.001\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), batch_size):\n batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i:min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]\n acc, cost, _ = sess.run([model.accuracy, model.cost, model.\n optimizer], feed_dict={model.word_ids: batch_x, model.\n char_ids: batch_char, model.labels: batch_y})\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, 
test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run([model.accuracy, model.cost], feed_dict={\n model.word_ids: batch_x, model.char_ids: batch_char, model.\n labels: batch_y})\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.\n format(e, i // batch_size + 1, cost, acc))\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n print(\n 'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(sess.run(model.tags_seq, feed_dict={model.\n word_ids: batch_x, model.char_ids: batch_char}))\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n print(classification_report(np.array(real_Y).ravel(), np.array(\n predict_Y).ravel()))\n",
"step-5": "\"\"\"\n\n@file : 001-rnn+lstm+crf.py\n\n@author: xiaolu\n\n@time : 2019-09-06\n\n\"\"\"\nimport re\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import classification_report\n\n\nclass Model:\n def __init__(self, dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers):\n '''\n :param dim_word: 词的维度\n :param dim_char: 字符维度\n :param dropout: dropout\n :param learning_rate: 学习率\n :param hidden_size_char: 字符隐层输出维度\n :param hidden_size_word: 词隐层输出维度\n :param num_layers: 几层\n '''\n def cells(size, reuse=False):\n return tf.contrib.rnn.DropoutWrapper(\n tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n output_keep_prob=dropout\n )\n\n # 1. define input\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n self.labels = tf.placeholder(tf.int32, shape=[None, None])\n self.maxlen = tf.shape(self.word_ids)[1]\n self.lengths = tf.count_nonzero(self.word_ids, 1)\n\n # 2. embedding\n self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)\n char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)\n\n s = tf.shape(char_embedded) # (51312, 50, 27, embedding_size)\n char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cells(hidden_size_char),\n cell_bw=cells(hidden_size_char),\n inputs=char_embedded,\n dtype=tf.float32,\n scope='bidirectional_rnn_char_%d' % n\n )\n char_embedded = tf.concat((out_fw, out_bw), 2)\n\n output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])\n word_embedded = tf.concat([word_embedded, output], axis=-1) # 将词嵌入部分与字符嵌入通过双向lstm输出部分进行拼接\n\n for n in range(num_layers):\n (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=cells(hidden_size_word),\n cell_bw=cells(hidden_size_word),\n inputs=word_embedded,\n dtype=tf.float32,\n scope='bidirectional_rnn_word_%d' % n\n )\n word_embedded = tf.concat((out_fw, out_bw), 2)\n\n logits = tf.layers.Dense(word_embedded, len(idx2tag))\n\n y_t = self.labels\n log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n logits, y_t, self.lengths\n )\n\n self.cost = tf.reduce_mean(-log_likelihood)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n\n self.tags_seq, tags_score = tf.contrib.crf.crf_decode(\n logits, transition_params, self.lengths\n )\n\n self.tags_seq = tf.identity(self.tags_seq, name='logits')\n\n y_t = tf.cast(y_t, tf.int32)\n\n self.prediction = tf.boolean_mask(self.tags_seq, mask)\n mask_label = tf.boolean_mask(y_t, mask)\n\n correct_pred = tf.equal(self.prediction, mask_label)\n correct_index = tf.cast(correct_pred, tf.float32)\n self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n '''\n 加载文件并且解析\n :param file: 文件名\n :return: 词<->词性\n '''\n with open(file) as fopen:\n texts = fopen.read().split('\\n')\n\n left, right = [], []\n for text in texts:\n if '-DOCSTART' in text or not len(text):\n continue\n splitted = text.split()\n 
left.append(splitted[0])\n right.append(splitted[-1])\n return left, right\n\n\ndef process_string(string):\n '''\n :param string:\n :return:\n '''\n string= re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()\n return ' '.join([to_title(y.strip()) for y in string])\n\n\ndef to_title(string):\n if string.isupper():\n string = string.title()\n return string\n\n\ndef parse_XY(texts, labels):\n '''\n 整理词性表 词表 字符表 并将文本转为对应的数字序列\n :param texts: 文本 词的一个列表\n :param labels: 词性的一个列表\n :return: 词转为id的序列 词性转为id的序列\n '''\n global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n X, Y = [], []\n for no, text in enumerate(texts):\n text = text.lower() # 当前这个单词转小写\n tag = labels[no] # 取出对应的词性\n for c in text: # 字符表\n if c not in char2idx:\n char2idx[c] = char_idx\n char_idx += 1\n if tag not in tag2idx: # 词性表\n tag2idx[tag] = tag_idx\n tag_idx += 1\n Y.append(tag2idx[tag]) # 当前这个词的词性转为id的值\n if text not in word2idx: # 词表\n word2idx[text] = word_idx\n word_idx += 1\n X.append(word2idx[text]) # 将词转为id的标号\n return X, np.array(Y)\n\n\ndef iter_seq(x):\n return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])\n\n\ndef to_train_seq(*args):\n '''\n :param args: 词转为的id的序列 词性转为id的序列\n :return:\n '''\n return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n '''\n 传进来是50一个块 总共有多少块\n 然后将每块的单词转为字符序列\n :param batch:\n :return:\n '''\n x = [[len(idx2word[i]) for i in k] for k in batch] # 得出每个单词的长度\n maxlen = max([j for i in x for j in i]) # 最大长度\n temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n\n for i in range(batch.shape[0]):\n for k in range(batch.shape[1]):\n for no, c in enumerate(idx2word[batch[i, k]]):\n temp[i, k, -1-no] = char2idx[c]\n return temp # [文章数, 单词个数, maxlen(每个单词按字符转的id)]\n\n\ndef pred2label(pred):\n # 将预测结果转为标签\n out = []\n for pred_i in pred:\n out_i = []\n for p in pred_i:\n out_i.append(idx2tag[p])\n out.append(out_i)\n return out\n\n\nif __name__ == '__main__':\n left_train, right_train = parse('./data/eng.train')\n left_test, right_test = parse('./data/eng.testa')\n # print(left_train[:10])\n # print(right_train[:10])\n\n word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2} # 词表\n tag2idx = {'PAD': 0} # 词性表\n char2idx = {'PAD': 0}\n word_idx = 3\n tag_idx = 1\n char_idx = 1\n\n train_X, train_Y = parse_XY(left_train, right_train)\n test_X, test_Y = parse_XY(left_test, right_test)\n # print(train_X[:20])\n # print(train_Y[:20])\n\n idx2word = {idx: tag for tag, idx in word2idx.items()}\n idx2tag = {i: w for w, i in tag2idx.items()}\n\n seq_len = 50\n\n X_seq, Y_seq = to_train_seq(train_X, train_Y) # 长度为50为一个段落\n X_char_seq = generate_char_seq(X_seq)\n print(X_seq.shape) # (203571, 50)\n print(X_char_seq.shape) # (203571, 50, 61)\n\n X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n X_char_seq_test = generate_char_seq(X_seq_test)\n print(X_seq_test.shape) # (51312, 50)\n print(X_char_seq_test.shape) # (51312, 50, 27)\n\n train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n\n tf.reset_default_graph()\n sess = tf.Session()\n\n dim_word = 64\n dim_char = 128\n dropout = 0.8\n learning_rate = 1e-3\n hidden_size_char = 128\n hidden_size_word = 128\n num_layers = 2\n batch_size = 32\n\n model = Model(dim_word, dim_char, dropout, learning_rate,\n hidden_size_char, hidden_size_word, num_layers)\n sess.run(tf.global_variables_initializer())\n\n for e in range(3):\n train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n for i in range(0, len(train_X), 
batch_size):\n\n batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]\n batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]\n batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]\n\n acc, cost, _ = sess.run(\n [model.accuracy, model.cost, model.optimizer],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run(\n [model.accuracy, model.cost],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n\n print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(\n sess.run(model.tags_seq,\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n },\n )\n )\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n\n print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))",
"step-ids": [
5,
8,
10,
11,
13
]
}
|
[
5,
8,
10,
11,
13
] |
#Week 5
#Task 1.1
a = 13
b = 14
calculation = a + 1 <= b
calculation2 = a + 1 >= b
calculation3 = a + 1 != b
print (calculation)
print (calculation2)
print (calculation3)
#Task 1.2
myage = input("How old are you : ")
print ("Hi there, You are " +myage+ " years old")
#Task 1.3
num1 = int(input("Enter the first number : "))
num2 = int(input("Enter the second number : "))
result = num1 + num2
print ("The result is " + str(result))
#Task 1.4
print ("average: %.2f" % ((3 + 11 + 78 + 112 + 4 + 18) / 6))
#Task 1.5
num1 = int(input ("Enter a number : "))
remainder = num1 % 7
print (remainder)
#Task 1.6
num1 = int(input ("Enter a number : "))
remainder = num1 % 7
print (remainder)
num2 = 7
num3 = num1 / num2
print (num3)
#Task 1.8
userinput = input("Enter Y to quit : ")
if userinput == 'Y':
print ("Goodbye")
elif userinput == 'y':
print ("Goodbye")
else:
print ("Round 2 ~ Fight!")
#Task 1.9a
x = int(input ("Enter a number : "))
if x > 0:
    print(x)
#Task 1.9b
from math import sqrt
y = 0
if 1 + x > x ** sqrt(2): y = y + x
#Task 1.9c
x = 1
y = 5
if x == 1:
y += 1
print (x)
print (y)
#Task 1.9d
letterGrade = int(input("Enter your grade : "))
if letterGrade >= 90: print ("A")
elif letterGrade >= 80: print ("B")
elif letterGrade >= 70: print ("C")
elif letterGrade >= 60: print ("D")
else: print ("F")
#Task 1.10
richter = float(input ("Enter magnitude on richter scale : "))
if richter >= 8.0: print ("Most structures fall")
elif richter >= 7.0: print ("many buildings destroyed")
elif richter >= 6.0: print ("Many buildings considerbly damaged, some collapse")
elif richter >= 4.5: print ("Damage to poorly constructed buildings.")
else: print ("No destruction of buildings.")
#Task 1.11
user = input("Enter a username : ")
print ("Welcome " + user + " Please select a password")
password = input("Enter a password : ")
count = 0
while count <= 4:
    if count == 4:
        print ("Access denied,Please press enter to exit and contact security to reset your password")
        break
    elif (len(password)<8):
        password = input("Password needs to be more than 8 characters, Please try again : ")
    elif (len(password)>=8):
        print ("Password changed successfully")
        break
    count += 1
#Task 1.12
for i in range(3):
for j in range(1, 4):
print (i + j, end="")
print ()
#Task 1.13
for i in range (1,6):
print("%d %d %d %d %d" % ((i**1),(i**2),(i**3),(i**4),(i**5)))
|
normal
|
{
"blob_id": "03f73a55e0a0773bbdbb0d5e29a2db598ba2e080",
"index": 149,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(calculation)\nprint(calculation2)\nprint(calculation3)\n<mask token>\nprint('Hi there, You are ' + myage + ' years old')\n<mask token>\nprint('The result is ' + result)\nprint('average: %.2f' % ((3 + 11 + 78 + 112 + 4 + 18) / 6))\n<mask token>\nprint(remainder)\n<mask token>\nprint(remainder)\n<mask token>\nprint(num3)\n<mask token>\nif userinput == 'Y':\n print('Goodbye')\nelif userinput == 'y':\n print('Goodbye')\nelse:\n print('Round 2 ~ Fight!')\n<mask token>\nif x > 0:\n print(x)\nif 1 + x > x ** sqrt(2):\n y = y + x\n<mask token>\nif x == 1:\n y += 1\nprint(x)\nprint(y)\n<mask token>\nif letterGrade >= 90:\n print('A')\nelif letterGrade >= 80:\n print('B')\nelif letterGrade >= 70:\n print('C')\nelif letterGrade >= 60:\n print('D')\nelif letterGrade <= 40:\n print('F')\n<mask token>\nif richter >= 8.0:\n print('Most structures fall')\nelif richter >= 7.0:\n print('many buildings destroyed')\nelif richter >= 6.0:\n print('Many buildings considerbly damaged, some collapse')\nelif richter >= 4.5:\n print('Damage to poorly constructed buildings.')\nelif richter <= 4.4:\n print('No destruction of buildings.')\n<mask token>\nprint('Welcome ' + user + ' Please select a password')\n<mask token>\nwhile count <= 4:\n if count == 4:\n print(\n 'Access denied,Please press enter to exit and contact security to reset your password'\n )\n elif len(password) < 8:\n input(\n 'Password needs to be more than 8 characters, Please try again : ')\n elif len(password) >= 8:\n print('Password changed successfully')\n break\ncount += 1\nfor i in range(3):\n for j in range(1, 4):\n print(i + j, end='')\n print()\nfor i in range(1, 6):\n print('%d %d %d %d %d' % (i ** 1, i ** 2, i ** 3, i ** 4, i ** 5))\n",
"step-3": "a = 13\nb = 14\ncalculation = a + 1 <= b\ncalculation2 = a + 1 >= b\ncalculation3 = a + 1 != b\nprint(calculation)\nprint(calculation2)\nprint(calculation3)\nmyage = input('How old are you : ')\nprint('Hi there, You are ' + myage + ' years old')\nnum1 = input('Enter the first number : ')\nnum2 = input('Enter the second number : ')\nresult = num1 + num2\nprint('The result is ' + result)\nprint('average: %.2f' % ((3 + 11 + 78 + 112 + 4 + 18) / 6))\nnum1 = int(input('Enter a number : '))\nremainder = num1 % 7\nprint(remainder)\nnum1 = int(input('Enter a number : '))\nremainder = num1 % 7\nprint(remainder)\nnum2 = 7\nnum3 = num1 / num2\nprint(num3)\nuserinput = input('Enter Y to quit : ')\nif userinput == 'Y':\n print('Goodbye')\nelif userinput == 'y':\n print('Goodbye')\nelse:\n print('Round 2 ~ Fight!')\nx = int(input('Enter a number : '))\nif x > 0:\n print(x)\nif 1 + x > x ** sqrt(2):\n y = y + x\nx = 1\ny = 5\nif x == 1:\n y += 1\nprint(x)\nprint(y)\nletterGrade = int(input('Enter your grade : '))\nif letterGrade >= 90:\n print('A')\nelif letterGrade >= 80:\n print('B')\nelif letterGrade >= 70:\n print('C')\nelif letterGrade >= 60:\n print('D')\nelif letterGrade <= 40:\n print('F')\nrichter = float(input('Enter magnitude on richter scale : '))\nif richter >= 8.0:\n print('Most structures fall')\nelif richter >= 7.0:\n print('many buildings destroyed')\nelif richter >= 6.0:\n print('Many buildings considerbly damaged, some collapse')\nelif richter >= 4.5:\n print('Damage to poorly constructed buildings.')\nelif richter <= 4.4:\n print('No destruction of buildings.')\nuser = input('Enter a username : ')\nprint('Welcome ' + user + ' Please select a password')\npassword = input('Enter a password : ')\ncount = 0\nwhile count <= 4:\n if count == 4:\n print(\n 'Access denied,Please press enter to exit and contact security to reset your password'\n )\n elif len(password) < 8:\n input(\n 'Password needs to be more than 8 characters, Please try again : ')\n elif len(password) >= 8:\n print('Password changed successfully')\n break\ncount += 1\nfor i in range(3):\n for j in range(1, 4):\n print(i + j, end='')\n print()\nfor i in range(1, 6):\n print('%d %d %d %d %d' % (i ** 1, i ** 2, i ** 3, i ** 4, i ** 5))\n",
"step-4": "#Week 5\n#Task 1.1\na = 13\nb = 14\ncalculation = a + 1 <=b\ncalculation2 = a + 1 >=b\ncalculation3 = a + 1 !=b\nprint (calculation)\nprint (calculation2)\nprint (calculation3)\n#Task 1.2\nmyage = input(\"How old are you : \")\nprint (\"Hi there, You are \" +myage+ \" years old\")\n#Task 1.3\nnum1 = input(\"Enter the first number : \")\nnum2 = input(\"Enter the second number : \")\nresult = num1 + num2\nprint (\"The result is \" +result)\n#Task 1.4\nprint (\"average: %.2f\" % ((3 + 11 + 78 + 112 + 4 + 18) / 6))\n#Task 1.5\nnum1 = int(input (\"Enter a number : \"))\nremainder = num1 % 7\nprint (remainder)\n#Task 1.6\nnum1 = int(input (\"Enter a number : \"))\nremainder = num1 % 7\nprint (remainder)\nnum2 = 7\nnum3 = num1 / num2\nprint (num3)\n#Task 1.8\nuserinput = input(\"Enter Y to quit : \")\nif userinput == 'Y':\n print (\"Goodbye\")\nelif userinput == 'y':\n print (\"Goodbye\")\nelse:\n print (\"Round 2 ~ Fight!\")\n#Task 1.9a\nx = int(input (\"Enter a number : \"))\nif (x) >0:\n print(x)\n#Task 1.9b\nif 1 + x > x ** sqrt(2) : y = y + x\n#Task 1.9c\nx = 1\ny = 5\nif x == 1:\n y += 1\nprint (x)\nprint (y)\n#Task 1.9d\nletterGrade = int(input(\"Enter your grade : \"))\nif letterGrade >= 90: print (\"A\")\nelif letterGrade >= 80: print (\"B\")\nelif letterGrade >= 70: print (\"C\")\nelif letterGrade >= 60: print (\"D\")\nelif letterGrade <= 40: print (\"F\")\n#Task 1.10\nrichter = float(input (\"Enter magnitude on richter scale : \"))\nif richter >= 8.0: print (\"Most structures fall\")\nelif richter >= 7.0: print (\"many buildings destroyed\")\nelif richter >= 6.0: print (\"Many buildings considerbly damaged, some collapse\")\nelif richter >= 4.5: print (\"Damage to poorly constructed buildings.\")\nelif richter <= 4.4: print (\"No destruction of buildings.\")\n#Task 1.11\nuser = input(\"Enter a username : \")\nprint (\"Welcome \" + user + \" Please select a password\")\npassword = input(\"Enter a password : \")\ncount = 0\n\n\nwhile count <= 4:\n if count == 4:\n print (\"Access denied,Please press enter to exit and contact security to reset your password\")\n elif (len(password)<8):\n input(\"Password needs to be more than 8 characters, Please try again : \")\n elif (len(password)>=8):\n print (\"Password changed successfully\")\n break\n \ncount += 1\n\n#Task 1.12\nfor i in range(3):\n for j in range(1, 4):\n print (i + j, end=\"\")\n print ()\n \n#Task 1.13\n \nfor i in range (1,6):\n print(\"%d %d %d %d %d\" % ((i**1),(i**2),(i**3),(i**4),(i**5)))\n\n\n\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# @Author: Chen yunsheng(Leo YS CHen)
# @Location: Taiwan
# @E-mail:leoyenschen@gmail.com
# @Date: 2017-02-14 00:11:27
# @Last Modified by: Chen yunsheng
import click
from qstrader import settings
from qstrader.compat import queue
from qstrader.price_parser import PriceParser
from qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from qstrader.strategy import Strategies, DisplayStrategy
from qstrader.risk_manager.example import ExampleRiskManager
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.compliance.example import ExampleCompliance
from qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler
from qstrader.statistics.simple import SimpleStatistics
from qstrader.trading_session.backtest import Backtest
#====================================================
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0,dir)
print("parentdir")
print(parentdir)
print("dir")
print(dir)
from custom_strategy import CustomStrategy
from custom_position import CustomPositionSizer
def run(config, testing, tickers, filename):
# Set up variables needed for backtest
events_queue = queue.Queue()
csv_dir = config.CSV_DATA_DIR
initial_equity = PriceParser.parse(500000.00)
# Use Yahoo Daily Price Handler
price_handler = YahooDailyCsvBarPriceHandler(
csv_dir, events_queue, tickers
)
# Use the Buy and Hold Strategy
strategy = CustomStrategy(tickers, events_queue)
strategy = Strategies(strategy, DisplayStrategy())
# Use an example Position Sizer
position_sizer = CustomPositionSizer()
# Use an example Risk Manager
risk_manager = ExampleRiskManager()
# Use the default Portfolio Handler
portfolio_handler = PortfolioHandler(
initial_equity, events_queue, price_handler,
position_sizer, risk_manager
)
# Use the ExampleCompliance component
compliance = ExampleCompliance(config)
# Use a simulated IB Execution Handler
execution_handler = IBSimulatedExecutionHandler(
events_queue, price_handler, compliance
)
# Use the default Statistics
statistics = SimpleStatistics(config, portfolio_handler)
# Set up the backtest
backtest = Backtest(
price_handler, strategy,
portfolio_handler, execution_handler,
position_sizer, risk_manager,
statistics, initial_equity
)
results = backtest.simulate_trading(testing=testing)
statistics.save(filename)
return results
"""
@click.command()
@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
@click.option('--testing/--no-testing', default=False, help='Enable testing mode')
@click.option('--tickers', default='SP500TR', help='Tickers (use comma)')
@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')
"""
def main(config, testing, tickers, filename):
tickers = tickers.split(",")
config = settings.from_file(config, testing)
run(config, testing, tickers, filename)
if __name__ == "__main__":
main(settings.DEFAULT_CONFIG_FILENAME,False,'SP500TR','')
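# --- Editor's note (hedged): the click decorators above are quoted out in a
# string literal, so main() is currently driven by the hard-coded call above.
# A minimal sketch of how the CLI could be restored, using the exact options
# the author quoted (the script name "backtest_script.py" is hypothetical):
#
#   @click.command()
#   @click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
#   @click.option('--testing/--no-testing', default=False, help='Enable testing mode')
#   @click.option('--tickers', default='SP500TR', help='Tickers (use comma)')
#   @click.option('--filename', default='', help='Pickle (.pkl) statistics filename')
#   def main(config, testing, tickers, filename): ...
#
# invoked from the shell as:
#   $ python backtest_script.py --tickers SP500TR --filename backtest.pkl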
|
normal
|
{
"blob_id": "0cec92bbfad87020baf5ef1bd005e64bc9a6ed01",
"index": 5232,
"step-1": "<mask token>\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, dir)\nprint('parentdir')\nprint(parentdir)\nprint('dir')\nprint(dir)\n<mask token>\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == '__main__':\n main(settings.DEFAULT_CONFIG_FILENAME, False, 'SP500TR', '')\n",
"step-3": "<mask token>\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, dir)\nprint('parentdir')\nprint(parentdir)\nprint('dir')\nprint(dir)\n<mask token>\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == '__main__':\n main(settings.DEFAULT_CONFIG_FILENAME, False, 'SP500TR', '')\n",
"step-4": "import click\nfrom qstrader import settings\nfrom qstrader.compat import queue\nfrom qstrader.price_parser import PriceParser\nfrom qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler\nfrom qstrader.strategy import Strategies, DisplayStrategy\nfrom qstrader.risk_manager.example import ExampleRiskManager\nfrom qstrader.portfolio_handler import PortfolioHandler\nfrom qstrader.compliance.example import ExampleCompliance\nfrom qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler\nfrom qstrader.statistics.simple import SimpleStatistics\nfrom qstrader.trading_session.backtest import Backtest\nimport os, sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, dir)\nprint('parentdir')\nprint(parentdir)\nprint('dir')\nprint(dir)\nfrom custom_strategy import CustomStrategy\nfrom custom_position import CustomPositionSizer\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == '__main__':\n main(settings.DEFAULT_CONFIG_FILENAME, False, 'SP500TR', '')\n",
"step-5": "# @Author: Chen yunsheng(Leo YS CHen)\n# @Location: Taiwan\n# @E-mail:leoyenschen@gmail.com\n# @Date: 2017-02-14 00:11:27\n# @Last Modified by: Chen yunsheng\n\nimport click\n\nfrom qstrader import settings\nfrom qstrader.compat import queue\nfrom qstrader.price_parser import PriceParser\nfrom qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler\nfrom qstrader.strategy import Strategies, DisplayStrategy\nfrom qstrader.risk_manager.example import ExampleRiskManager\nfrom qstrader.portfolio_handler import PortfolioHandler\nfrom qstrader.compliance.example import ExampleCompliance\nfrom qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler\nfrom qstrader.statistics.simple import SimpleStatistics\nfrom qstrader.trading_session.backtest import Backtest\n#====================================================\nimport os,sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0,dir)\nprint(\"parentdir\")\nprint(parentdir)\nprint(\"dir\")\nprint(dir)\nfrom custom_strategy import CustomStrategy\nfrom custom_position import CustomPositionSizer\n\ndef run(config, testing, tickers, filename):\n\n # Set up variables needed for backtest\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.00)\n\n # Use Yahoo Daily Price Handler\n price_handler = YahooDailyCsvBarPriceHandler(\n csv_dir, events_queue, tickers\n )\n\n # Use the Buy and Hold Strategy\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n\n # Use an example Position Sizer\n position_sizer = CustomPositionSizer()\n\n # Use an example Risk Manager\n risk_manager = ExampleRiskManager()\n\n # Use the default Portfolio Handler\n portfolio_handler = PortfolioHandler(\n initial_equity, events_queue, price_handler,\n position_sizer, risk_manager\n )\n\n # Use the ExampleCompliance component\n compliance = ExampleCompliance(config)\n\n # Use a simulated IB Execution Handler\n execution_handler = IBSimulatedExecutionHandler(\n events_queue, price_handler, compliance\n )\n\n # Use the default Statistics\n statistics = SimpleStatistics(config, portfolio_handler)\n\n # Set up the backtest\n backtest = Backtest(\n price_handler, strategy,\n portfolio_handler, execution_handler,\n position_sizer, risk_manager,\n statistics, initial_equity\n )\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\"\"\"\n@click.command()\n@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')\n@click.option('--testing/--no-testing', default=False, help='Enable testing mode')\n@click.option('--tickers', default='SP500TR', help='Tickers (use comma)')\n@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')\n\"\"\"\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(\",\")\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == \"__main__\":\n main(settings.DEFAULT_CONFIG_FILENAME,False,'SP500TR','')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
<|reserved_special_token_0|>
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
Arr3 = tk.IntVar()
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
<|reserved_special_token_0|>
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
Arr3 = tk.IntVar()
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ventana = tk.Tk()
EntryArr = []
Label = ['¿Que es la analisis psicologico?', '¿Como se lee la mente?',
'¿Cuantas persepciones psicologicas existen?',
'¿Padre de la Psicologia moderna?', 'Parte del cuerpo donde esta la psyco']
Arr3 = tk.IntVar()
opciones1 = ['1', '2', '5']
opciones2 = ['John Lenon', 'Leon Borrego', 'Jefry', 'mxrio']
opciones3 = ['Cabeza', 'mente', 'Pecho', 'corazon', 'Manos']
respuesta = dict.fromkeys(opciones3, None)
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
Arr3 = tk.IntVar()
Arr4 = tk.IntVar()
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
Arr3 = tk.IntVar()
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import ttk, messagebox, Menu
ventana = tk.Tk()
EntryArr = []
Label = ['¿Que es la analisis psicologico?', '¿Como se lee la mente?',
'¿Cuantas persepciones psicologicas existen?',
'¿Padre de la Psicologia moderna?', 'Parte del cuerpo donde esta la psyco']
Arr3 = tk.IntVar()
opciones1 = ['1', '2', '5']
opciones2 = ['John Lenon', 'Leon Borrego', 'Jefry', 'mxrio']
opciones3 = ['Cabeza', 'mente', 'Pecho', 'corazon', 'Manos']
respuesta = dict.fromkeys(opciones3, None)
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
Arr3 = tk.IntVar()
Arr4 = tk.IntVar()
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
Arr3 = tk.IntVar()
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
<|reserved_special_token_1|>
import tkinter as tk
from tkinter import ttk, messagebox, Menu
ventana = tk.Tk()
EntryArr = []
Label = ["¿Que es la analisis psicologico?", "¿Como se lee la mente?", "¿Cuantas persepciones psicologicas existen?", "¿Padre de la Psicologia moderna?", "Parte del cuerpo donde esta la psyco"]
Arr3 = tk.IntVar()
opciones1 = ["1", "2","5"]
opciones2 = ["John Lenon", "Leon Borrego", "Jefry", "mxrio"]
opciones3 = ["Head", "Mind", "Chest", "Heart", "Hands"]
respuesta = dict.fromkeys(opciones3, None)
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ""
for x in EntryArr:
if not x.get():
messagebox.showinfo("Error","Campos no llenos")
return
else:
info += (f"{Label[i]}\t{x.get()}"+ "\n")
cal = 40
i+= 1
if(Arr3.get() == 1):
cal+= 20
if (Arr4.get() == 2):
cal+= 20
messagebox.showinfo("resultados","Tu calificaion es"+ str(cal) )
Arr3 = tk.IntVar()
Arr4 = tk.IntVar()
def edicion1():
indice = 0
for i in range(0,2):
EntryArr.append(tk.StringVar())
grid(
ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, 10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
for i in range(0,3):
grid(ttk.Radiobutton(ventana, text = opciones1[i], variable=Arr3, value = i), icol, 2, 5, 5)
icol +=1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0,4):
grid(ttk.Radiobutton(ventana, text = opciones2[i], variable=Arr4, value = i), icol, 3, 5, 5)
icol +=1
# Botton
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text = key, variable = respuesta[key]).grid(row = 5, column = icol)
icol = icol + 1
    Botton = tk.Button(ventana, text="Accept", command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
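# --- Editor's illustrative sketch (hypothetical extension, not in the original):
# the Checkbuttons store tk.IntVar flags in "respuesta", but click() never
# scores them. One possible scoring pass, assuming "Mind" is the intended
# correct answer for question 5 (an assumption, not stated by the author);
# call it only after edicion1() has created the IntVars:
def score_checkboxes(answers, correct_key="Mind"):
    # award 20 points only when exactly the correct box is ticked
    picked = [key for key, var in answers.items() if var.get() == 1]
    return 20 if picked == [correct_key] else 0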
|
flexible
|
{
"blob_id": "aeab80e2d0006ffa938366ef046d2ab3d387f88c",
"index": 1152,
"step-1": "<mask token>\n\n\ndef click():\n i = 0\n cal = 0\n info = ''\n for x in EntryArr:\n if not x.get():\n messagebox.showinfo('Error', 'Campos no llenos')\n return\n else:\n info += f'{Label[i]}\\t{x.get()}' + '\\n'\n cal = 40\n i += 1\n if Arr3.get() == 1:\n cal += 20\n if Arr4.get() == 2:\n cal += 20\n messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))\n\n\n<mask token>\n\n\ndef edicion1():\n indice = 0\n for i in range(0, 2):\n EntryArr.append(tk.StringVar())\n grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, \n 10, 10)\n grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)\n indice += 1\n grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)\n icol = 1\n Arr3 = tk.IntVar()\n for i in range(0, 3):\n grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,\n value=i), icol, 2, 5, 5)\n icol += 1\n icol = 1\n grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)\n for i in range(0, 4):\n grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,\n value=i), icol, 3, 5, 5)\n icol += 1\n grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)\n icol = 0\n for key in respuesta:\n respuesta[key] = tk.IntVar()\n ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row\n =5, column=icol)\n icol = icol + 1\n Botton = tk.Button(ventana, text='Aceptar', command=click)\n grid(Botton, 2, 10, 10, 10)\n\n\ndef main():\n edicion1()\n ventana.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef grid(Component, col, row1, padx1, pady1):\n Component.grid(column=col, row=row1, padx=padx1, pady=pady1)\n\n\ndef click():\n i = 0\n cal = 0\n info = ''\n for x in EntryArr:\n if not x.get():\n messagebox.showinfo('Error', 'Campos no llenos')\n return\n else:\n info += f'{Label[i]}\\t{x.get()}' + '\\n'\n cal = 40\n i += 1\n if Arr3.get() == 1:\n cal += 20\n if Arr4.get() == 2:\n cal += 20\n messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))\n\n\n<mask token>\n\n\ndef edicion1():\n indice = 0\n for i in range(0, 2):\n EntryArr.append(tk.StringVar())\n grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, \n 10, 10)\n grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)\n indice += 1\n grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)\n icol = 1\n Arr3 = tk.IntVar()\n for i in range(0, 3):\n grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,\n value=i), icol, 2, 5, 5)\n icol += 1\n icol = 1\n grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)\n for i in range(0, 4):\n grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,\n value=i), icol, 3, 5, 5)\n icol += 1\n grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)\n icol = 0\n for key in respuesta:\n respuesta[key] = tk.IntVar()\n ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row\n =5, column=icol)\n icol = icol + 1\n Botton = tk.Button(ventana, text='Aceptar', command=click)\n grid(Botton, 2, 10, 10, 10)\n\n\ndef main():\n edicion1()\n ventana.mainloop()\n\n\nmain()\n",
"step-3": "<mask token>\nventana = tk.Tk()\nEntryArr = []\nLabel = ['¿Que es la analisis psicologico?', '¿Como se lee la mente?',\n '¿Cuantas persepciones psicologicas existen?',\n '¿Padre de la Psicologia moderna?', 'Parte del cuerpo donde esta la psyco']\nArr3 = tk.IntVar()\nopciones1 = ['1', '2', '5']\nopciones2 = ['John Lenon', 'Leon Borrego', 'Jefry', 'mxrio']\nopciones3 = ['Cabeza', 'mente', 'Pecho', 'corazon', 'Manos']\nrespuesta = dict.fromkeys(opciones3, None)\n\n\ndef grid(Component, col, row1, padx1, pady1):\n Component.grid(column=col, row=row1, padx=padx1, pady=pady1)\n\n\ndef click():\n i = 0\n cal = 0\n info = ''\n for x in EntryArr:\n if not x.get():\n messagebox.showinfo('Error', 'Campos no llenos')\n return\n else:\n info += f'{Label[i]}\\t{x.get()}' + '\\n'\n cal = 40\n i += 1\n if Arr3.get() == 1:\n cal += 20\n if Arr4.get() == 2:\n cal += 20\n messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))\n\n\nArr3 = tk.IntVar()\nArr4 = tk.IntVar()\n\n\ndef edicion1():\n indice = 0\n for i in range(0, 2):\n EntryArr.append(tk.StringVar())\n grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, \n 10, 10)\n grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)\n indice += 1\n grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)\n icol = 1\n Arr3 = tk.IntVar()\n for i in range(0, 3):\n grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,\n value=i), icol, 2, 5, 5)\n icol += 1\n icol = 1\n grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)\n for i in range(0, 4):\n grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,\n value=i), icol, 3, 5, 5)\n icol += 1\n grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)\n icol = 0\n for key in respuesta:\n respuesta[key] = tk.IntVar()\n ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row\n =5, column=icol)\n icol = icol + 1\n Botton = tk.Button(ventana, text='Aceptar', command=click)\n grid(Botton, 2, 10, 10, 10)\n\n\ndef main():\n edicion1()\n ventana.mainloop()\n\n\nmain()\n",
"step-4": "import tkinter as tk\nfrom tkinter import ttk, messagebox, Menu\nventana = tk.Tk()\nEntryArr = []\nLabel = ['¿Que es la analisis psicologico?', '¿Como se lee la mente?',\n '¿Cuantas persepciones psicologicas existen?',\n '¿Padre de la Psicologia moderna?', 'Parte del cuerpo donde esta la psyco']\nArr3 = tk.IntVar()\nopciones1 = ['1', '2', '5']\nopciones2 = ['John Lenon', 'Leon Borrego', 'Jefry', 'mxrio']\nopciones3 = ['Cabeza', 'mente', 'Pecho', 'corazon', 'Manos']\nrespuesta = dict.fromkeys(opciones3, None)\n\n\ndef grid(Component, col, row1, padx1, pady1):\n Component.grid(column=col, row=row1, padx=padx1, pady=pady1)\n\n\ndef click():\n i = 0\n cal = 0\n info = ''\n for x in EntryArr:\n if not x.get():\n messagebox.showinfo('Error', 'Campos no llenos')\n return\n else:\n info += f'{Label[i]}\\t{x.get()}' + '\\n'\n cal = 40\n i += 1\n if Arr3.get() == 1:\n cal += 20\n if Arr4.get() == 2:\n cal += 20\n messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))\n\n\nArr3 = tk.IntVar()\nArr4 = tk.IntVar()\n\n\ndef edicion1():\n indice = 0\n for i in range(0, 2):\n EntryArr.append(tk.StringVar())\n grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, \n 10, 10)\n grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)\n indice += 1\n grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)\n icol = 1\n Arr3 = tk.IntVar()\n for i in range(0, 3):\n grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,\n value=i), icol, 2, 5, 5)\n icol += 1\n icol = 1\n grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)\n for i in range(0, 4):\n grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,\n value=i), icol, 3, 5, 5)\n icol += 1\n grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)\n icol = 0\n for key in respuesta:\n respuesta[key] = tk.IntVar()\n ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row\n =5, column=icol)\n icol = icol + 1\n Botton = tk.Button(ventana, text='Aceptar', command=click)\n grid(Botton, 2, 10, 10, 10)\n\n\ndef main():\n edicion1()\n ventana.mainloop()\n\n\nmain()\n",
"step-5": "import tkinter as tk\nfrom tkinter import ttk, messagebox, Menu\n\n\nventana = tk.Tk()\nEntryArr = []\nLabel = [\"¿Que es la analisis psicologico?\", \"¿Como se lee la mente?\", \"¿Cuantas persepciones psicologicas existen?\", \"¿Padre de la Psicologia moderna?\", \"Parte del cuerpo donde esta la psyco\"]\nArr3 = tk.IntVar()\nopciones1 = [\"1\", \"2\",\"5\"]\nopciones2 = [\"John Lenon\", \"Leon Borrego\", \"Jefry\", \"mxrio\"]\nopciones3 = [\"Cabeza\", \"mente\", \"Pecho\", \"corazon\", \"Manos\"]\nrespuesta = dict.fromkeys(opciones3, None)\n\ndef grid(Component, col, row1, padx1, pady1):\n Component.grid(column=col, row=row1, padx=padx1, pady=pady1)\n\ndef click():\n i = 0\n cal = 0\n info = \"\"\n for x in EntryArr:\n if not x.get():\n messagebox.showinfo(\"Error\",\"Campos no llenos\")\n return\n else: \n info += (f\"{Label[i]}\\t{x.get()}\"+ \"\\n\")\n cal = 40\n i+= 1\n\n if(Arr3.get() == 1):\n cal+= 20\n if (Arr4.get() == 2):\n cal+= 20\n messagebox.showinfo(\"resultados\",\"Tu calificaion es\"+ str(cal) )\n\n\n\nArr3 = tk.IntVar()\nArr4 = tk.IntVar()\n\n\n\ndef edicion1():\n \n indice = 0\n for i in range(0,2):\n EntryArr.append(tk.StringVar())\n grid(\n ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, 10, 10)\n grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)\n indice += 1\n grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)\n icol = 1\n Arr3 = tk.IntVar()\n for i in range(0,3):\n grid(ttk.Radiobutton(ventana, text = opciones1[i], variable=Arr3, value = i), icol, 2, 5, 5)\n icol +=1\n \n icol = 1\n grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)\n for i in range(0,4):\n grid(ttk.Radiobutton(ventana, text = opciones2[i], variable=Arr4, value = i), icol, 3, 5, 5)\n icol +=1\n # Botton\n grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)\n icol = 0\n for key in respuesta:\n respuesta[key] = tk.IntVar()\n ttk.Checkbutton(ventana, text = key, variable = respuesta[key]).grid(row = 5, column = icol)\n icol = icol + 1 \n\n\n Botton = tk.Button(ventana, text=\"Aceptar\", command = click)\n grid(Botton, 2, 10, 10, 10)\n\n\n\n\ndef main():\n edicion1()\n ventana.mainloop()\n\n\nmain()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
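
Editor's note on the tkinter record above: inside edicion1() the assignment Arr3 = tk.IntVar() creates a *local* variable that shadows the module-level Arr3 read by click(), so the first radio-button group can never add its 20 points. A minimal sketch of the non-shadowing pattern (hypothetical names, not part of the dataset record):

import tkinter as tk

root = tk.Tk()
choice = tk.IntVar()  # module level: the object the callback will read


def build_options(options):
    # no "choice = tk.IntVar()" here -- rebinding the name locally would
    # leave the buttons wired to a variable the callback never sees
    for i, text in enumerate(options):
        tk.Radiobutton(root, text=text, variable=choice, value=i).pack()


build_options(['1', '2', '5'])
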
import numpy as np
from load_data import load_entity, load_candidates2, load_train_data
def predict_batch(test_data, model, batch_size=None):
result = model.predict(test_data, batch_size=batch_size)
return result
def predict_data(test_data, entity_path, model, predict_path, score_path, test_path, dataset):
entity_dict, id_map = load_entity(entity_path)
acc_cnt, total_cnt = 0, 0
w_l = ''
all_score = ''
for data, labels, raw_data in test_data:
total_cnt += 1
groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]
raw_entity_list = data['entity_name']
pred_result = predict_batch(data, model, batch_size=len(labels))
pred_result = [j for r in pred_result for j in r]
pred_index = np.argmax(pred_result)
pred_label = labels[pred_index]
pred_entity_name = raw_entity_list[pred_index]
#all score
all_score += doc_id + '\t' + mention
for index, score in enumerate(pred_result):
entity_id = labels[index]
entity_name = raw_entity_list[index]
all_score += '\t' + entity_id + '\t' + entity_name + '\t' + str(round(score, 4))
all_score += '\n'
if pred_label == groud_truth:
acc_cnt += 1
else:
# write wrong results down
if groud_truth in id_map:
groud_truth = id_map[groud_truth]
ground_name = ''
if '+' in groud_truth:
ground_name = groud_truth
else:
if groud_truth not in entity_dict:
ground_name = ground_name
else:
ground_name = entity_dict[groud_truth][0]
w_l += doc_id + '\t' + mention + '\t' + groud_truth + '\t' + \
ground_name + '\t' + pred_label + '\t' + pred_entity_name + '\n'
accuracy = 1.0 * acc_cnt / (total_cnt+1)
with open(predict_path, 'w', encoding='utf8')as f:
f.write(w_l)
with open(score_path, 'w', encoding='utf8')as f:
f.write(all_score)
if dataset == 'clef':
return post_predict(test_path, score_path, entity_path)
else:
return accuracy
def post_predict(test_path, score_path, entity_path, alpha=0.75):
candidate_dict = load_candidates2(score_path)
test_data, all_data = load_train_data(test_path)
entity_dict, _ = load_entity(entity_path)
acc_cnt, w_l = 0, ''
predict_dict = dict()
for mention, candidates in candidate_dict.items():
if len(candidates) == 1:
predict_dict[mention] = (candidates[0][0], candidates[0][1])
continue
max_score, max_can = candidates[0][2], candidates[0]
for e_id, e_name, e_score in candidates:
if e_score > max_score:
max_score = e_score
max_can = (e_id, e_name, e_score)
e_id, e_name, e_score = max_can
if e_score < alpha:
e_id, e_name = 'cui-less', 'cui-less'
predict_dict[mention] = (e_id, e_name)
for doc_id, mention, label in all_data:
if str.lower(label) == 'cui-less':
label = 'cui-less'
pred_label, pred_entity_name = predict_dict[mention]
if pred_label == label:
acc_cnt += 1
else:
entity_name = 'None'
if label in entity_dict:
entity_name = entity_dict[label][0]
w_l += doc_id + '\t' + mention + '\t' + label + '\t' + \
entity_name + '\t' + pred_label + '\t' + pred_entity_name + '\n'
with open('../checkpoints/post_predict_result.txt', 'w')as f:
f.write(w_l)
total_cnt = len(all_data)
accuracy = 1.0 * acc_cnt / (total_cnt)
return accuracy
if __name__ == '__main__':
flag = 1
|
normal
|
{
"blob_id": "a19616d448da057d5be0af841467a25baaacf5b3",
"index": 9299,
"step-1": "<mask token>\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\n<mask token>\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path,\n test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(\n round(score, 4))\n all_score += '\\n'\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n elif groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' +\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n accuracy = 1.0 * acc_cnt / (total_cnt + 1)\n with open(predict_path, 'w', encoding='utf8') as f:\n f.write(w_l)\n with open(score_path, 'w', encoding='utf8') as f:\n f.write(all_score)\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path,\n test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(\n round(score, 4))\n all_score += '\\n'\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n elif groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' +\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n accuracy = 1.0 * acc_cnt / (total_cnt + 1)\n with open(predict_path, 'w', encoding='utf8') as f:\n f.write(w_l)\n with open(score_path, 'w', encoding='utf8') as f:\n f.write(all_score)\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\nif __name__ == '__main__':\n flag = 1\n",
"step-4": "import numpy as np\nfrom load_data import load_entity, load_candidates2, load_train_data\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path,\n test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(\n round(score, 4))\n all_score += '\\n'\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n elif groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' +\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n accuracy = 1.0 * acc_cnt / (total_cnt + 1)\n with open(predict_path, 'w', encoding='utf8') as f:\n f.write(w_l)\n with open(score_path, 'w', encoding='utf8') as f:\n f.write(all_score)\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n acc_cnt, w_l = 0, ''\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = candidates[0][0], candidates[0][1]\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = e_id, e_name, e_score\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = e_id, e_name\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += (doc_id + '\\t' + mention + '\\t' + label + '\\t' +\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name +\n '\\n')\n with open('../checkpoints/post_predict_result.txt', 'w') as f:\n f.write(w_l)\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / total_cnt\n return accuracy\n\n\nif __name__ == '__main__':\n flag = 1\n",
"step-5": "import numpy as np\nfrom load_data import load_entity, load_candidates2, load_train_data\n\n\ndef predict_batch(test_data, model, batch_size=None):\n result = model.predict(test_data, batch_size=batch_size)\n return result\n\n\ndef predict_data(test_data, entity_path, model, predict_path, score_path, test_path, dataset):\n entity_dict, id_map = load_entity(entity_path)\n acc_cnt, total_cnt = 0, 0\n w_l = ''\n all_score = ''\n for data, labels, raw_data in test_data:\n total_cnt += 1\n groud_truth, doc_id, mention = raw_data[0], raw_data[1], raw_data[2]\n\n raw_entity_list = data['entity_name']\n pred_result = predict_batch(data, model, batch_size=len(labels))\n pred_result = [j for r in pred_result for j in r]\n pred_index = np.argmax(pred_result)\n pred_label = labels[pred_index]\n pred_entity_name = raw_entity_list[pred_index]\n\n #all score\n all_score += doc_id + '\\t' + mention\n for index, score in enumerate(pred_result):\n entity_id = labels[index]\n entity_name = raw_entity_list[index]\n all_score += '\\t' + entity_id + '\\t' + entity_name + '\\t' + str(round(score, 4))\n all_score += '\\n'\n\n if pred_label == groud_truth:\n acc_cnt += 1\n else:\n # write wrong results down\n if groud_truth in id_map:\n groud_truth = id_map[groud_truth]\n\n ground_name = ''\n if '+' in groud_truth:\n ground_name = groud_truth\n else:\n if groud_truth not in entity_dict:\n ground_name = ground_name\n else:\n ground_name = entity_dict[groud_truth][0]\n w_l += doc_id + '\\t' + mention + '\\t' + groud_truth + '\\t' + \\\n ground_name + '\\t' + pred_label + '\\t' + pred_entity_name + '\\n'\n\n accuracy = 1.0 * acc_cnt / (total_cnt+1)\n with open(predict_path, 'w', encoding='utf8')as f:\n f.write(w_l)\n\n with open(score_path, 'w', encoding='utf8')as f:\n f.write(all_score)\n\n if dataset == 'clef':\n return post_predict(test_path, score_path, entity_path)\n else:\n return accuracy\n\n\ndef post_predict(test_path, score_path, entity_path, alpha=0.75):\n candidate_dict = load_candidates2(score_path)\n test_data, all_data = load_train_data(test_path)\n entity_dict, _ = load_entity(entity_path)\n\n acc_cnt, w_l = 0, ''\n\n predict_dict = dict()\n for mention, candidates in candidate_dict.items():\n if len(candidates) == 1:\n predict_dict[mention] = (candidates[0][0], candidates[0][1])\n continue\n max_score, max_can = candidates[0][2], candidates[0]\n for e_id, e_name, e_score in candidates:\n if e_score > max_score:\n max_score = e_score\n max_can = (e_id, e_name, e_score)\n\n e_id, e_name, e_score = max_can\n if e_score < alpha:\n e_id, e_name = 'cui-less', 'cui-less'\n predict_dict[mention] = (e_id, e_name)\n\n for doc_id, mention, label in all_data:\n if str.lower(label) == 'cui-less':\n label = 'cui-less'\n pred_label, pred_entity_name = predict_dict[mention]\n if pred_label == label:\n acc_cnt += 1\n else:\n entity_name = 'None'\n if label in entity_dict:\n entity_name = entity_dict[label][0]\n w_l += doc_id + '\\t' + mention + '\\t' + label + '\\t' + \\\n entity_name + '\\t' + pred_label + '\\t' + pred_entity_name + '\\n'\n\n with open('../checkpoints/post_predict_result.txt', 'w')as f:\n f.write(w_l)\n\n total_cnt = len(all_data)\n accuracy = 1.0 * acc_cnt / (total_cnt)\n return accuracy\n\n\nif __name__ == '__main__':\n flag = 1",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
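
Two notes on the entity-linking record above: predict_data divides by total_cnt + 1, so the accuracy it reports is slightly deflated relative to acc_cnt / total_cnt, and "groud_truth" is a recurring misspelling of ground_truth. The re-ranking rule in post_predict reduces to an argmax with a cui-less fallback; a minimal sketch with hypothetical candidate tuples:

def pick_candidate(candidates, alpha=0.75):
    """candidates: list of (entity_id, entity_name, score) tuples."""
    if len(candidates) == 1:  # a lone candidate is taken as-is
        return candidates[0][0], candidates[0][1]
    e_id, e_name, e_score = max(candidates, key=lambda c: c[2])
    if e_score < alpha:       # nothing confident enough: back off
        return 'cui-less', 'cui-less'
    return e_id, e_name


print(pick_candidate([('C001', 'fever', 0.91), ('C002', 'chills', 0.42)]))
# -> ('C001', 'fever')
print(pick_candidate([('C003', 'rash', 0.30), ('C004', 'burn', 0.21)]))
# -> ('cui-less', 'cui-less')
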
import urllib.request
username = ''
link = r'https://www.instagram.com/' + username
html = urllib.request.urlopen(link)
print(html.read())
|
normal
|
{
"blob_id": "db93de33f537eeaf64ca8e2b2b79aba1f592305b",
"index": 5434,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(html.read())\n",
"step-3": "<mask token>\nusername = ''\nlink = 'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())\n",
"step-4": "import urllib.request\nusername = ''\nlink = 'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())\n",
"step-5": "import urllib.request\n\nusername = ''\nlink = r'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
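
In practice a bare urlopen against instagram.com tends to hit a login redirect or an HTTP error, partly because of the default "Python-urllib" user agent. A slightly hardened sketch (placeholder username, illustrative headers only):

import urllib.error
import urllib.request

username = 'example'  # placeholder
req = urllib.request.Request(
    'https://www.instagram.com/' + username,
    headers={'User-Agent': 'Mozilla/5.0'},
)
try:
    with urllib.request.urlopen(req, timeout=10) as resp:
        print(resp.read()[:200])  # first 200 bytes of the response body
except urllib.error.HTTPError as err:
    print('request rejected:', err.code)
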
<|reserved_special_token_0|>
class TestMark:
@pytest.mark.demo1
def test_case1(self):
print('testcase1')
@pytest.mark.demo1
def test_case2(self):
print('testcase1')
<|reserved_special_token_0|>
@pytest.mark.demo2
def test_case4(self):
print('testcase1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMark:
@pytest.mark.demo1
def test_case1(self):
print('testcase1')
@pytest.mark.demo1
def test_case2(self):
print('testcase1')
@pytest.mark.demo2
def test_case3(self):
print('testcase1')
@pytest.mark.demo2
def test_case4(self):
print('testcase1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMark:
@pytest.mark.demo1
def test_case1(self):
print('testcase1')
@pytest.mark.demo1
def test_case2(self):
print('testcase1')
@pytest.mark.demo2
def test_case3(self):
print('testcase1')
@pytest.mark.demo2
def test_case4(self):
print('testcase1')
if __name__ == '__main__':
pytest.main(['-v', '-s', 'test_mark.py', '-m', 'demo1'])
<|reserved_special_token_1|>
import pytest
class TestMark:
@pytest.mark.demo1
def test_case1(self):
print('testcase1')
@pytest.mark.demo1
def test_case2(self):
print('testcase1')
@pytest.mark.demo2
def test_case3(self):
print('testcase1')
@pytest.mark.demo2
def test_case4(self):
print('testcase1')
if __name__ == '__main__':
pytest.main(['-v', '-s', 'test_mark.py', '-m', 'demo1'])
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
class TestMark:
@pytest.mark.demo1
def test_case1(self):
print("testcase1")
@pytest.mark.demo1
def test_case2(self):
print("testcase1")
@pytest.mark.demo2
def test_case3(self):
print("testcase1")
@pytest.mark.demo2
def test_case4(self):
print("testcase1")
if __name__ == '__main__':
pytest.main(['-v','-s','test_mark.py','-m','demo1'])
|
flexible
|
{
"blob_id": "f49c15dca26d987e1d578790e077501a504e560b",
"index": 5814,
"step-1": "<mask token>\n\n\nclass TestMark:\n\n @pytest.mark.demo1\n def test_case1(self):\n print('testcase1')\n\n @pytest.mark.demo1\n def test_case2(self):\n print('testcase1')\n <mask token>\n\n @pytest.mark.demo2\n def test_case4(self):\n print('testcase1')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMark:\n\n @pytest.mark.demo1\n def test_case1(self):\n print('testcase1')\n\n @pytest.mark.demo1\n def test_case2(self):\n print('testcase1')\n\n @pytest.mark.demo2\n def test_case3(self):\n print('testcase1')\n\n @pytest.mark.demo2\n def test_case4(self):\n print('testcase1')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMark:\n\n @pytest.mark.demo1\n def test_case1(self):\n print('testcase1')\n\n @pytest.mark.demo1\n def test_case2(self):\n print('testcase1')\n\n @pytest.mark.demo2\n def test_case3(self):\n print('testcase1')\n\n @pytest.mark.demo2\n def test_case4(self):\n print('testcase1')\n\n\nif __name__ == '__main__':\n pytest.main(['-v', '-s', 'test_mark.py', '-m', 'demo1'])\n",
"step-4": "import pytest\n\n\nclass TestMark:\n\n @pytest.mark.demo1\n def test_case1(self):\n print('testcase1')\n\n @pytest.mark.demo1\n def test_case2(self):\n print('testcase1')\n\n @pytest.mark.demo2\n def test_case3(self):\n print('testcase1')\n\n @pytest.mark.demo2\n def test_case4(self):\n print('testcase1')\n\n\nif __name__ == '__main__':\n pytest.main(['-v', '-s', 'test_mark.py', '-m', 'demo1'])\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pytest\n\n\nclass TestMark:\n @pytest.mark.demo1\n def test_case1(self):\n print(\"testcase1\")\n\n @pytest.mark.demo1\n def test_case2(self):\n print(\"testcase1\")\n\n @pytest.mark.demo2\n def test_case3(self):\n print(\"testcase1\")\n\n @pytest.mark.demo2\n def test_case4(self):\n print(\"testcase1\")\nif __name__ == '__main__':\n pytest.main(['-v','-s','test_mark.py','-m','demo1'])",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
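
The "-m demo1" invocation in the pytest record above works as shown, but modern pytest emits PytestUnknownMarkWarning for unregistered custom marks. One conventional fix is to register them, e.g. in a conftest.py (sketch, assuming that filename):

# conftest.py -- register the custom marks used by test_mark.py so that
# "pytest -m demo1" runs without PytestUnknownMarkWarning
def pytest_configure(config):
    config.addinivalue_line('markers', 'demo1: first demo group')
    config.addinivalue_line('markers', 'demo2: second demo group')
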
<|reserved_special_token_0|>
class Parent2(ABC):
form = 'Parent2 Setup: %s'
class Child(Parent, Parent2):
def __init__(self, words):
self._words = self.form % words
super(Child, self).printing()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Parent(ABC):
def printing(self):
print(self._words)
class Parent2(ABC):
form = 'Parent2 Setup: %s'
class Child(Parent, Parent2):
def __init__(self, words):
self._words = self.form % words
super(Child, self).printing()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Parent(ABC):
def printing(self):
print(self._words)
class Parent2(ABC):
form = 'Parent2 Setup: %s'
class Child(Parent, Parent2):
def __init__(self, words):
self._words = self.form % words
super(Child, self).printing()
if __name__ == '__main__':
Child('hello world')
<|reserved_special_token_1|>
from abc import ABC
class Parent(ABC):
def printing(self):
print(self._words)
class Parent2(ABC):
form = 'Parent2 Setup: %s'
class Child(Parent, Parent2):
def __init__(self, words):
self._words = self.form % words
super(Child, self).printing()
if __name__ == '__main__':
Child('hello world')
<|reserved_special_token_1|>
from abc import ABC
class Parent(ABC):
def printing(self):
print(self._words)
class Parent2(ABC):
form = "Parent2 Setup: %s"
class Child(Parent, Parent2):
def __init__(self, words):
self._words = self.form % words
super(Child, self).printing()
if __name__ == "__main__":
Child("hello world")
|
flexible
|
{
"blob_id": "9ba60270a4afcf242de53692afd8ebff7d9b37a7",
"index": 4361,
"step-1": "<mask token>\n\n\nclass Parent2(ABC):\n form = 'Parent2 Setup: %s'\n\n\nclass Child(Parent, Parent2):\n\n def __init__(self, words):\n self._words = self.form % words\n super(Child, self).printing()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Parent(ABC):\n\n def printing(self):\n print(self._words)\n\n\nclass Parent2(ABC):\n form = 'Parent2 Setup: %s'\n\n\nclass Child(Parent, Parent2):\n\n def __init__(self, words):\n self._words = self.form % words\n super(Child, self).printing()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Parent(ABC):\n\n def printing(self):\n print(self._words)\n\n\nclass Parent2(ABC):\n form = 'Parent2 Setup: %s'\n\n\nclass Child(Parent, Parent2):\n\n def __init__(self, words):\n self._words = self.form % words\n super(Child, self).printing()\n\n\nif __name__ == '__main__':\n Child('hello world')\n",
"step-4": "from abc import ABC\n\n\nclass Parent(ABC):\n\n def printing(self):\n print(self._words)\n\n\nclass Parent2(ABC):\n form = 'Parent2 Setup: %s'\n\n\nclass Child(Parent, Parent2):\n\n def __init__(self, words):\n self._words = self.form % words\n super(Child, self).printing()\n\n\nif __name__ == '__main__':\n Child('hello world')\n",
"step-5": "from abc import ABC\n\nclass Parent(ABC):\n\t\n\tdef printing(self):\n\t\tprint(self._words)\n\nclass Parent2(ABC):\n\tform = \"Parent2 Setup: %s\"\n\nclass Child(Parent, Parent2):\n\t\n\tdef __init__(self, words):\n\t\tself._words = self.form % words\n\t\tsuper(Child, self).printing()\n\t\t\nif __name__ == \"__main__\":\n\tChild(\"hello world\")\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
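
Why Child in the record above picks up form from Parent2: attribute lookup on an instance walks the method resolution order left to right. A self-contained sketch with throwaway class names:

from abc import ABC


class A(ABC):
    pass


class B(ABC):
    form = 'B setup: %s'


class C(A, B):
    pass


print([k.__name__ for k in C.__mro__])  # ['C', 'A', 'B', 'ABC', 'object']
print(C.form % 'resolved on B')         # B setup: resolved on B
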
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('math.pow:', math.pow(num, power))
print('pow:', pow(num, power))
print('pow:', pow(num, power, 100))
print('fmod:', math.fmod(5, 3))
print('fmod:', math.fmod(-1e-100, 1e+100))
print('%:', -1e-100 % 1e+100)
print('exp:', math.exp(n))
print('gcd:', math.gcd(n, grad))
print('abs:', math.fabs(-num))
print('sqrt:', math.sqrt(num))
print('ceil:', math.ceil(float_num))
print('floor:', math.floor(float_num))
print('factorial:', math.factorial(num))
print('degrees:', math.degrees(rad))
print('radians:', math.radians(grad))
print('cos:', math.cos(rad))
print('sin:', math.sin(rad))
print('tan:', math.tan(rad))
print('acos:', math.acos(rad))
print('asin:', math.asin(rad))
print('atan:', math.atan(rad))
print('log:', math.log(n, base))
print('log2:', math.log2(n))
print('log10:', math.log10(n10))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
num = 8
float_num = 2.5
power = 8
rad = 0.5
grad = 90
n = 16
n10 = 1000
base = 2
print('math.pow:', math.pow(num, power))
print('pow:', pow(num, power))
print('pow:', pow(num, power, 100))
print('fmod:', math.fmod(5, 3))
print('fmod:', math.fmod(-1e-100, 1e+100))
print('%:', -1e-100 % 1e+100)
print('exp:', math.exp(n))
print('gcd:', math.gcd(n, grad))
print('abs:', math.fabs(-num))
print('sqrt:', math.sqrt(num))
print('ceil:', math.ceil(float_num))
print('floor:', math.floor(float_num))
print('factorial:', math.factorial(num))
print('degrees:', math.degrees(rad))
print('radians:', math.radians(grad))
print('cos:', math.cos(rad))
print('sin:', math.sin(rad))
print('tan:', math.tan(rad))
print('acos:', math.acos(rad))
print('asin:', math.asin(rad))
print('atan:', math.atan(rad))
print('log:', math.log(n, base))
print('log2:', math.log2(n))
print('log10:', math.log10(n10))
<|reserved_special_token_1|>
import math
num = 8
float_num = 2.5
power = 8
rad = 0.5
grad = 90
n = 16
n10 = 1000
base = 2
print('math.pow:', math.pow(num, power))
print('pow:', pow(num, power))
print('pow:', pow(num, power, 100))
print('fmod:', math.fmod(5, 3))
print('fmod:', math.fmod(-1e-100, 1e+100))
print('%:', -1e-100 % 1e+100)
print('exp:', math.exp(n))
print('gcd:', math.gcd(n, grad))
print('abs:', math.fabs(-num))
print('sqrt:', math.sqrt(num))
print('ceil:', math.ceil(float_num))
print('floor:', math.floor(float_num))
print('factorial:', math.factorial(num))
print('degrees:', math.degrees(rad))
print('radians:', math.radians(grad))
print('cos:', math.cos(rad))
print('sin:', math.sin(rad))
print('tan:', math.tan(rad))
print('acos:', math.acos(rad))
print('asin:', math.asin(rad))
print('atan:', math.atan(rad))
print('log:', math.log(n, base))
print('log2:', math.log2(n))
print('log10:', math.log10(n10))
<|reserved_special_token_1|>
# https://docs.python.org/3/library/math.html
# https://metanit.com/python/tutorial/6.2.php
# https://habr.com/ru/post/337260/
# https://habr.com/ru/post/112953/
import math
num = 8
float_num = 2.5
power = 8
rad = 0.5
grad = 90
n = 16
n10 = 1000
base = 2
print("math.pow:", math.pow(num, power)) # проблема точности после 16го знака
print("pow:", pow(num, power))
print("pow:", pow(num, power, 100))
# analogue of a%b, but suitable for float
print("fmod:", math.fmod(5, 3))
print("fmod:", math.fmod(-1e-100, 1e100))
print("%:", -1e-100 % 1e100)
print("exp:", math.exp(n)) # e ** n
print("gcd:", math.gcd(n, grad))
print("abs:", math.fabs(-num))
print("sqrt:", math.sqrt(num))
print("ceil:", math.ceil(float_num))
print("floor:", math.floor(float_num))
print("factorial:", math.factorial(num))
print("degrees:", math.degrees(rad))
print("radians:", math.radians(grad))
print("cos:", math.cos(rad))
print("sin:", math.sin(rad))
print("tan:", math.tan(rad))
print("acos:", math.acos(rad))
print("asin:", math.asin(rad))
print("atan:", math.atan(rad))
print("log:", math.log(n, base))
print("log2:", math.log2(n))
print("log10:", math.log10(n10))
|
flexible
|
{
"blob_id": "17db8f7a35004a1f2bd8d098aff39928d20511da",
"index": 7026,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('math.pow:', math.pow(num, power))\nprint('pow:', pow(num, power))\nprint('pow:', pow(num, power, 100))\nprint('fmod:', math.fmod(5, 3))\nprint('fmod:', math.fmod(-1e-100, 1e+100))\nprint('%:', -1e-100 % 1e+100)\nprint('exp:', math.exp(n))\nprint('gcd:', math.gcd(n, grad))\nprint('abs:', math.fabs(-num))\nprint('sqrt:', math.sqrt(num))\nprint('ceil:', math.ceil(float_num))\nprint('floor:', math.floor(float_num))\nprint('factorial:', math.factorial(num))\nprint('degrees:', math.degrees(rad))\nprint('radians:', math.radians(grad))\nprint('cos:', math.cos(rad))\nprint('sin:', math.sin(rad))\nprint('tan:', math.tan(rad))\nprint('acos:', math.acos(rad))\nprint('asin:', math.asin(rad))\nprint('atan:', math.atan(rad))\nprint('log:', math.log(n, base))\nprint('log2:', math.log2(n))\nprint('log10:', math.log10(n10))\n",
"step-3": "<mask token>\nnum = 8\nfloat_num = 2.5\npower = 8\nrad = 0.5\ngrad = 90\nn = 16\nn10 = 1000\nbase = 2\nprint('math.pow:', math.pow(num, power))\nprint('pow:', pow(num, power))\nprint('pow:', pow(num, power, 100))\nprint('fmod:', math.fmod(5, 3))\nprint('fmod:', math.fmod(-1e-100, 1e+100))\nprint('%:', -1e-100 % 1e+100)\nprint('exp:', math.exp(n))\nprint('gcd:', math.gcd(n, grad))\nprint('abs:', math.fabs(-num))\nprint('sqrt:', math.sqrt(num))\nprint('ceil:', math.ceil(float_num))\nprint('floor:', math.floor(float_num))\nprint('factorial:', math.factorial(num))\nprint('degrees:', math.degrees(rad))\nprint('radians:', math.radians(grad))\nprint('cos:', math.cos(rad))\nprint('sin:', math.sin(rad))\nprint('tan:', math.tan(rad))\nprint('acos:', math.acos(rad))\nprint('asin:', math.asin(rad))\nprint('atan:', math.atan(rad))\nprint('log:', math.log(n, base))\nprint('log2:', math.log2(n))\nprint('log10:', math.log10(n10))\n",
"step-4": "import math\nnum = 8\nfloat_num = 2.5\npower = 8\nrad = 0.5\ngrad = 90\nn = 16\nn10 = 1000\nbase = 2\nprint('math.pow:', math.pow(num, power))\nprint('pow:', pow(num, power))\nprint('pow:', pow(num, power, 100))\nprint('fmod:', math.fmod(5, 3))\nprint('fmod:', math.fmod(-1e-100, 1e+100))\nprint('%:', -1e-100 % 1e+100)\nprint('exp:', math.exp(n))\nprint('gcd:', math.gcd(n, grad))\nprint('abs:', math.fabs(-num))\nprint('sqrt:', math.sqrt(num))\nprint('ceil:', math.ceil(float_num))\nprint('floor:', math.floor(float_num))\nprint('factorial:', math.factorial(num))\nprint('degrees:', math.degrees(rad))\nprint('radians:', math.radians(grad))\nprint('cos:', math.cos(rad))\nprint('sin:', math.sin(rad))\nprint('tan:', math.tan(rad))\nprint('acos:', math.acos(rad))\nprint('asin:', math.asin(rad))\nprint('atan:', math.atan(rad))\nprint('log:', math.log(n, base))\nprint('log2:', math.log2(n))\nprint('log10:', math.log10(n10))\n",
"step-5": "# https://docs.python.org/3/library/math.html\n# https://metanit.com/python/tutorial/6.2.php\n# https://habr.com/ru/post/337260/\n# https://habr.com/ru/post/112953/\nimport math\n\n\nnum = 8\nfloat_num = 2.5\npower = 8\nrad = 0.5\ngrad = 90\nn = 16\nn10 = 1000\nbase = 2\n\nprint(\"math.pow:\", math.pow(num, power)) # проблема точности после 16го знака\nprint(\"pow:\", pow(num, power))\nprint(\"pow:\", pow(num, power, 100))\n\n# аналог a%b, но подходит для float\nprint(\"fmod:\", math.fmod(5, 3))\nprint(\"fmod:\", math.fmod(-1e-100, 1e100))\nprint(\"%:\", -1e-100 % 1e100)\n\nprint(\"exp:\", math.exp(n)) # e ** n\n\nprint(\"gcd:\", math.gcd(n, grad))\nprint(\"abs:\", math.fabs(-num))\nprint(\"sqrt:\", math.sqrt(num))\nprint(\"ceil:\", math.ceil(float_num))\nprint(\"floor:\", math.floor(float_num))\nprint(\"factorial:\", math.factorial(num))\nprint(\"degrees:\", math.degrees(rad))\nprint(\"radians:\", math.radians(grad))\nprint(\"cos:\", math.cos(rad))\nprint(\"sin:\", math.sin(rad))\nprint(\"tan:\", math.tan(rad))\nprint(\"acos:\", math.acos(rad))\nprint(\"asin:\", math.asin(rad))\nprint(\"atan:\", math.atan(rad))\nprint(\"log:\", math.log(n, base))\nprint(\"log2:\", math.log2(n))\nprint(\"log10:\", math.log10(n10))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
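
One behavior the fmod/% prints above demonstrate without explaining: math.fmod keeps the sign of its first argument, while % keeps the sign of the divisor, and for floats of wildly different magnitude % also rounds. A compact check:

import math

print(math.fmod(-1e-100, 1e100))  # -1e-100 (exact, sign of x)
print(-1e-100 % 1e100)            # 1e+100  (1e100 - 1e-100, rounded)
print(math.fmod(-5, 3), -5 % 3)   # -2.0 1  (sign of x vs sign of y)
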
<|reserved_special_token_0|>
def _getAlbums(conn, smugmug, lock):
albums = smugmug.albums_get(Extras='LastUpdated')
for album in albums['Albums']:
myLogger.debug(album)
title = album['Title']
cat = None
catid = None
subCat = None
subCatid = None
try:
cat = album['Category']['Name']
catid = album['Category']['id']
except KeyError:
cat = None
catid = None
try:
subCat = album['SubCategory']['Name']
subCatid = album['SubCategory']['id']
except KeyError:
subCat = None
subCatid = None
lock.acquire()
db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime
.datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),
album['Key'], album['id'])
lock.release()
return albums
def _getPictures(album, conn, smugmug, lock):
pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'
], Extras='MD5Sum,LastUpdated,FileName')
albumId = pictures['Album']['id']
for picture in pictures['Album']['Images']:
lock.acquire()
db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[
'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],
picture['Key'], picture['id'], picture['FileName'])
lock.release()
def _getUserCategories(conn, smugmug, lock):
result = smugmug.categories_get()
categories = result['Categories']
ids = []
for category in categories:
ids.append(category['id'])
lock.acquire()
db.addUserCategory(conn, category['Type'], category['id'], category
['NiceName'], category['Name'])
lock.release()
return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
for categoryid in ids:
result = smugmug.subcategories_get(CategoryID=categoryid)
subcategories = result['SubCategories']
for subcategory in subcategories:
lock.acquire()
db.addUserSubCategory(conn, subcategory['id'], subcategory[
'NiceName'], subcategory['Name'], categoryid)
lock.release()
def _emptySmugMugTables(conn, lock):
lock.acquire()
db.execute(conn, 'DELETE FROM smug_album')
db.execute(conn, 'DELETE FROM smug_image')
db.execute(conn, 'DELETE FROM user_category')
db.execute(conn, 'DELETE FROM user_subcategory')
lock.release()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getAllPictureInfo(configobj, smugmug, lock):
myLogger.info("getAllPictures() parent process:'{0}' process id:'{1}".
format(os.getppid(), os.getpid()))
conn = db.getConn(configobj)
myLogger.debug('Emptying smugmug tables.')
_emptySmugMugTables(conn, lock)
myLogger.debug('Getting album info from smugmug.')
albums = _getAlbums(conn, smugmug, lock)
for album in albums['Albums']:
myLogger.debug("geting picture info for album '%s'", album['Title'])
_getPictures(album, conn, smugmug, lock)
ids = _getUserCategories(conn, smugmug, lock)
_getUserSubCategories(conn, smugmug, lock, ids)
conn.close()
myLogger.info('Finished Scanning SmugMug')
def _getAlbums(conn, smugmug, lock):
albums = smugmug.albums_get(Extras='LastUpdated')
for album in albums['Albums']:
myLogger.debug(album)
title = album['Title']
cat = None
catid = None
subCat = None
subCatid = None
try:
cat = album['Category']['Name']
catid = album['Category']['id']
except KeyError:
cat = None
catid = None
try:
subCat = album['SubCategory']['Name']
subCatid = album['SubCategory']['id']
except KeyError:
subCat = None
subCatid = None
lock.acquire()
db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime
.datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),
album['Key'], album['id'])
lock.release()
return albums
def _getPictures(album, conn, smugmug, lock):
pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'
], Extras='MD5Sum,LastUpdated,FileName')
albumId = pictures['Album']['id']
for picture in pictures['Album']['Images']:
lock.acquire()
db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[
'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],
picture['Key'], picture['id'], picture['FileName'])
lock.release()
def _getUserCategories(conn, smugmug, lock):
result = smugmug.categories_get()
categories = result['Categories']
ids = []
for category in categories:
ids.append(category['id'])
lock.acquire()
db.addUserCategory(conn, category['Type'], category['id'], category
['NiceName'], category['Name'])
lock.release()
return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
for categoryid in ids:
result = smugmug.subcategories_get(CategoryID=categoryid)
subcategories = result['SubCategories']
for subcategory in subcategories:
lock.acquire()
db.addUserSubCategory(conn, subcategory['id'], subcategory[
'NiceName'], subcategory['Name'], categoryid)
lock.release()
def _emptySmugMugTables(conn, lock):
lock.acquire()
db.execute(conn, 'DELETE FROM smug_album')
db.execute(conn, 'DELETE FROM smug_image')
db.execute(conn, 'DELETE FROM user_category')
db.execute(conn, 'DELETE FROM user_subcategory')
lock.release()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myLogger = logging.getLogger('smugScan')
def getAllPictureInfo(configobj, smugmug, lock):
myLogger.info("getAllPictures() parent process:'{0}' process id:'{1}".
format(os.getppid(), os.getpid()))
conn = db.getConn(configobj)
myLogger.debug('Emptying smugmug tables.')
_emptySmugMugTables(conn, lock)
myLogger.debug('Getting album info from smugmug.')
albums = _getAlbums(conn, smugmug, lock)
for album in albums['Albums']:
myLogger.debug("geting picture info for album '%s'", album['Title'])
_getPictures(album, conn, smugmug, lock)
ids = _getUserCategories(conn, smugmug, lock)
_getUserSubCategories(conn, smugmug, lock, ids)
conn.close()
myLogger.info('Finished Scanning SmugMug')
def _getAlbums(conn, smugmug, lock):
albums = smugmug.albums_get(Extras='LastUpdated')
for album in albums['Albums']:
myLogger.debug(album)
title = album['Title']
cat = None
catid = None
subCat = None
subCatid = None
try:
cat = album['Category']['Name']
catid = album['Category']['id']
except KeyError:
cat = None
catid = None
try:
subCat = album['SubCategory']['Name']
subCatid = album['SubCategory']['id']
except KeyError:
subCat = None
subCatid = None
lock.acquire()
db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime
.datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),
album['Key'], album['id'])
lock.release()
return albums
def _getPictures(album, conn, smugmug, lock):
pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'
], Extras='MD5Sum,LastUpdated,FileName')
albumId = pictures['Album']['id']
for picture in pictures['Album']['Images']:
lock.acquire()
db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[
'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],
picture['Key'], picture['id'], picture['FileName'])
lock.release()
def _getUserCategories(conn, smugmug, lock):
result = smugmug.categories_get()
categories = result['Categories']
ids = []
for category in categories:
ids.append(category['id'])
lock.acquire()
db.addUserCategory(conn, category['Type'], category['id'], category
['NiceName'], category['Name'])
lock.release()
return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
for categoryid in ids:
result = smugmug.subcategories_get(CategoryID=categoryid)
subcategories = result['SubCategories']
for subcategory in subcategories:
lock.acquire()
db.addUserSubCategory(conn, subcategory['id'], subcategory[
'NiceName'], subcategory['Name'], categoryid)
lock.release()
def _emptySmugMugTables(conn, lock):
lock.acquire()
db.execute(conn, 'DELETE FROM smug_album')
db.execute(conn, 'DELETE FROM smug_image')
db.execute(conn, 'DELETE FROM user_category')
db.execute(conn, 'DELETE FROM user_subcategory')
lock.release()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import logging
import datetime
import os
import db
myLogger = logging.getLogger('smugScan')
def getAllPictureInfo(configobj, smugmug, lock):
myLogger.info("getAllPictures() parent process:'{0}' process id:'{1}".
format(os.getppid(), os.getpid()))
conn = db.getConn(configobj)
myLogger.debug('Emptying smugmug tables.')
_emptySmugMugTables(conn, lock)
myLogger.debug('Getting album info from smugmug.')
albums = _getAlbums(conn, smugmug, lock)
for album in albums['Albums']:
myLogger.debug("geting picture info for album '%s'", album['Title'])
_getPictures(album, conn, smugmug, lock)
ids = _getUserCategories(conn, smugmug, lock)
_getUserSubCategories(conn, smugmug, lock, ids)
conn.close()
myLogger.info('Finished Scanning SmugMug')
def _getAlbums(conn, smugmug, lock):
albums = smugmug.albums_get(Extras='LastUpdated')
for album in albums['Albums']:
myLogger.debug(album)
title = album['Title']
cat = None
catid = None
subCat = None
subCatid = None
try:
cat = album['Category']['Name']
catid = album['Category']['id']
except KeyError:
cat = None
catid = None
try:
subCat = album['SubCategory']['Name']
subCatid = album['SubCategory']['id']
except KeyError:
subCat = None
subCatid = None
lock.acquire()
db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime
.datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),
album['Key'], album['id'])
lock.release()
return albums
def _getPictures(album, conn, smugmug, lock):
pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'
], Extras='MD5Sum,LastUpdated,FileName')
albumId = pictures['Album']['id']
for picture in pictures['Album']['Images']:
lock.acquire()
db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[
'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],
picture['Key'], picture['id'], picture['FileName'])
lock.release()
def _getUserCategories(conn, smugmug, lock):
result = smugmug.categories_get()
categories = result['Categories']
ids = []
for category in categories:
ids.append(category['id'])
lock.acquire()
db.addUserCategory(conn, category['Type'], category['id'], category
['NiceName'], category['Name'])
lock.release()
return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
for categoryid in ids:
result = smugmug.subcategories_get(CategoryID=categoryid)
subcategories = result['SubCategories']
for subcategory in subcategories:
lock.acquire()
db.addUserSubCategory(conn, subcategory['id'], subcategory[
'NiceName'], subcategory['Name'], categoryid)
lock.release()
def _emptySmugMugTables(conn, lock):
lock.acquire()
db.execute(conn, 'DELETE FROM smug_album')
db.execute(conn, 'DELETE FROM smug_image')
db.execute(conn, 'DELETE FROM user_category')
db.execute(conn, 'DELETE FROM user_subcategory')
lock.release()
<|reserved_special_token_1|>
'''
Copyright (c) 2011 Jacob K. Schoen (jacob.schoen@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
import datetime
import os
import db
myLogger = logging.getLogger('smugScan')
def getAllPictureInfo(configobj, smugmug, lock):
myLogger.info("getAllPictures() parent process:'{0}' process id:'{1}".format(os.getppid(),os.getpid()))
conn = db.getConn(configobj)
#start fresh on this
myLogger.debug("Emptying smugmug tables.")
_emptySmugMugTables(conn, lock)
#now get the albums
myLogger.debug("Getting album info from smugmug.")
albums = _getAlbums(conn, smugmug, lock)
for album in albums["Albums"]:
#and the pictures in each album
myLogger.debug("geting picture info for album '%s'", album["Title"])
_getPictures(album, conn, smugmug, lock)
#get categories
ids = _getUserCategories(conn, smugmug, lock)
_getUserSubCategories(conn, smugmug, lock, ids)
conn.close()
myLogger.info('Finished Scanning SmugMug')
def _getAlbums(conn, smugmug, lock):
albums = smugmug.albums_get(Extras="LastUpdated")
for album in albums["Albums"]:
myLogger.debug(album)
title = album["Title"]
cat = None
catid = None
subCat = None
subCatid = None
try:
cat = album["Category"]["Name"]
catid = album["Category"]["id"]
except KeyError:
cat = None
catid = None
try:
subCat = album["SubCategory"]["Name"]
subCatid = album["SubCategory"]["id"]
except KeyError:
subCat = None
subCatid = None
lock.acquire()
db.addSmugAlbum(conn,cat, catid, subCat, subCatid, title, datetime.datetime.strptime(album["LastUpdated"],'%Y-%m-%d %H:%M:%S'), album["Key"], album["id"])
lock.release()
return albums
def _getPictures(album, conn, smugmug, lock):
pictures = smugmug.images_get(AlbumID=album["id"], AlbumKey=album["Key"], Extras="MD5Sum,LastUpdated,FileName")
albumId = pictures["Album"]["id"]
for picture in pictures["Album"]["Images"]:
lock.acquire()
db.addSmugImage(conn,albumId, datetime.datetime.strptime(picture["LastUpdated"],'%Y-%m-%d %H:%M:%S'), picture["MD5Sum"], picture["Key"], picture["id"], picture["FileName"])
lock.release()
def _getUserCategories(conn, smugmug, lock):
result = smugmug.categories_get()
categories = result["Categories"]
ids = []
for category in categories:
ids.append(category["id"])
lock.acquire()
db.addUserCategory(conn,category["Type"],category["id"],category["NiceName"],category["Name"])
lock.release()
return ids
def _getUserSubCategories(conn, smugmug, lock, ids):
for categoryid in ids:
result = smugmug.subcategories_get(CategoryID=categoryid)
subcategories = result["SubCategories"]
for subcategory in subcategories:
lock.acquire()
db.addUserSubCategory(conn,subcategory["id"],subcategory["NiceName"],subcategory["Name"], categoryid)
lock.release()
def _emptySmugMugTables(conn, lock):
lock.acquire()
db.execute(conn,"DELETE FROM smug_album")
db.execute(conn,"DELETE FROM smug_image")
db.execute(conn,"DELETE FROM user_category")
db.execute(conn,"DELETE FROM user_subcategory")
lock.release()
|
flexible
|
{
"blob_id": "e2e3b63deba20cd87fdfca81a9f67fa24891a1e0",
"index": 6416,
"step-1": "<mask token>\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-2": "<mask token>\n\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".\n format(os.getppid(), os.getpid()))\n conn = db.getConn(configobj)\n myLogger.debug('Emptying smugmug tables.')\n _emptySmugMugTables(conn, lock)\n myLogger.debug('Getting album info from smugmug.')\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums['Albums']:\n myLogger.debug(\"geting picture info for album '%s'\", album['Title'])\n _getPictures(album, conn, smugmug, lock)\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-3": "<mask token>\nmyLogger = logging.getLogger('smugScan')\n\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".\n format(os.getppid(), os.getpid()))\n conn = db.getConn(configobj)\n myLogger.debug('Emptying smugmug tables.')\n _emptySmugMugTables(conn, lock)\n myLogger.debug('Getting album info from smugmug.')\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums['Albums']:\n myLogger.debug(\"geting picture info for album '%s'\", album['Title'])\n _getPictures(album, conn, smugmug, lock)\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-4": "<mask token>\nimport logging\nimport datetime\nimport os\nimport db\nmyLogger = logging.getLogger('smugScan')\n\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".\n format(os.getppid(), os.getpid()))\n conn = db.getConn(configobj)\n myLogger.debug('Emptying smugmug tables.')\n _emptySmugMugTables(conn, lock)\n myLogger.debug('Getting album info from smugmug.')\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums['Albums']:\n myLogger.debug(\"geting picture info for album '%s'\", album['Title'])\n _getPictures(album, conn, smugmug, lock)\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras='LastUpdated')\n for album in albums['Albums']:\n myLogger.debug(album)\n title = album['Title']\n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album['Category']['Name']\n catid = album['Category']['id']\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album['SubCategory']['Name']\n subCatid = album['SubCategory']['id']\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn, cat, catid, subCat, subCatid, title, datetime\n .datetime.strptime(album['LastUpdated'], '%Y-%m-%d %H:%M:%S'),\n album['Key'], album['id'])\n lock.release()\n return albums\n\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album['id'], AlbumKey=album['Key'\n ], Extras='MD5Sum,LastUpdated,FileName')\n albumId = pictures['Album']['id']\n for picture in pictures['Album']['Images']:\n lock.acquire()\n db.addSmugImage(conn, albumId, datetime.datetime.strptime(picture[\n 'LastUpdated'], '%Y-%m-%d %H:%M:%S'), picture['MD5Sum'],\n picture['Key'], picture['id'], picture['FileName'])\n lock.release()\n\n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result['Categories']\n ids = []\n for category in categories:\n ids.append(category['id'])\n lock.acquire()\n db.addUserCategory(conn, category['Type'], category['id'], category\n ['NiceName'], category['Name'])\n lock.release()\n return ids\n\n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result['SubCategories']\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn, subcategory['id'], subcategory[\n 'NiceName'], subcategory['Name'], categoryid)\n lock.release()\n\n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn, 'DELETE FROM smug_album')\n db.execute(conn, 'DELETE FROM smug_image')\n db.execute(conn, 'DELETE FROM user_category')\n db.execute(conn, 'DELETE FROM user_subcategory')\n lock.release()\n",
"step-5": "'''\nCopyright (c) 2011 Jacob K. Schoen (jacob.schoen@gmail.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in \nthe Software without restriction, including without limitation the rights to \nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies \nof the Software, and to permit persons to whom the Software is furnished to do \nso, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all \ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \nSOFTWARE.\n'''\n\nimport logging\nimport datetime\nimport os\n\nimport db\n\nmyLogger = logging.getLogger('smugScan')\n\ndef getAllPictureInfo(configobj, smugmug, lock):\n myLogger.info(\"getAllPictures() parent process:'{0}' process id:'{1}\".format(os.getppid(),os.getpid()))\n conn = db.getConn(configobj)\n #start fresh on this\n myLogger.debug(\"Emptying smugmug tables.\")\n _emptySmugMugTables(conn, lock)\n \n #now get the albums \n myLogger.debug(\"Getting album info from smugmug.\")\n albums = _getAlbums(conn, smugmug, lock)\n for album in albums[\"Albums\"]:\n #and the pictures in each album\n myLogger.debug(\"geting picture info for album '%s'\", album[\"Title\"])\n _getPictures(album, conn, smugmug, lock)\n \n #get categories\n ids = _getUserCategories(conn, smugmug, lock)\n _getUserSubCategories(conn, smugmug, lock, ids)\n conn.close()\n myLogger.info('Finished Scanning SmugMug')\n\ndef _getAlbums(conn, smugmug, lock):\n albums = smugmug.albums_get(Extras=\"LastUpdated\")\n \n for album in albums[\"Albums\"]:\n myLogger.debug(album)\n title = album[\"Title\"]\n \n cat = None\n catid = None\n subCat = None\n subCatid = None\n try:\n cat = album[\"Category\"][\"Name\"]\n catid = album[\"Category\"][\"id\"]\n except KeyError:\n cat = None\n catid = None\n try:\n subCat = album[\"SubCategory\"][\"Name\"]\n subCatid = album[\"SubCategory\"][\"id\"]\n except KeyError:\n subCat = None\n subCatid = None\n lock.acquire()\n db.addSmugAlbum(conn,cat, catid, subCat, subCatid, title, datetime.datetime.strptime(album[\"LastUpdated\"],'%Y-%m-%d %H:%M:%S'), album[\"Key\"], album[\"id\"])\n lock.release() \n return albums\n\ndef _getPictures(album, conn, smugmug, lock):\n pictures = smugmug.images_get(AlbumID=album[\"id\"], AlbumKey=album[\"Key\"], Extras=\"MD5Sum,LastUpdated,FileName\")\n albumId = pictures[\"Album\"][\"id\"]\n for picture in pictures[\"Album\"][\"Images\"]:\n lock.acquire()\n db.addSmugImage(conn,albumId, datetime.datetime.strptime(picture[\"LastUpdated\"],'%Y-%m-%d %H:%M:%S'), picture[\"MD5Sum\"], picture[\"Key\"], picture[\"id\"], picture[\"FileName\"])\n lock.release() \n\ndef _getUserCategories(conn, smugmug, lock):\n result = smugmug.categories_get()\n categories = result[\"Categories\"]\n ids = []\n for category in categories:\n ids.append(category[\"id\"])\n lock.acquire()\n 
db.addUserCategory(conn,category[\"Type\"],category[\"id\"],category[\"NiceName\"],category[\"Name\"])\n lock.release() \n return ids \n\ndef _getUserSubCategories(conn, smugmug, lock, ids):\n for categoryid in ids:\n result = smugmug.subcategories_get(CategoryID=categoryid)\n subcategories = result[\"SubCategories\"]\n for subcategory in subcategories:\n lock.acquire()\n db.addUserSubCategory(conn,subcategory[\"id\"],subcategory[\"NiceName\"],subcategory[\"Name\"], categoryid)\n lock.release() \n\ndef _emptySmugMugTables(conn, lock):\n lock.acquire()\n db.execute(conn,\"DELETE FROM smug_album\")\n db.execute(conn,\"DELETE FROM smug_image\")\n db.execute(conn,\"DELETE FROM user_category\")\n db.execute(conn,\"DELETE FROM user_subcategory\")\n lock.release()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
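Aside on the smugScan record above: every database write is bracketed by manual lock.acquire()/lock.release() calls on a lock shared between worker processes. A minimal, self-contained sketch of the same serialization idea, using a with-block so the lock is released even when the insert raises (the table and helper names here are illustrative assumptions, not part of the original module):

import sqlite3
from multiprocessing import Lock

lock = Lock()  # in the real module one lock instance is shared across processes
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE smug_album (title TEXT)')

def add_album(title):
    # 'with lock:' is equivalent to acquire()/release(), but exception-safe
    with lock:
        conn.execute('INSERT INTO smug_album (title) VALUES (?)', (title,))
        conn.commit()

add_album('Vacation 2011')
print(conn.execute('SELECT COUNT(*) FROM smug_album').fetchone())  # (1,)
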
import os
import time
import torch
from torch.utils.data import DataLoader
from torchvision.datasets import SVHN
from torchvision.transforms import ToTensor
from lib.utils import Logger, normal_logpdf, sumflat, print_model_info, tanh_to_uint8, get_optimizer
from lib.vae import VAE
def train(hp):
os.makedirs(hp.out_dir, exist_ok=True)
device = torch.device('cuda' if hp.use_cuda else 'cpu')
dataset = SVHN(root='svhn', split='train', download=True, transform=ToTensor())
eval_dataset = SVHN(root='svhn', split='test', download=True, transform=ToTensor())
model = VAE(hp.z_dim).to(device)
print_model_info(model)
opt = get_optimizer(hp.opt_name, model.parameters(), lr=hp.lr, **hp.opt_kwargs)
logger = Logger(hp.out_dir)
total_step = 0
    error_occurred = False
start_time = time.time()
stats = {
'loss': [],
'loss_kl': [],
'loss_rec': [],
'eval_loss': [],
'start_time': start_time,
'epoch_times': [],
}
for epoch in range(1, hp.epochs+1):
loader = DataLoader(dataset=dataset, batch_size=256, shuffle=True)
for x, _ in loader:
total_step += 1
x = x.to(device) * 2 - 1.0
z, mu, sigma, x_hat = model(x)
loss_rec = 0.5 * sumflat((x - x_hat) ** 2)
loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)
loss = (loss_rec + loss_kl).mean()
if torch.isnan(loss).item():
                error_occurred = True
break
opt.zero_grad()
loss.backward()
opt.step()
if total_step % 10 == 0:
stats['loss'].append(loss.cpu().item())
stats['loss_rec'].append(loss_rec.cpu().mean().item())
stats['loss_kl'].append(loss_kl.cpu().mean().item())
logger.log_scalars({
'train/loss': stats['loss'][-1],
'train/loss_rec': stats['loss_rec'][-1],
'train/loss_kl': stats['loss_kl'][-1],
}, total_step)
print(f'\rep {epoch:02d} step {total_step:03d} '
f'loss {stats["loss"][-1]:.2f} '
f'loss_rec {stats["loss_rec"][-1]:.2f} '
f'loss_kl {stats["loss_kl"][-1]:.2f} '
f'({time.time() - start_time:.2f} sec) '
' ',
end='', flush=True)
print()
        if error_occurred:
print('NaN detected -- Ending training!')
break
stats['epoch_times'].append(time.time())
eval_loss = evaluate(model=model, dataset=eval_dataset, logger=logger,
step=total_step, epoch=epoch, device=device, hparams=hp)
stats['eval_loss'].append(eval_loss.cpu().mean().item())
if epoch % hp.ckpt_freq == 0 or epoch == hp.epochs:
torch.save(
{
'model_state_dict': model.state_dict(),
'epoch': epoch,
'total_step': total_step,
'stats': stats,
'hparams': vars(hp),
},
os.path.join(hp.out_dir, f'ckpt_ep={epoch:03d}.pt'))
end_time = time.time()
with open(os.path.join(hp.out_dir, 'FINISHED'), 'w') as f:
f.write(f'Started: {start_time}\n')
f.write(f'Finished: {end_time}\n')
f.write(f'Total time: {end_time - start_time:.2f}\n')
@torch.no_grad()
def evaluate(*, model: torch.nn.Module, dataset, logger: Logger, step: int, epoch: int, device, hparams):
loader = DataLoader(dataset=dataset, batch_size=256, shuffle=False, drop_last=False)
model.eval()
losses = []
for i, (x, _) in enumerate(loader):
x = x.to(device) * 2 - 1.0
z, mu, sigma, x_hat = model(x)
loss_rec = 0.5 * sumflat((x - x_hat) ** 2)
loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)
loss = loss_rec + loss_kl
losses.append(loss.cpu())
if i == 0 and (epoch % hparams.sample_freq == 0 or epoch == hparams.epochs):
n = 6
samples = model.decoder(torch.randn(n**2, hparams.z_dim, device=device))
logger.log_image_grid('reconstructions', tanh_to_uint8(x_hat[:n**2]), step, nrow=n)
logger.log_image_grid('samples', tanh_to_uint8(samples), step, nrow=n)
losses = torch.cat(losses)
logger.log_scalar('eval/loss', losses.mean().item(), step)
model.train()
return losses
|
normal
|
{
"blob_id": "43db8ed10face1c668aeadd3cbc5b13f87fb0126",
"index": 4997,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef train(hp):\n os.makedirs(hp.out_dir, exist_ok=True)\n device = torch.device('cuda' if hp.use_cuda else 'cpu')\n dataset = SVHN(root='svhn', split='train', download=True, transform=\n ToTensor())\n eval_dataset = SVHN(root='svhn', split='test', download=True, transform\n =ToTensor())\n model = VAE(hp.z_dim).to(device)\n print_model_info(model)\n opt = get_optimizer(hp.opt_name, model.parameters(), lr=hp.lr, **hp.\n opt_kwargs)\n logger = Logger(hp.out_dir)\n total_step = 0\n error_occured = False\n start_time = time.time()\n stats = {'loss': [], 'loss_kl': [], 'loss_rec': [], 'eval_loss': [],\n 'start_time': start_time, 'epoch_times': []}\n for epoch in range(1, hp.epochs + 1):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=True)\n for x, _ in loader:\n total_step += 1\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = (loss_rec + loss_kl).mean()\n if torch.isnan(loss).item():\n error_occured = True\n break\n opt.zero_grad()\n loss.backward()\n opt.step()\n if total_step % 10 == 0:\n stats['loss'].append(loss.cpu().item())\n stats['loss_rec'].append(loss_rec.cpu().mean().item())\n stats['loss_kl'].append(loss_kl.cpu().mean().item())\n logger.log_scalars({'train/loss': stats['loss'][-1],\n 'train/loss_rec': stats['loss_rec'][-1],\n 'train/loss_kl': stats['loss_kl'][-1]}, total_step)\n print(\n f\"\\rep {epoch:02d} step {total_step:03d} loss {stats['loss'][-1]:.2f} loss_rec {stats['loss_rec'][-1]:.2f} loss_kl {stats['loss_kl'][-1]:.2f} ({time.time() - start_time:.2f} sec) \"\n , end='', flush=True)\n print()\n if error_occured:\n print('NaN detected -- Ending training!')\n break\n stats['epoch_times'].append(time.time())\n eval_loss = evaluate(model=model, dataset=eval_dataset, logger=\n logger, step=total_step, epoch=epoch, device=device, hparams=hp)\n stats['eval_loss'].append(eval_loss.cpu().mean().item())\n if epoch % hp.ckpt_freq == 0 or epoch == hp.epochs:\n torch.save({'model_state_dict': model.state_dict(), 'epoch':\n epoch, 'total_step': total_step, 'stats': stats, 'hparams':\n vars(hp)}, os.path.join(hp.out_dir, f'ckpt_ep={epoch:03d}.pt'))\n end_time = time.time()\n with open(os.path.join(hp.out_dir, 'FINISHED'), 'w') as f:\n f.write(f'Started: {start_time}\\n')\n f.write(f'Finished: {end_time}\\n')\n f.write(f'Total time: {end_time - start_time:.2f}\\n')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef train(hp):\n os.makedirs(hp.out_dir, exist_ok=True)\n device = torch.device('cuda' if hp.use_cuda else 'cpu')\n dataset = SVHN(root='svhn', split='train', download=True, transform=\n ToTensor())\n eval_dataset = SVHN(root='svhn', split='test', download=True, transform\n =ToTensor())\n model = VAE(hp.z_dim).to(device)\n print_model_info(model)\n opt = get_optimizer(hp.opt_name, model.parameters(), lr=hp.lr, **hp.\n opt_kwargs)\n logger = Logger(hp.out_dir)\n total_step = 0\n error_occured = False\n start_time = time.time()\n stats = {'loss': [], 'loss_kl': [], 'loss_rec': [], 'eval_loss': [],\n 'start_time': start_time, 'epoch_times': []}\n for epoch in range(1, hp.epochs + 1):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=True)\n for x, _ in loader:\n total_step += 1\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = (loss_rec + loss_kl).mean()\n if torch.isnan(loss).item():\n error_occured = True\n break\n opt.zero_grad()\n loss.backward()\n opt.step()\n if total_step % 10 == 0:\n stats['loss'].append(loss.cpu().item())\n stats['loss_rec'].append(loss_rec.cpu().mean().item())\n stats['loss_kl'].append(loss_kl.cpu().mean().item())\n logger.log_scalars({'train/loss': stats['loss'][-1],\n 'train/loss_rec': stats['loss_rec'][-1],\n 'train/loss_kl': stats['loss_kl'][-1]}, total_step)\n print(\n f\"\\rep {epoch:02d} step {total_step:03d} loss {stats['loss'][-1]:.2f} loss_rec {stats['loss_rec'][-1]:.2f} loss_kl {stats['loss_kl'][-1]:.2f} ({time.time() - start_time:.2f} sec) \"\n , end='', flush=True)\n print()\n if error_occured:\n print('NaN detected -- Ending training!')\n break\n stats['epoch_times'].append(time.time())\n eval_loss = evaluate(model=model, dataset=eval_dataset, logger=\n logger, step=total_step, epoch=epoch, device=device, hparams=hp)\n stats['eval_loss'].append(eval_loss.cpu().mean().item())\n if epoch % hp.ckpt_freq == 0 or epoch == hp.epochs:\n torch.save({'model_state_dict': model.state_dict(), 'epoch':\n epoch, 'total_step': total_step, 'stats': stats, 'hparams':\n vars(hp)}, os.path.join(hp.out_dir, f'ckpt_ep={epoch:03d}.pt'))\n end_time = time.time()\n with open(os.path.join(hp.out_dir, 'FINISHED'), 'w') as f:\n f.write(f'Started: {start_time}\\n')\n f.write(f'Finished: {end_time}\\n')\n f.write(f'Total time: {end_time - start_time:.2f}\\n')\n\n\n@torch.no_grad()\ndef evaluate(*, model: torch.nn.Module, dataset, logger: Logger, step: int,\n epoch: int, device, hparams):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=False,\n drop_last=False)\n model.eval()\n losses = []\n for i, (x, _) in enumerate(loader):\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = loss_rec + loss_kl\n losses.append(loss.cpu())\n if i == 0 and (epoch % hparams.sample_freq == 0 or epoch == hparams\n .epochs):\n n = 6\n samples = model.decoder(torch.randn(n ** 2, hparams.z_dim,\n device=device))\n logger.log_image_grid('reconstructions', tanh_to_uint8(x_hat[:n **\n 2]), step, nrow=n)\n logger.log_image_grid('samples', tanh_to_uint8(samples), step,\n nrow=n)\n losses = torch.cat(losses)\n logger.log_scalar('eval/loss', losses.mean().item(), step)\n model.train()\n return losses\n",
"step-4": "import os\nimport time\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import SVHN\nfrom torchvision.transforms import ToTensor\nfrom lib.utils import Logger, normal_logpdf, sumflat, print_model_info, tanh_to_uint8, get_optimizer\nfrom lib.vae import VAE\n\n\ndef train(hp):\n os.makedirs(hp.out_dir, exist_ok=True)\n device = torch.device('cuda' if hp.use_cuda else 'cpu')\n dataset = SVHN(root='svhn', split='train', download=True, transform=\n ToTensor())\n eval_dataset = SVHN(root='svhn', split='test', download=True, transform\n =ToTensor())\n model = VAE(hp.z_dim).to(device)\n print_model_info(model)\n opt = get_optimizer(hp.opt_name, model.parameters(), lr=hp.lr, **hp.\n opt_kwargs)\n logger = Logger(hp.out_dir)\n total_step = 0\n error_occured = False\n start_time = time.time()\n stats = {'loss': [], 'loss_kl': [], 'loss_rec': [], 'eval_loss': [],\n 'start_time': start_time, 'epoch_times': []}\n for epoch in range(1, hp.epochs + 1):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=True)\n for x, _ in loader:\n total_step += 1\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = (loss_rec + loss_kl).mean()\n if torch.isnan(loss).item():\n error_occured = True\n break\n opt.zero_grad()\n loss.backward()\n opt.step()\n if total_step % 10 == 0:\n stats['loss'].append(loss.cpu().item())\n stats['loss_rec'].append(loss_rec.cpu().mean().item())\n stats['loss_kl'].append(loss_kl.cpu().mean().item())\n logger.log_scalars({'train/loss': stats['loss'][-1],\n 'train/loss_rec': stats['loss_rec'][-1],\n 'train/loss_kl': stats['loss_kl'][-1]}, total_step)\n print(\n f\"\\rep {epoch:02d} step {total_step:03d} loss {stats['loss'][-1]:.2f} loss_rec {stats['loss_rec'][-1]:.2f} loss_kl {stats['loss_kl'][-1]:.2f} ({time.time() - start_time:.2f} sec) \"\n , end='', flush=True)\n print()\n if error_occured:\n print('NaN detected -- Ending training!')\n break\n stats['epoch_times'].append(time.time())\n eval_loss = evaluate(model=model, dataset=eval_dataset, logger=\n logger, step=total_step, epoch=epoch, device=device, hparams=hp)\n stats['eval_loss'].append(eval_loss.cpu().mean().item())\n if epoch % hp.ckpt_freq == 0 or epoch == hp.epochs:\n torch.save({'model_state_dict': model.state_dict(), 'epoch':\n epoch, 'total_step': total_step, 'stats': stats, 'hparams':\n vars(hp)}, os.path.join(hp.out_dir, f'ckpt_ep={epoch:03d}.pt'))\n end_time = time.time()\n with open(os.path.join(hp.out_dir, 'FINISHED'), 'w') as f:\n f.write(f'Started: {start_time}\\n')\n f.write(f'Finished: {end_time}\\n')\n f.write(f'Total time: {end_time - start_time:.2f}\\n')\n\n\n@torch.no_grad()\ndef evaluate(*, model: torch.nn.Module, dataset, logger: Logger, step: int,\n epoch: int, device, hparams):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=False,\n drop_last=False)\n model.eval()\n losses = []\n for i, (x, _) in enumerate(loader):\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = loss_rec + loss_kl\n losses.append(loss.cpu())\n if i == 0 and (epoch % hparams.sample_freq == 0 or epoch == hparams\n .epochs):\n n = 6\n samples = model.decoder(torch.randn(n ** 2, hparams.z_dim,\n device=device))\n logger.log_image_grid('reconstructions', tanh_to_uint8(x_hat[:n **\n 2]), step, nrow=n)\n logger.log_image_grid('samples', 
tanh_to_uint8(samples), step,\n nrow=n)\n losses = torch.cat(losses)\n logger.log_scalar('eval/loss', losses.mean().item(), step)\n model.train()\n return losses\n",
"step-5": "import os\nimport time\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets import SVHN\nfrom torchvision.transforms import ToTensor \nfrom lib.utils import Logger, normal_logpdf, sumflat, print_model_info, tanh_to_uint8, get_optimizer\nfrom lib.vae import VAE\n\n\ndef train(hp):\n os.makedirs(hp.out_dir, exist_ok=True)\n device = torch.device('cuda' if hp.use_cuda else 'cpu')\n dataset = SVHN(root='svhn', split='train', download=True, transform=ToTensor())\n eval_dataset = SVHN(root='svhn', split='test', download=True, transform=ToTensor())\n model = VAE(hp.z_dim).to(device)\n print_model_info(model)\n opt = get_optimizer(hp.opt_name, model.parameters(), lr=hp.lr, **hp.opt_kwargs)\n logger = Logger(hp.out_dir)\n total_step = 0\n error_occured = False\n\n start_time = time.time()\n stats = {\n 'loss': [],\n 'loss_kl': [],\n 'loss_rec': [],\n 'eval_loss': [],\n 'start_time': start_time,\n 'epoch_times': [],\n }\n for epoch in range(1, hp.epochs+1):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=True)\n for x, _ in loader:\n total_step += 1\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = (loss_rec + loss_kl).mean()\n if torch.isnan(loss).item():\n error_occured = True\n break\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n if total_step % 10 == 0:\n stats['loss'].append(loss.cpu().item())\n stats['loss_rec'].append(loss_rec.cpu().mean().item())\n stats['loss_kl'].append(loss_kl.cpu().mean().item())\n logger.log_scalars({\n 'train/loss': stats['loss'][-1],\n 'train/loss_rec': stats['loss_rec'][-1],\n 'train/loss_kl': stats['loss_kl'][-1],\n\n }, total_step)\n\n print(f'\\rep {epoch:02d} step {total_step:03d} '\n f'loss {stats[\"loss\"][-1]:.2f} '\n f'loss_rec {stats[\"loss_rec\"][-1]:.2f} '\n f'loss_kl {stats[\"loss_kl\"][-1]:.2f} '\n f'({time.time() - start_time:.2f} sec) '\n ' ',\n end='', flush=True)\n\n print()\n if error_occured:\n print('NaN detected -- Ending training!')\n break\n stats['epoch_times'].append(time.time())\n eval_loss = evaluate(model=model, dataset=eval_dataset, logger=logger,\n step=total_step, epoch=epoch, device=device, hparams=hp)\n stats['eval_loss'].append(eval_loss.cpu().mean().item())\n\n if epoch % hp.ckpt_freq == 0 or epoch == hp.epochs:\n torch.save(\n {\n 'model_state_dict': model.state_dict(),\n 'epoch': epoch,\n 'total_step': total_step,\n 'stats': stats,\n 'hparams': vars(hp),\n },\n os.path.join(hp.out_dir, f'ckpt_ep={epoch:03d}.pt'))\n\n end_time = time.time()\n with open(os.path.join(hp.out_dir, 'FINISHED'), 'w') as f:\n f.write(f'Started: {start_time}\\n')\n f.write(f'Finished: {end_time}\\n')\n f.write(f'Total time: {end_time - start_time:.2f}\\n')\n\n\n@torch.no_grad()\ndef evaluate(*, model: torch.nn.Module, dataset, logger: Logger, step: int, epoch: int, device, hparams):\n loader = DataLoader(dataset=dataset, batch_size=256, shuffle=False, drop_last=False)\n\n model.eval()\n losses = []\n for i, (x, _) in enumerate(loader):\n x = x.to(device) * 2 - 1.0\n z, mu, sigma, x_hat = model(x)\n\n loss_rec = 0.5 * sumflat((x - x_hat) ** 2)\n loss_kl = normal_logpdf(z, mu, sigma) - normal_logpdf(z)\n loss = loss_rec + loss_kl\n losses.append(loss.cpu())\n\n if i == 0 and (epoch % hparams.sample_freq == 0 or epoch == hparams.epochs):\n n = 6\n samples = model.decoder(torch.randn(n**2, hparams.z_dim, device=device))\n logger.log_image_grid('reconstructions', 
tanh_to_uint8(x_hat[:n**2]), step, nrow=n)\n logger.log_image_grid('samples', tanh_to_uint8(samples), step, nrow=n)\n\n losses = torch.cat(losses)\n logger.log_scalar('eval/loss', losses.mean().item(), step)\n model.train()\n return losses",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
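The VAE trainer above computes its KL term as normal_logpdf(z, mu, sigma) - normal_logpdf(z), i.e. a single-sample Monte Carlo estimate of KL(q(z|x) || N(0, I)). The sketch below checks that estimate against the closed-form diagonal-Gaussian KL; normal_logpdf is redefined locally under the assumption that the lib.utils version is a per-dimension Gaussian log-density summed over the last axis:

import torch

def normal_logpdf(z, mu=None, sigma=None):
    # diagonal-Gaussian log-density summed over the last axis;
    # with mu/sigma omitted it is the standard-normal prior N(0, I)
    mu = torch.zeros_like(z) if mu is None else mu
    sigma = torch.ones_like(z) if sigma is None else sigma
    var = sigma ** 2
    return (-0.5 * (torch.log(2 * torch.pi * var) + (z - mu) ** 2 / var)).sum(-1)

mu, sigma = torch.randn(8), torch.rand(8) + 0.1
z = mu + sigma * torch.randn(10000, 8)                  # samples from q(z|x)
mc_kl = (normal_logpdf(z, mu, sigma) - normal_logpdf(z)).mean()
closed_kl = (0.5 * (sigma ** 2 + mu ** 2 - 1) - torch.log(sigma)).sum()
print(mc_kl.item(), closed_kl.item())                   # agree up to MC noise
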
class Queue:
def __init__(self):
self.head = None
self.tail = None
class Node:
def __init__(self, data):
self.data = data
self.next = None
def isEmpty(self):
return self.head is None
def peek(self):
return self.head.data if self.head is not None else None
def add(self, data):
node = self.Node(data)
if(self.tail is not None):
self.tail.next = node
self.tail = node
if (self.head is None):
self.head = node
def remove(self):
data = self.head.data
self.head = self.head.next
if (self.head is None):
self.tail = None
return data
|
normal
|
{
"blob_id": "1aca1cf11d64374d0e0786e74c16567a4c5a1dec",
"index": 6452,
"step-1": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n <mask token>\n\n def peek(self):\n return self.head.data if self.head is not None else None\n <mask token>\n <mask token>\n",
"step-2": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n <mask token>\n\n def peek(self):\n return self.head.data if self.head is not None else None\n <mask token>\n\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if self.head is None:\n self.tail = None\n return data\n",
"step-3": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n <mask token>\n\n def peek(self):\n return self.head.data if self.head is not None else None\n\n def add(self, data):\n node = self.Node(data)\n if self.tail is not None:\n self.tail.next = node\n self.tail = node\n if self.head is None:\n self.head = node\n\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if self.head is None:\n self.tail = None\n return data\n",
"step-4": "class Queue:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n\n class Node:\n\n def __init__(self, data):\n self.data = data\n self.next = None\n\n def isEmpty(self):\n return self.head is None\n\n def peek(self):\n return self.head.data if self.head is not None else None\n\n def add(self, data):\n node = self.Node(data)\n if self.tail is not None:\n self.tail.next = node\n self.tail = node\n if self.head is None:\n self.head = node\n\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if self.head is None:\n self.tail = None\n return data\n",
"step-5": "class Queue:\n def __init__(self):\n self.head = None\n self.tail = None\n \n class Node:\n def __init__(self, data):\n self.data = data\n self.next = None\n def isEmpty(self):\n return self.head is None\n def peek(self):\n return self.head.data if self.head is not None else None\n def add(self, data):\n node = self.Node(data)\n if(self.tail is not None):\n self.tail.next = node\n self.tail = node\n if (self.head is None):\n self.head = node\n def remove(self):\n data = self.head.data\n self.head = self.head.next\n if (self.head is None):\n self.tail = None\n return data\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
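A short usage sketch for the linked-list Queue record above, assuming the final (unmasked) Queue definition is in scope; note that remove() on an empty queue raises AttributeError, since head is None:

q = Queue()
print(q.isEmpty())   # True
q.add('a')
q.add('b')
print(q.peek())      # 'a' -- head of the FIFO, left in place
print(q.remove())    # 'a'
print(q.remove())    # 'b' -- tail is reset to None when the queue empties
print(q.isEmpty())   # True
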
<|reserved_special_token_0|>
class Bus:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def getOn_2(self, *names):
        for i in names:
            self.num_passenger += 1
            if self.num_passenger > self.seats:
                sys.exit(f'Sorry dear {i}. There is no free seat on the bus')
            free_list = list(self.dict_seats.values())
            free_num_seat = int(free_list.index('Free')) + 1
            self.dict_seats.update({free_num_seat: i})
def getOf(self, passenger_name=None):
self.num_passenger -= 1
close_list = list(self.dict_seats.values())
if passenger_name in close_list:
close_num_seat = int(close_list.index(passenger_name) + 1)
self.dict_seats.update({close_num_seat: 'Free'})
else:
print(f'Passenger {passenger_name} is not on the bus')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bus:
def __init__(self):
self.seats = 0
self.dict_seats = {}
self.num_passenger = 0
def conctructor(self, seats):
self.seats = seats
for i in range(1, self.seats + 1):
self.dict_seats.update({i: 'Free'})
return self.dict_seats
<|reserved_special_token_0|>
    def getOn_2(self, *names):
        for i in names:
            self.num_passenger += 1
            if self.num_passenger > self.seats:
                sys.exit(f'Sorry dear {i}. There is no free seat on the bus')
            free_list = list(self.dict_seats.values())
            free_num_seat = int(free_list.index('Free')) + 1
            self.dict_seats.update({free_num_seat: i})
def getOf(self, passenger_name=None):
self.num_passenger -= 1
close_list = list(self.dict_seats.values())
if passenger_name in close_list:
close_num_seat = int(close_list.index(passenger_name) + 1)
self.dict_seats.update({close_num_seat: 'Free'})
else:
print(f'Passenger {passenger_name} is not on the bus')
def __str__(self):
return f"""Number of seats on the bus - {self.seats}
Number of passenger - {self.num_passenger}
Free seats - {self.seats - self.num_passenger}
Other details - {self.dict_seats}"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bus:
def __init__(self):
self.seats = 0
self.dict_seats = {}
self.num_passenger = 0
def conctructor(self, seats):
self.seats = seats
for i in range(1, self.seats + 1):
self.dict_seats.update({i: 'Free'})
return self.dict_seats
def getOn(self, passenger_name=None):
self.num_passenger += 1
if self.num_passenger > self.seats:
sys.exit(
f'Sorry dear {passenger_name}. There is no free seat on the bus'
)
free_list = list(self.dict_seats.values())
free_num_seat = int(free_list.index('Free')) + 1
self.dict_seats.update({free_num_seat: passenger_name})
    def getOn_2(self, *names):
        for i in names:
            self.num_passenger += 1
            if self.num_passenger > self.seats:
                sys.exit(f'Sorry dear {i}. There is no free seat on the bus')
            free_list = list(self.dict_seats.values())
            free_num_seat = int(free_list.index('Free')) + 1
            self.dict_seats.update({free_num_seat: i})
def getOf(self, passenger_name=None):
self.num_passenger -= 1
close_list = list(self.dict_seats.values())
if passenger_name in close_list:
close_num_seat = int(close_list.index(passenger_name) + 1)
self.dict_seats.update({close_num_seat: 'Free'})
else:
print(f'Passenger {passenger_name} is not on the bus')
def __str__(self):
return f"""Number of seats on the bus - {self.seats}
Number of passenger - {self.num_passenger}
Free seats - {self.seats - self.num_passenger}
Other details - {self.dict_seats}"""
<|reserved_special_token_1|>
import sys
class Bus:
def __init__(self):
self.seats = 0
self.dict_seats = {}
self.num_passenger = 0
def conctructor(self, seats):
self.seats = seats
for i in range(1, self.seats + 1):
self.dict_seats.update({i: 'Free'})
return self.dict_seats
def getOn(self, passenger_name=None):
self.num_passenger += 1
if self.num_passenger > self.seats:
sys.exit(
f'Sorry dear {passenger_name}. There is no free seat on the bus'
)
free_list = list(self.dict_seats.values())
free_num_seat = int(free_list.index('Free')) + 1
self.dict_seats.update({free_num_seat: passenger_name})
    def getOn_2(self, *names):
        for i in names:
            self.num_passenger += 1
            if self.num_passenger > self.seats:
                sys.exit(f'Sorry dear {i}. There is no free seat on the bus')
            free_list = list(self.dict_seats.values())
            free_num_seat = int(free_list.index('Free')) + 1
            self.dict_seats.update({free_num_seat: i})
def getOf(self, passenger_name=None):
self.num_passenger -= 1
close_list = list(self.dict_seats.values())
if passenger_name in close_list:
close_num_seat = int(close_list.index(passenger_name) + 1)
self.dict_seats.update({close_num_seat: 'Free'})
else:
print(f'Passenger {passenger_name} is not on the bus')
def __str__(self):
return f"""Number of seats on the bus - {self.seats}
Number of passenger - {self.num_passenger}
Free seats - {self.seats - self.num_passenger}
Other details - {self.dict_seats}"""
<|reserved_special_token_1|>
import sys
class Bus:
def __init__(self):
self.seats=0
self.dict_seats={}
self.num_passenger = 0
def conctructor(self,seats):
self.seats=seats
for i in range(1,self.seats+1):
self.dict_seats.update({i:"Free"})
return self.dict_seats
def getOn(self, passenger_name=None):
self.num_passenger += 1
if self.num_passenger > self.seats:
sys.exit(f'Sorry dear {passenger_name}. There is no free seat on the bus')
free_list = list(self.dict_seats.values())
free_num_seat = int(free_list.index("Free"))+1
self.dict_seats.update({free_num_seat : passenger_name})
    def getOn_2(self,*names):
        # iterate the *names tuple directly; stringifying the tuple and
        # splitting on spaces would mangle the passenger names
        for i in names:
            self.num_passenger += 1
            if self.num_passenger > self.seats:
                sys.exit(f'Sorry dear {i}. There is no free seat on the bus')
            free_list=list(self.dict_seats.values())
            free_num_seat=int(free_list.index("Free"))+1
            self.dict_seats.update({free_num_seat : i})
    def getOf(self,passenger_name=None):
        close_list = list(self.dict_seats.values())
        if passenger_name in close_list:
            self.num_passenger -= 1  # only count down when the passenger was found
            close_num_seat = int(close_list.index(passenger_name) + 1)
            self.dict_seats.update({close_num_seat: "Free"})
        else:
            print(f'Passenger {passenger_name} is not on the bus')
def __str__(self):
return f'Number of seats on the bus - {self.seats}\nNumber of passenger - {self.num_passenger}'\
f'\nFree seats - {self.seats-self.num_passenger}'\
f'\nOther details - {self.dict_seats}'
|
flexible
|
{
"blob_id": "1396509f65d194eeaefa3841e152b7078abf0032",
"index": 5549,
"step-1": "<mask token>\n\n\nclass Bus:\n <mask token>\n <mask token>\n <mask token>\n\n def getOn_2(self, *names):\n str_names = str(names)\n str_names.strip('')\n list_names = str_names.split(' ')\n for i in list_names:\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(f'Sorry dear {i}. There is no free seat on the bus')\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index('Free')) + 1\n self.dict_seats.update({free_num_seat: i})\n\n def getOf(self, passenger_name=None):\n self.num_passenger -= 1\n close_list = list(self.dict_seats.values())\n if passenger_name in close_list:\n close_num_seat = int(close_list.index(passenger_name) + 1)\n self.dict_seats.update({close_num_seat: 'Free'})\n else:\n print(f'Passenger {passenger_name} is not on the bus')\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Bus:\n\n def __init__(self):\n self.seats = 0\n self.dict_seats = {}\n self.num_passenger = 0\n\n def conctructor(self, seats):\n self.seats = seats\n for i in range(1, self.seats + 1):\n self.dict_seats.update({i: 'Free'})\n return self.dict_seats\n <mask token>\n\n def getOn_2(self, *names):\n str_names = str(names)\n str_names.strip('')\n list_names = str_names.split(' ')\n for i in list_names:\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(f'Sorry dear {i}. There is no free seat on the bus')\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index('Free')) + 1\n self.dict_seats.update({free_num_seat: i})\n\n def getOf(self, passenger_name=None):\n self.num_passenger -= 1\n close_list = list(self.dict_seats.values())\n if passenger_name in close_list:\n close_num_seat = int(close_list.index(passenger_name) + 1)\n self.dict_seats.update({close_num_seat: 'Free'})\n else:\n print(f'Passenger {passenger_name} is not on the bus')\n\n def __str__(self):\n return f\"\"\"Number of seats on the bus - {self.seats}\nNumber of passenger - {self.num_passenger}\nFree seats - {self.seats - self.num_passenger}\nOther details - {self.dict_seats}\"\"\"\n",
"step-3": "<mask token>\n\n\nclass Bus:\n\n def __init__(self):\n self.seats = 0\n self.dict_seats = {}\n self.num_passenger = 0\n\n def conctructor(self, seats):\n self.seats = seats\n for i in range(1, self.seats + 1):\n self.dict_seats.update({i: 'Free'})\n return self.dict_seats\n\n def getOn(self, passenger_name=None):\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(\n f'Sorry dear {passenger_name}. There is no free seat on the bus'\n )\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index('Free')) + 1\n self.dict_seats.update({free_num_seat: passenger_name})\n\n def getOn_2(self, *names):\n str_names = str(names)\n str_names.strip('')\n list_names = str_names.split(' ')\n for i in list_names:\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(f'Sorry dear {i}. There is no free seat on the bus')\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index('Free')) + 1\n self.dict_seats.update({free_num_seat: i})\n\n def getOf(self, passenger_name=None):\n self.num_passenger -= 1\n close_list = list(self.dict_seats.values())\n if passenger_name in close_list:\n close_num_seat = int(close_list.index(passenger_name) + 1)\n self.dict_seats.update({close_num_seat: 'Free'})\n else:\n print(f'Passenger {passenger_name} is not on the bus')\n\n def __str__(self):\n return f\"\"\"Number of seats on the bus - {self.seats}\nNumber of passenger - {self.num_passenger}\nFree seats - {self.seats - self.num_passenger}\nOther details - {self.dict_seats}\"\"\"\n",
"step-4": "import sys\n\n\nclass Bus:\n\n def __init__(self):\n self.seats = 0\n self.dict_seats = {}\n self.num_passenger = 0\n\n def conctructor(self, seats):\n self.seats = seats\n for i in range(1, self.seats + 1):\n self.dict_seats.update({i: 'Free'})\n return self.dict_seats\n\n def getOn(self, passenger_name=None):\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(\n f'Sorry dear {passenger_name}. There is no free seat on the bus'\n )\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index('Free')) + 1\n self.dict_seats.update({free_num_seat: passenger_name})\n\n def getOn_2(self, *names):\n str_names = str(names)\n str_names.strip('')\n list_names = str_names.split(' ')\n for i in list_names:\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(f'Sorry dear {i}. There is no free seat on the bus')\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index('Free')) + 1\n self.dict_seats.update({free_num_seat: i})\n\n def getOf(self, passenger_name=None):\n self.num_passenger -= 1\n close_list = list(self.dict_seats.values())\n if passenger_name in close_list:\n close_num_seat = int(close_list.index(passenger_name) + 1)\n self.dict_seats.update({close_num_seat: 'Free'})\n else:\n print(f'Passenger {passenger_name} is not on the bus')\n\n def __str__(self):\n return f\"\"\"Number of seats on the bus - {self.seats}\nNumber of passenger - {self.num_passenger}\nFree seats - {self.seats - self.num_passenger}\nOther details - {self.dict_seats}\"\"\"\n",
"step-5": "import sys\nclass Bus:\n def __init__(self):\n self.seats=0\n self.dict_seats={}\n self.num_passenger = 0\n\n def conctructor(self,seats):\n self.seats=seats\n for i in range(1,self.seats+1):\n self.dict_seats.update({i:\"Free\"})\n return self.dict_seats\n\n def getOn(self, passenger_name=None):\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(f'Sorry dear {passenger_name}. There is no free seat on the bus')\n free_list = list(self.dict_seats.values())\n free_num_seat = int(free_list.index(\"Free\"))+1\n self.dict_seats.update({free_num_seat : passenger_name})\n\n def getOn_2(self,*names):\n str_names=str(names)\n str_names.strip(\"\")\n list_names=str_names.split(\" \")\n for i in list_names:\n self.num_passenger += 1\n if self.num_passenger > self.seats:\n sys.exit(f'Sorry dear {i}. There is no free seat on the bus')\n free_list=list(self.dict_seats.values())\n free_num_seat=int(free_list.index(\"Free\"))+1\n self.dict_seats.update({free_num_seat : i})\n\n\n\n def getOf(self,passenger_name=None):\n self.num_passenger -= 1\n close_list = list(self.dict_seats.values())\n if passenger_name in close_list:\n close_num_seat = int(close_list.index(passenger_name) + 1)\n self.dict_seats.update({close_num_seat: \"Free\"})\n else:\n print(f'Passenger {passenger_name} is not on the bus')\n def __str__(self):\n return f'Number of seats on the bus - {self.seats}\\nNumber of passenger - {self.num_passenger}'\\\n f'\\nFree seats - {self.seats-self.num_passenger}'\\\n f'\\nOther details - {self.dict_seats}'\n\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
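A boarding/alighting sketch for the Bus record above, assuming the final Bus definition (with the corrected getOn_2) is in scope; conctructor is the class's own, misspelled initializer name and is kept as-is:

bus = Bus()
bus.conctructor(3)           # three seats, all marked 'Free'
bus.getOn('Alice')           # fills seat 1
bus.getOn_2('Bob', 'Carol')  # variadic boarding fills seats 2 and 3
bus.getOf('Bob')             # seat 2 becomes 'Free' again
print(bus)                   # 3 seats, 2 passengers, 1 free seat
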
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def checkRaiz():
a = int(input('Informe o primeiro coeficiente: '))
b = int(input('Informe o segundo coeficiente: '))
c = int(input('Informe o terceiro coeficiente: '))
delta = b * b - 4 * a * c
if delta < 0:
print('Não tem raiz real')
elif delta == 0:
print('Existe uma raiz real')
else:
print('Existem duas raizes reais')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def checkRaiz():
a = int(input('Informe o primeiro coeficiente: '))
b = int(input('Informe o segundo coeficiente: '))
c = int(input('Informe o terceiro coeficiente: '))
delta = b * b - 4 * a * c
if delta < 0:
print('Não tem raiz real')
elif delta == 0:
print('Existe uma raiz real')
else:
print('Existem duas raizes reais')
checkRaiz()
<|reserved_special_token_1|>
def checkRaiz():
    # reads the three coefficients of a*x**2 + b*x + c and uses the
    # discriminant to report how many real roots the quadratic has;
    # prompts and messages are Portuguese ("enter the first/second/third
    # coefficient"; "no real root" / "one real root" / "two real roots")
    a = int(input("Informe o primeiro coeficiente: "))
    b = int(input("Informe o segundo coeficiente: "))
    c = int(input("Informe o terceiro coeficiente: "))
    delta = (b*b) - (4*a*c)
    if (delta < 0):
        print("Não tem raiz real")
    elif (delta == 0):
        print("Existe uma raiz real")
    else:
        print("Existem duas raizes reais")
checkRaiz()
|
flexible
|
{
"blob_id": "603a73a7cc0487fcabb527ebc21d44cb95817ecb",
"index": 5909,
"step-1": "<mask token>\n",
"step-2": "def checkRaiz():\n a = int(input('Informe o primeiro coeficiente: '))\n b = int(input('Informe o segundo coeficiente: '))\n c = int(input('Informe o terceiro coeficiente: '))\n delta = b * b - 4 * a * c\n if delta < 0:\n print('Não tem raiz real')\n elif delta == 0:\n print('Existe uma raiz real')\n else:\n print('Existem duas raizes reais')\n\n\n<mask token>\n",
"step-3": "def checkRaiz():\n a = int(input('Informe o primeiro coeficiente: '))\n b = int(input('Informe o segundo coeficiente: '))\n c = int(input('Informe o terceiro coeficiente: '))\n delta = b * b - 4 * a * c\n if delta < 0:\n print('Não tem raiz real')\n elif delta == 0:\n print('Existe uma raiz real')\n else:\n print('Existem duas raizes reais')\n\n\ncheckRaiz()\n",
"step-4": "\ndef checkRaiz():\n a = int(input(\"Informe o primeiro coeficiente: \"))\n b = int(input(\"Informe o segundo coeficiente: \"))\n c = int(input(\"Informe o terceiro coeficiente: \"))\n\n delta = (b*b) - (4*a*c)\n\n if (delta < 0):\n print(\"Não tem raiz real\")\n elif (delta == 0):\n print(\"Existe uma raiz real\")\n else:\n print(\"Existem duas raizes reais\")\n\ncheckRaiz()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
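A side-effect-free variant of the discriminant check above; root_count is a hypothetical helper name introduced here so the logic can be exercised without input():

def root_count(a, b, c):
    # number of distinct real roots of a*x**2 + b*x + c = 0
    delta = b * b - 4 * a * c
    if delta < 0:
        return 0
    return 1 if delta == 0 else 2

print(root_count(1, 2, 1))   # 1 -- delta == 0, one (double) real root
print(root_count(1, 0, -4))  # 2 -- delta > 0, two real roots
print(root_count(1, 0, 4))   # 0 -- delta < 0, no real root
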
__author__ = 'Administrator'
#coding:utf-8
def calculate_score(calculation_params):
    """
    Compute a diver's score for a single dive. There are 7 judges; the two
    highest and the two lowest marks are dropped, and the sum of the remaining
    3 marks multiplied by the dive's degree of difficulty is the score awarded.
    The argument is a dict:
    calculation_params["score_list"] = []
    calculation_params["difficulty"] = float
    """
score_list = calculation_params["score_list"]
difficulty = calculation_params["difficulty"]
score_list.sort()
temp_sum = 0.0
res = {}
res['expression'] = '('
for i in score_list[2:5]:
temp_sum += i
res['expression'] += "%.1f + " % i
res['final_score'] = temp_sum * difficulty
res['expression'] = res['expression'][:-3]
res['expression'] += ') * %.1f = %.1f' % (difficulty, res['final_score'])
return res
if __name__ == "__main__":
calculation_params = {
"score_list" : [1.0, 5.0, 3.0, 2.0, 9.0, 10.0, 2.0],
"difficulty" : 3.6,
}
    print(calculate_score(calculation_params))
|
normal
|
{
"blob_id": "52872804a069cd954bea247b64041eceafd8d139",
"index": 7673,
"step-1": "__author__ = 'Administrator'\n#coding:utf-8\ndef calculate_score(calculation_params):\n \"\"\"\n 计算选手在一跳中的得分,共7名裁判,去掉两个最高分和两个最低分,余下3名裁判员的分数之和乘以运动员所跳动作的难度系数,便得出该动作的实得分\n 传入参数为字典\n calculation_params[\"score_list\"] = []\n calculation_params[\"difficulty\"] = float\n \"\"\"\n score_list = calculation_params[\"score_list\"]\n difficulty = calculation_params[\"difficulty\"]\n score_list.sort()\n temp_sum = 0.0\n res = {}\n res['expression'] = '('\n for i in score_list[2:5]:\n temp_sum += i\n res['expression'] += \"%.1f + \" % i\n res['final_score'] = temp_sum * difficulty\n res['expression'] = res['expression'][:-3]\n res['expression'] += ') * %.1f = %.1f' % (difficulty, res['final_score'])\n return res\n\n\nif __name__ == \"__main__\":\n calculation_params = {\n \"score_list\" : [1.0, 5.0, 3.0, 2.0, 9.0, 10.0, 2.0],\n \"difficulty\" : 3.6,\n }\n\n print calculate_score(calculation_params)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
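The scoring rule above is a trimmed sum: sort the seven marks, keep only the middle three, and scale by the difficulty. The same computation with builtins, reproducing the __main__ example:

scores = [1.0, 5.0, 3.0, 2.0, 9.0, 10.0, 2.0]
middle = sorted(scores)[2:5]   # drop the two lowest and the two highest marks
print(middle)                  # [2.0, 3.0, 5.0]
print(sum(middle) * 3.6)       # 36.0 -- matches calculate_score's final_score
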
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse(url):
try:
parsed_url_components = url.split('//')
sublevel_split = parsed_url_components[1].split('/', 1)
domain = sublevel_split[0].replace('www.', '')
return domain
except IndexError:
print('URL format error!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse(url):
try:
parsed_url_components = url.split('//')
sublevel_split = parsed_url_components[1].split('/', 1)
domain = sublevel_split[0].replace('www.', '')
return domain
except IndexError:
print('URL format error!')
def analyze(results):
prompt = input('[.] Type <c> to print or <p> to plot\n[>] ')
if prompt == 'c':
        for site, count in list(results.items()):
print(site, count)
elif prompt == 'p':
plt.bar(list(range(len(results))), list(results.values()), align='edge'
)
plt.xticks(rotation=45)
plt.xticks(list(range(len(results))), list(results.keys()))
plt.show()
else:
print('[.] Uh?')
quit()
<|reserved_special_token_1|>
import os
import sqlite3
import operator
from collections import OrderedDict
import matplotlib.pyplot as plt
def parse(url):
try:
parsed_url_components = url.split('//')
sublevel_split = parsed_url_components[1].split('/', 1)
domain = sublevel_split[0].replace('www.', '')
return domain
except IndexError:
print('URL format error!')
def analyze(results):
prompt = input('[.] Type <c> to print or <p> to plot\n[>] ')
if prompt == 'c':
        for site, count in list(results.items()):
print(site, count)
elif prompt == 'p':
plt.bar(list(range(len(results))), list(results.values()), align='edge'
)
plt.xticks(rotation=45)
plt.xticks(list(range(len(results))), list(results.keys()))
plt.show()
else:
print('[.] Uh?')
quit()
<|reserved_special_token_1|>
import os
import sqlite3
import operator
from collections import OrderedDict
import matplotlib.pyplot as plt
def parse(url):
try:
parsed_url_components = url.split('//')
sublevel_split = parsed_url_components[1].split('/', 1)
domain = sublevel_split[0].replace("www.", "")
return domain
except IndexError:
print("URL format error!")
def analyze(results):
prompt = input("[.] Type <c> to print or <p> to plot\n[>] ")
if prompt == "c":
		for site, count in list(results.items()):
print(site, count)
elif prompt == "p":
plt.bar(list(range(len(results))), list(results.values()), align='edge')
plt.xticks(rotation=45)
plt.xticks(list(range(len(results))), list(results.keys()))
plt.show()
else:
print("[.] Uh?")
quit()
|
flexible
|
{
"blob_id": "c74fc99bf8582fd83c312f27dfffbe894a2c8c1b",
"index": 3431,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\ndef analyze(results):\n prompt = input('[.] Type <c> to print or <p> to plot\\n[>] ')\n if prompt == 'c':\n for site, count in list(sites_count_sorted.items()):\n print(site, count)\n elif prompt == 'p':\n plt.bar(list(range(len(results))), list(results.values()), align='edge'\n )\n plt.xticks(rotation=45)\n plt.xticks(list(range(len(results))), list(results.keys()))\n plt.show()\n else:\n print('[.] Uh?')\n quit()\n",
"step-4": "import os\nimport sqlite3\nimport operator\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\n\ndef parse(url):\n try:\n parsed_url_components = url.split('//')\n sublevel_split = parsed_url_components[1].split('/', 1)\n domain = sublevel_split[0].replace('www.', '')\n return domain\n except IndexError:\n print('URL format error!')\n\n\ndef analyze(results):\n prompt = input('[.] Type <c> to print or <p> to plot\\n[>] ')\n if prompt == 'c':\n for site, count in list(sites_count_sorted.items()):\n print(site, count)\n elif prompt == 'p':\n plt.bar(list(range(len(results))), list(results.values()), align='edge'\n )\n plt.xticks(rotation=45)\n plt.xticks(list(range(len(results))), list(results.keys()))\n plt.show()\n else:\n print('[.] Uh?')\n quit()\n",
"step-5": "import os\nimport sqlite3\nimport operator\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\n\ndef parse(url):\n\ttry:\n\t\tparsed_url_components = url.split('//')\n\t\tsublevel_split = parsed_url_components[1].split('/', 1)\n\t\tdomain = sublevel_split[0].replace(\"www.\", \"\")\n\t\treturn domain\n\texcept IndexError:\n\t\tprint(\"URL format error!\")\n\ndef analyze(results):\n\n\tprompt = input(\"[.] Type <c> to print or <p> to plot\\n[>] \")\n\n\tif prompt == \"c\":\n\t\tfor site, count in list(sites_count_sorted.items()):\n\t\t\tprint(site, count)\n\telif prompt == \"p\":\n\t\tplt.bar(list(range(len(results))), list(results.values()), align='edge')\n\t\tplt.xticks(rotation=45)\n\t\tplt.xticks(list(range(len(results))), list(results.keys()))\n\t\tplt.show()\n\telse:\n\t\tprint(\"[.] Uh?\")\n\t\tquit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
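The hand-rolled parse() above splits on '//' and only guards against URLs that lack the scheme separator. For comparison, the standard library extracts the same domain more robustly; this is an alternative sketch, not what the module itself uses:

from urllib.parse import urlparse

for url in ('https://www.example.com/a/b', 'http://docs.python.org/3/'):
    domain = urlparse(url).netloc.replace('www.', '')
    print(domain)   # example.com, then docs.python.org
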
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MONITOR = odict({'oxygen': {'name': 'O2 Concentration', 'units': '%',
'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals': 1},
'temperature': {'name': 'Temperature', 'units': '°C', 'abs_range': (0,
50), 'safe_range': (20, 30), 'decimals': 1}, 'humidity': {'name':
'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75),
'decimals': 1}, 'vte': {'name': 'VTE', 'units': '%', 'abs_range': (0,
100), 'safe_range': (20, 80), 'decimals': 1}})
CONTROL = {'oxygen': {'name': 'O2 Concentration', 'units': '%', 'abs_range':
(0, 100), 'value': 80, 'decimals': 1}, 'temperature': {'name':
'Temperature', 'units': '°C', 'abs_range': (0, 50), 'value': 23,
'decimals': 1}}
PLOTS = {'flow': {'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range':
(20, 80), 'color': styles.SUBWAY_COLORS['yellow']}, 'pressure': {'name':
'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80),
'color': styles.SUBWAY_COLORS['orange']}}
<|reserved_special_token_1|>
from collections import OrderedDict as odict
from vent.gui import styles
MONITOR = odict({'oxygen': {'name': 'O2 Concentration', 'units': '%',
'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals': 1},
'temperature': {'name': 'Temperature', 'units': '°C', 'abs_range': (0,
50), 'safe_range': (20, 30), 'decimals': 1}, 'humidity': {'name':
'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75),
'decimals': 1}, 'vte': {'name': 'VTE', 'units': '%', 'abs_range': (0,
100), 'safe_range': (20, 80), 'decimals': 1}})
CONTROL = {'oxygen': {'name': 'O2 Concentration', 'units': '%', 'abs_range':
(0, 100), 'value': 80, 'decimals': 1}, 'temperature': {'name':
'Temperature', 'units': '°C', 'abs_range': (0, 50), 'value': 23,
'decimals': 1}}
PLOTS = {'flow': {'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range':
(20, 80), 'color': styles.SUBWAY_COLORS['yellow']}, 'pressure': {'name':
'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80),
'color': styles.SUBWAY_COLORS['orange']}}
<|reserved_special_token_1|>
from collections import OrderedDict as odict
from vent.gui import styles
MONITOR = odict({
'oxygen': {
'name': 'O2 Concentration',
'units': '%',
'abs_range': (0, 100),
'safe_range': (60, 100),
'decimals' : 1
},
'temperature': {
'name': 'Temperature',
'units': '\N{DEGREE SIGN}C',
'abs_range': (0, 50),
'safe_range': (20, 30),
'decimals': 1
},
'humidity': {
'name': 'Humidity',
'units': '%',
'abs_range': (0, 100),
'safe_range': (20, 75),
'decimals': 1
},
'vte': {
'name': 'VTE',
'units': '%',
'abs_range': (0, 100),
'safe_range': (20, 80),
'decimals': 1
}
})
CONTROL = {
'oxygen': {
'name': 'O2 Concentration',
'units': '%',
'abs_range': (0, 100),
'value': 80,
'decimals': 1
},
'temperature': {
'name': 'Temperature',
'units': '\N{DEGREE SIGN}C',
'abs_range': (0, 50),
'value': 23,
'decimals': 1
},
}
PLOTS = {
'flow': {
'name': 'Flow (L/s)',
'abs_range': (0, 100),
'safe_range': (20, 80),
'color': styles.SUBWAY_COLORS['yellow'],
},
'pressure': {
'name': 'Pressure (mmHg)',
'abs_range': (0, 100),
'safe_range': (20, 80),
'color': styles.SUBWAY_COLORS['orange'],
}
}
|
flexible
|
{
"blob_id": "941dac77fe60081ffa113c437a356d59837f5883",
"index": 5304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nMONITOR = odict({'oxygen': {'name': 'O2 Concentration', 'units': '%',\n 'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals': 1},\n 'temperature': {'name': 'Temperature', 'units': '°C', 'abs_range': (0, \n 50), 'safe_range': (20, 30), 'decimals': 1}, 'humidity': {'name':\n 'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75),\n 'decimals': 1}, 'vte': {'name': 'VTE', 'units': '%', 'abs_range': (0, \n 100), 'safe_range': (20, 80), 'decimals': 1}})\nCONTROL = {'oxygen': {'name': 'O2 Concentration', 'units': '%', 'abs_range':\n (0, 100), 'value': 80, 'decimals': 1}, 'temperature': {'name':\n 'Temperature', 'units': '°C', 'abs_range': (0, 50), 'value': 23,\n 'decimals': 1}}\nPLOTS = {'flow': {'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range':\n (20, 80), 'color': styles.SUBWAY_COLORS['yellow']}, 'pressure': {'name':\n 'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['orange']}}\n",
"step-3": "from collections import OrderedDict as odict\nfrom vent.gui import styles\nMONITOR = odict({'oxygen': {'name': 'O2 Concentration', 'units': '%',\n 'abs_range': (0, 100), 'safe_range': (60, 100), 'decimals': 1},\n 'temperature': {'name': 'Temperature', 'units': '°C', 'abs_range': (0, \n 50), 'safe_range': (20, 30), 'decimals': 1}, 'humidity': {'name':\n 'Humidity', 'units': '%', 'abs_range': (0, 100), 'safe_range': (20, 75),\n 'decimals': 1}, 'vte': {'name': 'VTE', 'units': '%', 'abs_range': (0, \n 100), 'safe_range': (20, 80), 'decimals': 1}})\nCONTROL = {'oxygen': {'name': 'O2 Concentration', 'units': '%', 'abs_range':\n (0, 100), 'value': 80, 'decimals': 1}, 'temperature': {'name':\n 'Temperature', 'units': '°C', 'abs_range': (0, 50), 'value': 23,\n 'decimals': 1}}\nPLOTS = {'flow': {'name': 'Flow (L/s)', 'abs_range': (0, 100), 'safe_range':\n (20, 80), 'color': styles.SUBWAY_COLORS['yellow']}, 'pressure': {'name':\n 'Pressure (mmHg)', 'abs_range': (0, 100), 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['orange']}}\n",
"step-4": "from collections import OrderedDict as odict\n\nfrom vent.gui import styles\n\nMONITOR = odict({\n 'oxygen': {\n 'name': 'O2 Concentration',\n 'units': '%',\n 'abs_range': (0, 100),\n 'safe_range': (60, 100),\n 'decimals' : 1\n },\n 'temperature': {\n 'name': 'Temperature',\n 'units': '\\N{DEGREE SIGN}C',\n 'abs_range': (0, 50),\n 'safe_range': (20, 30),\n 'decimals': 1\n },\n 'humidity': {\n 'name': 'Humidity',\n 'units': '%',\n 'abs_range': (0, 100),\n 'safe_range': (20, 75),\n 'decimals': 1\n },\n 'vte': {\n 'name': 'VTE',\n 'units': '%',\n 'abs_range': (0, 100),\n 'safe_range': (20, 80),\n 'decimals': 1\n }\n })\n\n\nCONTROL = {\n 'oxygen': {\n 'name': 'O2 Concentration',\n 'units': '%',\n 'abs_range': (0, 100),\n 'value': 80,\n 'decimals': 1\n },\n 'temperature': {\n 'name': 'Temperature',\n 'units': '\\N{DEGREE SIGN}C',\n 'abs_range': (0, 50),\n 'value': 23,\n 'decimals': 1\n },\n }\n\nPLOTS = {\n 'flow': {\n 'name': 'Flow (L/s)',\n 'abs_range': (0, 100),\n 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['yellow'],\n },\n 'pressure': {\n 'name': 'Pressure (mmHg)',\n 'abs_range': (0, 100),\n 'safe_range': (20, 80),\n 'color': styles.SUBWAY_COLORS['orange'],\n }\n }",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
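The MONITOR/CONTROL/PLOTS record above is a declarative widget schema: each entry carries a display name, units, an absolute range and (for monitored values) a safe range. A self-contained sketch of how such a schema might be consumed; in_safe_range and the one-entry MONITOR subset are illustrative assumptions, not part of vent.gui:

MONITOR = {'oxygen': {'name': 'O2 Concentration', 'units': '%',
                      'abs_range': (0, 100), 'safe_range': (60, 100),
                      'decimals': 1}}

def in_safe_range(key, value, schema=MONITOR):
    lo, hi = schema[key]['safe_range']
    return lo <= value <= hi

print(in_safe_range('oxygen', 85.0))  # True
print(in_safe_range('oxygen', 40.0))  # False -- an alarm widget would fire
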
import time as t
class Record:
def __init__(self, value=10, name='name'):
self.id = name
self.value = value
def __get__(self, instance, owner):
with open('record.txt', 'a') as f:
            msg = '读取变量%s ' % self.id  # "read variable <name>"
            tmp = t.localtime()[:6]
            form = ['年', '月', '日 ', ':', ':', '']  # year/month/day marks for the timestamp
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
return self.value
def __set__(self, instance, value):
with open('record.txt', 'a') as f:
            msg = '更改变量%s为%s ' % (self.id, str(value))  # "changed <name> to <value>"
            tmp = t.localtime()[:6]
            form = ['年', '月', '日 ', ':', ':', '']  # year/month/day marks for the timestamp
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
self.value = value
|
normal
|
{
"blob_id": "3e1540a06c478d471f6e6a190cadc44d5c4c2467",
"index": 665,
"step-1": "<mask token>\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n <mask token>\n\n def __set__(self, instance, value):\n with open('record.txt', 'a') as f:\n msg = '更改变量%s为%s ' % (self.id, str(value))\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n self.value = value\n",
"step-3": "<mask token>\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n\n def __get__(self, instance, owner):\n with open('record.txt', 'a') as f:\n msg = '读取变量%s ' % self.id\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n return self.value\n\n def __set__(self, instance, value):\n with open('record.txt', 'a') as f:\n msg = '更改变量%s为%s ' % (self.id, str(value))\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n self.value = value\n",
"step-4": "import time as t\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n\n def __get__(self, instance, owner):\n with open('record.txt', 'a') as f:\n msg = '读取变量%s ' % self.id\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n return self.value\n\n def __set__(self, instance, value):\n with open('record.txt', 'a') as f:\n msg = '更改变量%s为%s ' % (self.id, str(value))\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n self.value = value\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@login_required(login_url='user_login')
def countries_add(request):
if request.method == 'POST':
form = CountryForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(request,
                'the country has been added successfully :) ')
return redirect('countries_add')
else:
form = CountryForm()
context = {'form': form}
return render(request, 'map_app/countries/add.html', context)
<|reserved_special_token_0|>
@login_required(login_url='user_login')
def country_edit(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
form = CountryForm(request.POST or None, instance=country)
if form.is_valid():
inst = form.save(commit=False)
inst.updated_at = datetime.utcnow()
inst.save()
messages.success(request,
                'the country has been updated successfully :) ')
return redirect('countries_list')
else:
form = CountryForm(instance=country)
context = {'country': country, 'form': form}
return render(request, 'map_app/countries/edit.html', context)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_required(login_url='user_login')
def countries_add(request):
if request.method == 'POST':
form = CountryForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(request,
                'the country has been added successfully :) ')
return redirect('countries_add')
else:
form = CountryForm()
context = {'form': form}
return render(request, 'map_app/countries/add.html', context)
@login_required(login_url='user_login')
def country_details(request, id):
country = get_object_or_404(Countries, id=id)
context = {'country': country}
return render(request, 'map_app/countries/details.html', context)
@login_required(login_url='user_login')
def country_edit(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
form = CountryForm(request.POST or None, instance=country)
if form.is_valid():
inst = form.save(commit=False)
inst.updated_at = datetime.utcnow()
inst.save()
messages.success(request,
                'the country has been updated successfully :) ')
return redirect('countries_list')
else:
form = CountryForm(instance=country)
context = {'country': country, 'form': form}
return render(request, 'map_app/countries/edit.html', context)
@login_required(login_url='user_login')
def country_delete(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
country.delete()
        messages.success(request, 'the country has been deleted successfully')
return redirect('home')
context = {'country': country}
return render(request, 'map_app/countries/delete.html', context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@login_required(login_url='user_login')
def countries_list(request):
countries = Countries.objects.all()
context = {'countries': countries}
return render(request, 'map_app/countries/list.html', context)
@login_required(login_url='user_login')
def countries_add(request):
if request.method == 'POST':
form = CountryForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(request,
                'the country has been added successfully :) ')
return redirect('countries_add')
else:
form = CountryForm()
context = {'form': form}
return render(request, 'map_app/countries/add.html', context)
@login_required(login_url='user_login')
def country_details(request, id):
country = get_object_or_404(Countries, id=id)
context = {'country': country}
return render(request, 'map_app/countries/details.html', context)
@login_required(login_url='user_login')
def country_edit(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
form = CountryForm(request.POST or None, instance=country)
if form.is_valid():
inst = form.save(commit=False)
inst.updated_at = datetime.utcnow()
inst.save()
messages.success(request,
                'the country has been updated successfully :) ')
return redirect('countries_list')
else:
form = CountryForm(instance=country)
context = {'country': country, 'form': form}
return render(request, 'map_app/countries/edit.html', context)
@login_required(login_url='user_login')
def country_delete(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
country.delete()
        messages.success(request, 'the country has been deleted successfully')
return redirect('home')
context = {'country': country}
return render(request, 'map_app/countries/delete.html', context)
<|reserved_special_token_1|>
import urllib
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.utils.translation import gettext as _
from .forms import CountryForm
from .models import Countries
from django.utils.timezone import datetime
from django.contrib.auth.decorators import login_required
@login_required(login_url='user_login')
def countries_list(request):
countries = Countries.objects.all()
context = {'countries': countries}
return render(request, 'map_app/countries/list.html', context)
@login_required(login_url='user_login')
def countries_add(request):
if request.method == 'POST':
form = CountryForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(request,
                'the country has been added successfully :) ')
return redirect('countries_add')
else:
form = CountryForm()
context = {'form': form}
return render(request, 'map_app/countries/add.html', context)
@login_required(login_url='user_login')
def country_details(request, id):
country = get_object_or_404(Countries, id=id)
context = {'country': country}
return render(request, 'map_app/countries/details.html', context)
@login_required(login_url='user_login')
def country_edit(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
form = CountryForm(request.POST or None, instance=country)
if form.is_valid():
inst = form.save(commit=False)
inst.updated_at = datetime.utcnow()
inst.save()
messages.success(request,
                'the country has been updated successfully :) ')
return redirect('countries_list')
else:
form = CountryForm(instance=country)
context = {'country': country, 'form': form}
return render(request, 'map_app/countries/edit.html', context)
@login_required(login_url='user_login')
def country_delete(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
country.delete()
        messages.success(request, 'the country has been deleted successfully')
return redirect('home')
context = {'country': country}
return render(request, 'map_app/countries/delete.html', context)
<|reserved_special_token_1|>
import urllib
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.utils.translation import gettext as _
from .forms import CountryForm
from .models import Countries
from django.utils.timezone import datetime
from django.contrib.auth.decorators import login_required
# COUNTRIES LIST
@login_required(login_url='user_login')
def countries_list(request):
countries = Countries.objects.all()
context = {
'countries': countries,
}
return render(request, 'map_app/countries/list.html', context)
# CREATE COUNTRY
@login_required(login_url='user_login')
def countries_add(request):
if request.method == 'POST':
form = CountryForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(
                request, 'the country has been added successfully :) ')
return redirect('countries_add')
else:
form = CountryForm()
context = {
'form': form,
}
return render(request, 'map_app/countries/add.html', context)
# DETAILS OF COUNTRY
@login_required(login_url='user_login')
def country_details(request, id):
country = get_object_or_404(Countries, id=id)
context = {
'country': country,
}
return render(request, 'map_app/countries/details.html', context)
# UPDATE COUNTRY
@login_required(login_url='user_login')
def country_edit(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
form = CountryForm(request.POST or None, instance=country)
if form.is_valid():
inst = form.save(commit=False)
inst.updated_at = datetime.utcnow()
inst.save()
messages.success(
                request, 'the country has been updated successfully :) ')
return redirect('countries_list')
else:
form = CountryForm(instance=country)
context = {
'country': country,
'form': form,
}
return render(request, 'map_app/countries/edit.html', context)
# DELETE COUNTRY
@login_required(login_url='user_login')
def country_delete(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
country.delete()
messages.success(
            request, 'the country has been deleted successfully')
return redirect('home')
context = {
'country': country,
}
return render(request, 'map_app/countries/delete.html', context)
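

# Sketch of a urls.py wiring these views (illustrative: the route names are taken
# from the redirect() targets above, but the URL layout itself is an assumption):
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('countries/', views.countries_list, name='countries_list'),
#     path('countries/add/', views.countries_add, name='countries_add'),
#     path('countries/<int:id>/', views.country_details, name='country_details'),
#     path('countries/<int:id>/edit/', views.country_edit, name='country_edit'),
#     path('countries/<int:id>/delete/', views.country_delete, name='country_delete'),
# ]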
|
flexible
|
{
"blob_id": "8640de519ebf7f95588ac40b55662da85ffc926e",
"index": 5224,
"step-1": "<mask token>\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n<mask token>\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n context = {'country': country}\n return render(request, 'map_app/countries/details.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n messages.success(request, 'the country has been deleted in successfuly'\n )\n return redirect('home')\n context = {'country': country}\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-3": "<mask token>\n\n\n@login_required(login_url='user_login')\ndef countries_list(request):\n countries = Countries.objects.all()\n context = {'countries': countries}\n return render(request, 'map_app/countries/list.html', context)\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n context = {'country': country}\n return render(request, 'map_app/countries/details.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n messages.success(request, 'the country has been deleted in successfuly'\n )\n return redirect('home')\n context = {'country': country}\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-4": "import urllib\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.utils.translation import gettext as _\nfrom .forms import CountryForm\nfrom .models import Countries\nfrom django.utils.timezone import datetime\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required(login_url='user_login')\ndef countries_list(request):\n countries = Countries.objects.all()\n context = {'countries': countries}\n return render(request, 'map_app/countries/list.html', context)\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n context = {'country': country}\n return render(request, 'map_app/countries/details.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n messages.success(request, 'the country has been deleted in successfuly'\n )\n return redirect('home')\n context = {'country': country}\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-5": "import urllib\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.utils.translation import gettext as _\nfrom .forms import CountryForm\nfrom .models import Countries\nfrom django.utils.timezone import datetime\nfrom django.contrib.auth.decorators import login_required\n\n\n# COUNTRIES LIST\n@login_required(login_url='user_login')\ndef countries_list(request):\n countries = Countries.objects.all()\n context = {\n 'countries': countries,\n }\n return render(request, 'map_app/countries/list.html', context)\n\n\n# CREATE COUNTRY\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(\n request, 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {\n 'form': form,\n }\n return render(request, 'map_app/countries/add.html', context)\n\n\n# DETAILS OF COUNTRY\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n\n context = {\n 'country': country,\n }\n return render(request, 'map_app/countries/details.html', context)\n\n# UPDATE COUNTRY\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(\n request, 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n\n context = {\n 'country': country,\n 'form': form,\n }\n return render(request, 'map_app/countries/edit.html', context)\n\n\n# DELETE COUNTRY\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n\n messages.success(\n request, 'the country has been deleted in successfuly')\n return redirect('home')\n\n context = {\n 'country': country,\n }\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import pickle
from generation_code import serial_filename
import serial_output_code
import numpy as np
from shutil import copyfile
from os import remove
# This file is only temporary, mostly to be used when updating the
# reference output from a regression test, to ensure that, in all
# aspects that are in common with the previosu regression test, the new
# solution is the same.
# It is largely the same as test_serial_code.py
temp_filename = 'temp.pickle'
try:
# Copy reference output to temporary location
copyfile(serial_filename(),temp_filename)
# Run serial code
serial_output_code.serial_output_code()
with open(serial_filename(),'rb') as f:
qmc_out = pickle.load(f)
with open(temp_filename,'rb') as f:
old_out = pickle.load(f)
finally:
# Copy reference output back
copyfile(temp_filename,serial_filename())
# Remove temporary file
remove(temp_filename)
assert qmc_out[0] == old_out[0] # should be a float
print(len(qmc_out))
print(len(old_out))
assert len(qmc_out) == (len(old_out) + 1) # Because we've added in a new output
for ii in range(1,len(old_out)):
    assert len(old_out[ii]) == len(qmc_out[ii])
for jj in range(len(qmc_out[1])):
# For some reason, the sizes of these variables (in
# bytes) aren't always the same. I've no idea why.
# Hence, this assertion is commented out.
#assert getsizeof(qmc_out[ii][jj]) == getsizeof(old_out[ii][jj])
#assert np.all(np.isclose(qmc_out[ii][jj],old_out[ii][jj]))
assert np.all(np.isclose(qmc_out[ii][jj],old_out[ii][jj]))
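
# Toy illustration of the checks above (illustrative data, not from a real run):
# old = (0.5, [np.array([1.0, 2.0])])
# new = (0.5, [np.array([1.0, 2.0])], 'new output')
# len(new) == len(old) + 1           -> True
# np.isclose(new[1][0], old[1][0])   -> array([ True,  True])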
|
normal
|
{
"blob_id": "6acb253189798c22d47feb3d61ac68a1851d22ba",
"index": 1619,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n copyfile(serial_filename(), temp_filename)\n serial_output_code.serial_output_code()\n with open(serial_filename(), 'rb') as f:\n qmc_out = pickle.load(f)\n with open(temp_filename, 'rb') as f:\n old_out = pickle.load(f)\nfinally:\n copyfile(temp_filename, serial_filename())\n remove(temp_filename)\nassert qmc_out[0] == old_out[0]\nprint(len(qmc_out))\nprint(len(old_out))\nassert len(qmc_out) == len(old_out) + 1\nfor ii in range(1, len(old_out)):\n assert len(old_out[ii]) == len(qmc_out[ii])\n for jj in range(len(qmc_out[1])):\n assert np.all(np.isclose(qmc_out[ii][jj], old_out[ii][jj]))\n",
"step-3": "<mask token>\ntemp_filename = 'temp.pickle'\ntry:\n copyfile(serial_filename(), temp_filename)\n serial_output_code.serial_output_code()\n with open(serial_filename(), 'rb') as f:\n qmc_out = pickle.load(f)\n with open(temp_filename, 'rb') as f:\n old_out = pickle.load(f)\nfinally:\n copyfile(temp_filename, serial_filename())\n remove(temp_filename)\nassert qmc_out[0] == old_out[0]\nprint(len(qmc_out))\nprint(len(old_out))\nassert len(qmc_out) == len(old_out) + 1\nfor ii in range(1, len(old_out)):\n assert len(old_out[ii]) == len(qmc_out[ii])\n for jj in range(len(qmc_out[1])):\n assert np.all(np.isclose(qmc_out[ii][jj], old_out[ii][jj]))\n",
"step-4": "import pickle\nfrom generation_code import serial_filename\nimport serial_output_code\nimport numpy as np\nfrom shutil import copyfile\nfrom os import remove\ntemp_filename = 'temp.pickle'\ntry:\n copyfile(serial_filename(), temp_filename)\n serial_output_code.serial_output_code()\n with open(serial_filename(), 'rb') as f:\n qmc_out = pickle.load(f)\n with open(temp_filename, 'rb') as f:\n old_out = pickle.load(f)\nfinally:\n copyfile(temp_filename, serial_filename())\n remove(temp_filename)\nassert qmc_out[0] == old_out[0]\nprint(len(qmc_out))\nprint(len(old_out))\nassert len(qmc_out) == len(old_out) + 1\nfor ii in range(1, len(old_out)):\n assert len(old_out[ii]) == len(qmc_out[ii])\n for jj in range(len(qmc_out[1])):\n assert np.all(np.isclose(qmc_out[ii][jj], old_out[ii][jj]))\n",
"step-5": "import pickle\nfrom generation_code import serial_filename\nimport serial_output_code\nimport numpy as np\nfrom shutil import copyfile\nfrom os import remove\n\n# This file is only temporary, mostly to be used when updating the\n# reference output from a regression test, to ensure that, in all\n# aspects that are in common with the previosu regression test, the new\n# solution is the same.\n\n# It is largely the same as test_serial_code.py\n\ntemp_filename = 'temp.pickle'\n\ntry:\n # Copy reference output to temporary location\n copyfile(serial_filename(),temp_filename)\n\n # Run serial code\n serial_output_code.serial_output_code()\n\n with open(serial_filename(),'rb') as f:\n qmc_out = pickle.load(f)\n\n with open(temp_filename,'rb') as f:\n old_out = pickle.load(f)\nfinally:\n # Copy reference output back\n copyfile(temp_filename,serial_filename())\n\n # Remove temporary file\n remove(temp_filename)\n\n\nassert qmc_out[0] == old_out[0] # should be a float\n\nprint(len(qmc_out))\n\nprint(len(old_out))\n\nassert len(qmc_out) == (len(old_out) + 1) # Because we've added in a new output\n\nfor ii in range(1,len(old_out)):\n assert(len(old_out[ii])==len(qmc_out[ii]))\n for jj in range(len(qmc_out[1])):\n # For some reason, the sizes of these variables (in\n # bytes) aren't always the same. I've no idea why.\n # Hence, this assertion is commented out.\n #assert getsizeof(qmc_out[ii][jj]) == getsizeof(old_out[ii][jj]) \n #assert np.all(np.isclose(qmc_out[ii][jj],old_out[ii][jj]))\n assert np.all(np.isclose(qmc_out[ii][jj],old_out[ii][jj]))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import json
with open(__file__.replace('.py', '.txt')) as f:
problem = f.read()
data = {
'problem': problem,
'example': """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L""" # should give 42
}
def solve_problem(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
orbit_counts = {'COM': 0}
for object in tuple(parents.keys()):
stack = [object]
while stack[-1] not in orbit_counts:
stack.append(parents[stack[-1]])
known = orbit_counts[stack.pop()]
stack.reverse()
for thing in stack:
orbit_counts[thing] = orbit_counts[parents[thing]] + 1
return sum(orbit_counts.values())
# part 1
if sys.argv[-1] in data.keys():
scenarios = (sys.argv[-1],)
else:
scenarios = tuple(data.keys())
for scenario in scenarios:
input = data[scenario]
r = solve_problem(input)
print(f'FINAL ANSWER: {r}')
# 932, too low
print('')
print('**** PART 2 ******')
def get_parents(key, parents):
"""Get parents for a particular key through parents dict"""
r = [key]
while True:
this_one = r[-1]
if this_one == 'COM':
return r
r.append(parents[this_one])
def part2(input):
parents = {}
for i, line in enumerate(input.split('\n')):
about, object = line.split(')')
parents[object] = about
santa = get_parents('SAN', parents)
me = get_parents('YOU', parents)
for i, planet in enumerate(me):
if planet in santa:
print(f'met at {planet}')
print('')
print(santa[:santa.index(planet) + 1])
print(len(santa[:santa.index(planet) + 1]))
            # minus one because we want traversals between elements in list
print(santa.index(planet))
print('')
print(me[:i + 1])
print(len(me[:i + 1]))
            # minus one because we want traversals between elements in list
print(i)
            # i counts edges from YOU up to the meeting planet, and
            # santa.index(planet) counts edges from SAN up to it; transfers
            # start from the objects YOU and SAN already orbit, so one edge
            # is dropped from each side
            return i + santa.index(planet) - 2
data['example'] = """COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
for scenario in scenarios:
input = data[scenario]
r = part2(input)
print(f'Part 2 answer {r}')
# 432, too high
# 433, too high
# 431, too high
# 430, correct
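
# Worked check on the example above: me = ['YOU','K','J','E','D',...] and
# santa = ['SAN','I','D',...] first meet at 'D', where i = 4 and
# santa.index('D') = 2, so part2 gives 4 + 2 - 2 = 4 (K->J, J->E, E->D, D->I).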
|
normal
|
{
"blob_id": "e57680c9bd09866e68ade0cfea7ce83cd6d50f58",
"index": 1596,
"step-1": "<mask token>\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\n<mask token>\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\n<mask token>\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\nif sys.argv[-1] in data.keys():\n scenarios = sys.argv[-1],\nelse:\n scenarios = tuple(data.keys())\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\n<mask token>\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n",
"step-3": "<mask token>\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\ndata = {'problem': problem, 'example':\n \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\"}\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\nif sys.argv[-1] in data.keys():\n scenarios = sys.argv[-1],\nelse:\n scenarios = tuple(data.keys())\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\ndata['example'] = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\"\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n",
"step-4": "import sys\nimport json\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\ndata = {'problem': problem, 'example':\n \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\"}\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n orbit_counts = {'COM': 0}\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n return sum(orbit_counts.values())\n\n\nif sys.argv[-1] in data.keys():\n scenarios = sys.argv[-1],\nelse:\n scenarios = tuple(data.keys())\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n print(i)\n return i + santa.index(planet) - 1\n\n\ndata['example'] = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\"\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n",
"step-5": "import sys\nimport json\n\n\nwith open(__file__.replace('.py', '.txt')) as f:\n problem = f.read()\n\n\ndata = {\n 'problem': problem,\n 'example': \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\"\"\" # should give 42\n}\n\n\ndef solve_problem(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n\n orbit_counts = {'COM': 0}\n\n for object in tuple(parents.keys()):\n stack = [object]\n while stack[-1] not in orbit_counts:\n stack.append(parents[stack[-1]])\n known = orbit_counts[stack.pop()]\n stack.reverse()\n for thing in stack:\n orbit_counts[thing] = orbit_counts[parents[thing]] + 1\n\n return sum(orbit_counts.values())\n\n\n# part 1\nif sys.argv[-1] in data.keys():\n scenarios = (sys.argv[-1],)\nelse:\n scenarios = tuple(data.keys())\n\n\nfor scenario in scenarios:\n input = data[scenario]\n r = solve_problem(input)\n print(f'FINAL ANSWER: {r}')\n\n\n# 932, too low\n\nprint('')\nprint('**** PART 2 ******')\n\n\ndef get_parents(key, parents):\n \"\"\"Get parents for a particular key through parents dict\"\"\"\n r = [key]\n while True:\n this_one = r[-1]\n if this_one == 'COM':\n return r\n r.append(parents[this_one])\n\n\ndef part2(input):\n parents = {}\n for i, line in enumerate(input.split('\\n')):\n about, object = line.split(')')\n parents[object] = about\n\n santa = get_parents('SAN', parents)\n me = get_parents('YOU', parents)\n\n for i, planet in enumerate(me):\n if planet in santa:\n print(f'met at {planet}')\n print('')\n print(santa[:santa.index(planet) + 1])\n print(len(santa[:santa.index(planet) + 1]))\n # minus one because we want traversials between elements in list\n print(santa.index(planet))\n print('')\n print(me[:i + 1])\n print(len(me[:i + 1]))\n # minus one because we want traversials between elements in list\n print(i)\n # minus another one because transfering to the planet is already counted\n # ...or something like that\n # minus one because problem said so\n return i + santa.index(planet) - 1\n\ndata['example'] = \"\"\"COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN\"\"\"\n\nfor scenario in scenarios:\n input = data[scenario]\n r = part2(input)\n print(f'Part 2 answer {r}')\n\n# 432, too high\n# 433, too high\n# 431, too high\n# 430, correct\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Fibonacci sequence (each term = the sum of the two preceding terms)
# Also called the golden-ratio sequence: the ratio term/next approaches 0.618
# list1 = []
# for i in range(20):
# if i <= 1:
# list1.append(1)
# else:
# list1.append(list1[-2]+list1[-1])
# print(list1)
import random
dict1 = {'A': [], 'B': [], 'C': [], 'D': []}
for i in range(20):
    re = random.randint(1, 100)
    if re >= 90:
        dict1['A'].append(re)
    elif re >= 80:
        dict1['B'].append(re)
    elif re >= 70:
        dict1['C'].append(re)  # C/D branches filled in so every bucket is used;
    else:
        dict1['D'].append(re)  # the 70 cutoff for 'C' is an assumed threshold
print(dict1)
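
# Quick check of the golden-ratio claim in the comment above (illustrative):
fib = [1, 1]
for _ in range(18):
    fib.append(fib[-2] + fib[-1])
print(fib[-2] / fib[-1])  # ~0.6180, i.e. the golden-ratio conjugate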
|
normal
|
{
"blob_id": "4a223cdd3c957af2f54e33c910ce70d2b5e6c963",
"index": 7705,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(20):\n re = random.randint(1, 100)\n if re >= 90:\n dict1['A'].append(re)\n elif re >= 80:\n dict1['B'].append(re)\nprint(dict1)\n",
"step-3": "<mask token>\ndict1 = {'A': [], 'B': [], 'C': [], 'D': []}\nfor i in range(20):\n re = random.randint(1, 100)\n if re >= 90:\n dict1['A'].append(re)\n elif re >= 80:\n dict1['B'].append(re)\nprint(dict1)\n",
"step-4": "import random\ndict1 = {'A': [], 'B': [], 'C': [], 'D': []}\nfor i in range(20):\n re = random.randint(1, 100)\n if re >= 90:\n dict1['A'].append(re)\n elif re >= 80:\n dict1['B'].append(re)\nprint(dict1)\n",
"step-5": "# 斐波那契数列(后一位=前两位之和)\n# 又叫黄金分割数列,前/后 越来越接近0.618\n# list1 = []\n# for i in range(20):\n# if i <= 1:\n# list1.append(1)\n# else:\n# list1.append(list1[-2]+list1[-1])\n# print(list1)\n\n\nimport random\ndict1 = {'A': [], 'B': [], 'C': [], 'D': []}\nfor i in range(20):\n re = random.randint(1, 100)\n if re >= 90:\n dict1['A'].append(re)\n elif re >= 80:\n dict1['B'].append(re)\nprint(dict1)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np
frameWidth = 640
frameHeight = 480
# capturing Video from Webcam
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, 150)
myColors = [[20,40,40,70,255,255],
[100,169,121,135,255,255],
[0, 90, 90, 41, 255, 255]]
color_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]
x, y, w, h = 0, 0, 0, 0
my_points = []
def find_color(img, color_value, myColors):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
count = 0
new_points = []
for color in myColors:
lower = np.array(color[0:3])
upper = np.array(color[3:6])
mask = cv2.inRange(hsv, lower, upper)
x, y = contour_detect(mask)
cv2.circle(frame_copy, (x,y), 20,color_value[count], -1)
if x != 0 and y != 0:
new_points.append([x,y,count])
count += 1
return new_points
def contour_detect(mask):
x,y,w,h = 0, 0, 0, 0
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 100:
perimeter = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.01*perimeter, True)
x, y, w, h = cv2.boundingRect(approx)
return x + w // 2, y
def canvas(my_points, color_value):
for point in my_points:
cv2.circle(frame_copy, (point[0], point[1]),
15, color_value[point[2]], -1)
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
frame_copy = frame.copy()
new_point = find_color(frame, color_value, myColors)
if len(new_point) != 0:
for i in new_point:
my_points.append(i)
if len(my_points) != 0:
canvas(my_points, color_value)
cv2.imshow('frame', frame_copy)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # '&' (bitwise), not 'and': mask the key code
break
cv2.destroyAllWindows()
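
# Sanity check of the masking step without a webcam (illustrative values):
# a pure-green BGR image should fall entirely inside the first HSV range above.
test_img = np.full((10, 10, 3), (0, 255, 0), dtype=np.uint8)
hsv_test = cv2.cvtColor(test_img, cv2.COLOR_BGR2HSV)
mask_test = cv2.inRange(hsv_test, np.array([20, 40, 40]), np.array([70, 255, 255]))
print(mask_test.mean())  # 255.0 when every pixel matches the range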
|
normal
|
{
"blob_id": "836c1d2083d18c68fe551278d2df4155edc64c8c",
"index": 5298,
"step-1": "<mask token>\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\n<mask token>\n",
"step-2": "<mask token>\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\n<mask token>\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\nmyColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,\n 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\nmyColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,\n 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\nframeWidth = 640\nframeHeight = 480\n\n# capturing Video from Webcam\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\n\ncap.set(10, 150)\n\nmyColors = [[20,40,40,70,255,255],\n [100,169,121,135,255,255],\n [0, 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n\n cv2.circle(frame_copy, (x,y), 20,color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x,y,count])\n count += 1\n return new_points\n\n\n\ndef contour_detect(mask):\n x,y,w,h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01*perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]),\n 15, color_value[point[2]], -1)\n\n\nwhile True:\n\n ret, frame = cap.read()\n\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from Bio import BiopythonWarning, SeqIO
from Bio.PDB import MMCIFParser, Dice, PDBParser
from Bio.SeqUtils import seq1
import time
import requests
import re
import warnings
warnings.simplefilter('ignore', BiopythonWarning)
def get_response(url):
    cnt = 20
    while cnt != 0:
        response = requests.get(url)  # re-issue the request on every retry
        if response.status_code == 200:
            return response.content.decode()
        time.sleep(1)
        cnt -= 1
    raise IOError(f"Some issues with PDB now. Try again later...\n(URL: {url})")
def get_seq_names(path_to_fasta):
values = list(zip(*[(str(record.seq), record.id)
for record in SeqIO.parse(path_to_fasta, "fasta")]))
if len(values) == 0:
return []
else:
_, names = values
return names
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0]
if chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1("".join([residue.get_resname() for residue in self.chain]))
seq_from_pdb = re.search("^X*(.*?)X*$", seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace("-", "")
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[end_on_indices]
final_seq = \
[r.get_resname() for r in self.chain.get_residues()
if start <= r.get_id()[1] <= end]
if "UNK" in final_seq:
with open(out_file, "w") as f:
f.write("")
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
def __init__(self, pdb_id, chain_id, cif_dir, file_type="cif"):
self.pdb_id = pdb_id
self.chain_id = str(chain_id)
if file_type == "cif":
self.parser = MMCIFParser()
else:
self.parser = PDBParser()
self.structure = self.parser.get_structure(pdb_id, cif_dir + f"{pdb_id}.{file_type}")
self.chain = self.get_chain()
self.seq, self.indices = self.get_seq_from_pdb()
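

# Usage sketch (illustrative; the PDB id, directory, and motif are hypothetical):
# cif = Cif('1abc', 'A', cif_dir='structures/')   # parses structures/1abc.cif
# print(cif.seq[:30])                             # one-letter sequence of chain A
# cif.dump_slice('MKT-LV', 'slice.pdb')           # '-' gaps are stripped first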
|
normal
|
{
"blob_id": "ad5cdcfd9d7a3c07abcdcb701422f3c0fdc2b374",
"index": 8860,
"step-1": "<mask token>\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n <mask token>\n",
"step-2": "<mask token>\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f'Some issues with PDB now. Try again later...\\n(URL: {url}')\n\n\n<mask token>\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == 'cif':\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir +\n f'{pdb_id}.{file_type}')\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-3": "<mask token>\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f'Some issues with PDB now. Try again later...\\n(URL: {url}')\n\n\ndef get_seq_names(path_to_fasta):\n values = list(zip(*[(str(record.seq), record.id) for record in SeqIO.\n parse(path_to_fasta, 'fasta')]))\n if len(values) == 0:\n return []\n else:\n _, names = values\n return names\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == 'cif':\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir +\n f'{pdb_id}.{file_type}')\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-4": "from Bio import BiopythonWarning, SeqIO\nfrom Bio.PDB import MMCIFParser, Dice, PDBParser\nfrom Bio.SeqUtils import seq1\nimport time\nimport requests\nimport re\nimport warnings\nwarnings.simplefilter('ignore', BiopythonWarning)\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f'Some issues with PDB now. Try again later...\\n(URL: {url}')\n\n\ndef get_seq_names(path_to_fasta):\n values = list(zip(*[(str(record.seq), record.id) for record in SeqIO.\n parse(path_to_fasta, 'fasta')]))\n if len(values) == 0:\n return []\n else:\n _, names = values\n return names\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == 'cif':\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir +\n f'{pdb_id}.{file_type}')\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-5": "from Bio import BiopythonWarning, SeqIO\nfrom Bio.PDB import MMCIFParser, Dice, PDBParser\nfrom Bio.SeqUtils import seq1\n\nimport time\nimport requests\nimport re\nimport warnings\n\nwarnings.simplefilter('ignore', BiopythonWarning)\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f\"Some issues with PDB now. Try again later...\\n(URL: {url}\")\n\n\ndef get_seq_names(path_to_fasta):\n values = list(zip(*[(str(record.seq), record.id)\n for record in SeqIO.parse(path_to_fasta, \"fasta\")]))\n if len(values) == 0:\n return []\n else:\n _, names = values\n return names\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0]\n if chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(\"\".join([residue.get_resname() for residue in self.chain]))\n seq_from_pdb = re.search(\"^X*(.*?)X*$\", seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n\n motif = motif.replace(\"-\", \"\")\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[end_on_indices]\n\n final_seq = \\\n [r.get_resname() for r in self.chain.get_residues()\n if start <= r.get_id()[1] <= end]\n\n if \"UNK\" in final_seq:\n with open(out_file, \"w\") as f:\n f.write(\"\")\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type=\"cif\"):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == \"cif\":\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir + f\"{pdb_id}.{file_type}\")\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
__author__ = 'Vicio'
from Conexion.conexion import Conexion
class ConexionList():
def __init__(self):
self.conexion = Conexion()
def selectClientes(self):
pass
def selectProveedores(self):
pass
|
normal
|
{
"blob_id": "6b4af452778bdf13ac18e8d260cf1c9176ca95e0",
"index": 8414,
"step-1": "<mask token>\n\n\nclass ConexionList:\n <mask token>\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass ConexionList:\n\n def __init__(self):\n self.conexion = Conexion()\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-3": "__author__ = 'Vicio'\n<mask token>\n\n\nclass ConexionList:\n\n def __init__(self):\n self.conexion = Conexion()\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-4": "__author__ = 'Vicio'\nfrom Conexion.conexion import Conexion\n\n\nclass ConexionList:\n\n def __init__(self):\n self.conexion = Conexion()\n\n def selectClientes(self):\n pass\n\n def selectProveedores(self):\n pass\n",
"step-5": "__author__ = 'Vicio'\nfrom Conexion.conexion import Conexion\n\n\nclass ConexionList():\n\n def __init__(self):\n self.conexion = Conexion()\n\n\n\n def selectClientes(self):\n pass\n\n\n def selectProveedores(self):\n pass\n\n\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
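Each row of this dump pairs a code sample with an `original_example` object whose `step-1` through `step-5` fields hold progressively unmasked variants (`<mask token>` marks elided spans; `flexible` rows also embed the variants in the code column, joined by reserved special tokens) and whose `step-ids` score each step. A sketch of reading one such record, assuming a row's `original_example` object has been saved to a file (the file name is hypothetical):

import json

with open('record.json') as f:           # hypothetical: one original_example object
    record = json.load(f)

for i, step_id in enumerate(record['step-ids'], start=1):
    step = record.get(f'step-{i}')
    if step is None:                     # later steps can be null (see rows above)
        continue
    print(f"step-{i} (id {step_id}): {step.count('<mask token>')} masked span(s)")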
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = patterns('', url('appmanagement', views.appmanagement, name=
'appmanagement'), url('^.*', views.index, name='index'))
<|reserved_special_token_1|>
from django.conf.urls import patterns, url
from riskDashboard2 import views
urlpatterns = patterns('', url('appmanagement', views.appmanagement, name=
'appmanagement'), url('^.*', views.index, name='index'))
<|reserved_special_token_1|>
from django.conf.urls import patterns, url
from riskDashboard2 import views
urlpatterns = patterns('',
#url(r'getdata', views.vulnData, name='getdata'),
url(r'appmanagement', views.appmanagement, name='appmanagement'),
url(r'^.*', views.index, name='index'),
)
|
flexible
|
{
"blob_id": "3372d98ff91d90558a87293d4032820b1662d60b",
"index": 298,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('', url('appmanagement', views.appmanagement, name=\n 'appmanagement'), url('^.*', views.index, name='index'))\n",
"step-3": "from django.conf.urls import patterns, url\nfrom riskDashboard2 import views\nurlpatterns = patterns('', url('appmanagement', views.appmanagement, name=\n 'appmanagement'), url('^.*', views.index, name='index'))\n",
"step-4": "from django.conf.urls import patterns, url\n\nfrom riskDashboard2 import views\n\nurlpatterns = patterns('',\n #url(r'getdata', views.vulnData, name='getdata'),\n url(r'appmanagement', views.appmanagement, name='appmanagement'),\n url(r'^.*', views.index, name='index'),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
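The final step of this record is pre-1.8 Django: `django.conf.urls.patterns` was deprecated in Django 1.8 and removed in 1.10. A hedged sketch of the same two routes in the modern list form, keeping the record's `riskDashboard2.views` import (note that `url()` itself was removed in Django 4.0 in favour of `re_path`):

from django.conf.urls import url   # use django.urls.re_path on Django 2.0+
from riskDashboard2 import views

urlpatterns = [
    url(r'appmanagement', views.appmanagement, name='appmanagement'),
    url(r'^.*', views.index, name='index'),
]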
class Enumerator(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __next__(self):
return next(self.iterator)
<|reserved_special_token_0|>
def __iter__(self):
return self
<|reserved_special_token_1|>
class Enumerator(object):
<|reserved_special_token_0|>
def __init__(self, next):
self.iterator = next
def __next__(self):
return next(self.iterator)
<|reserved_special_token_0|>
def __iter__(self):
return self
<|reserved_special_token_1|>
class Enumerator(object):
<|reserved_special_token_0|>
def __init__(self, next):
self.iterator = next
def __next__(self):
return next(self.iterator)
next = __next__
def __iter__(self):
return self
<|reserved_special_token_1|>
class Enumerator(object):
"""For Python we just wrap the iterator"""
def __init__(self, next):
self.iterator = next
def __next__(self):
return next(self.iterator)
next = __next__
def __iter__(self):
return self
<|reserved_special_token_1|>
class Enumerator(object):
"""For Python we just wrap the iterator"""
def __init__(self, next):
self.iterator = next
def __next__(self):
return next(self.iterator)
# Python 2.7
next = __next__
def __iter__(self):
return self
|
flexible
|
{
"blob_id": "1ca20b0cd9217623ff039ab352acd09df8dfae1b",
"index": 8235,
"step-1": "class Enumerator(object):\n <mask token>\n <mask token>\n\n def __next__(self):\n return next(self.iterator)\n <mask token>\n\n def __iter__(self):\n return self\n",
"step-2": "class Enumerator(object):\n <mask token>\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n <mask token>\n\n def __iter__(self):\n return self\n",
"step-3": "class Enumerator(object):\n <mask token>\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n next = __next__\n\n def __iter__(self):\n return self\n",
"step-4": "class Enumerator(object):\n \"\"\"For Python we just wrap the iterator\"\"\"\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n next = __next__\n\n def __iter__(self):\n return self\n",
"step-5": "class Enumerator(object):\n \"\"\"For Python we just wrap the iterator\"\"\"\n\n def __init__(self, next):\n self.iterator = next\n\n def __next__(self):\n return next(self.iterator)\n\n # Python 2.7\n next = __next__\n\n def __iter__(self):\n return self\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
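`Enumerator` is a thin adapter that lets one object satisfy both iterator protocols: Python 3 looks up `__next__`, while the `next = __next__` alias keeps Python 2's `next()` working. A small usage sketch, assuming the class from the record's final step is in scope:

e = Enumerator(iter([1, 2, 3]))   # wraps any existing iterator
print(next(e))                    # 1
print(list(e))                    # [2, 3]; __iter__ returns self, so iteration resumes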
s=input()
count=0
while(len(s)!=1):
count+=1
a=0
for i in range(len(s)):
a+=int(s[i])
s=str(a)
print(count)
|
normal
|
{
"blob_id": "638e21e1eb1e2e14244628260d9c7ac179983721",
"index": 2541,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile len(s) != 1:\n count += 1\n a = 0\n for i in range(len(s)):\n a += int(s[i])\n s = str(a)\nprint(count)\n",
"step-3": "s = input()\ncount = 0\nwhile len(s) != 1:\n count += 1\n a = 0\n for i in range(len(s)):\n a += int(s[i])\n s = str(a)\nprint(count)\n",
"step-4": "s=input()\r\ncount=0\r\nwhile(len(s)!=1):\r\n count+=1\r\n a=0\r\n for i in range(len(s)):\r\n a+=int(s[i])\r\n s=str(a)\r\nprint(count)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
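The loop computes the additive persistence of the input: how many digit-sum passes it takes to reach a single digit (199 -> 19 -> 10 -> 1 is three passes). The same logic as a reusable function, a sketch equivalent to the record's code:

def additive_persistence(s):
    count = 0
    while len(s) != 1:
        count += 1
        s = str(sum(int(ch) for ch in s))
    return count

assert additive_persistence('199') == 3
assert additive_persistence('7') == 0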
from __future__ import print_function
import os
import re
import xml.etree.ElementTree as ET
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl')
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
def _parse_build_script(script_path):
prj_dir = None
top_func_name = None
with open(script_path, 'r') as f:
for line in f.readlines():
if 'open_project' in line:
prj_dir = line.split()[-1]
elif 'set_top' in line:
top_func_name = line.split()[-1]
return prj_dir, top_func_name
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
# Get rid of namespaces (workaround to support two types of vivado_hls.app files)
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
def _show_csim_report(csim_file):
with open(csim_file, 'r') as f:
print('C SIMULATION RESULT:')
print(f.read())
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
if not full_report and '* DSP48' in line:
break
print(line, end = '')
|
normal
|
{
"blob_id": "7d173b0571c20dc8fcae884451e8f69ba3a05763",
"index": 8087,
"step-1": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\n<mask token>\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\n<mask token>\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-2": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\n<mask token>\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-3": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n return prj_dir, top_func_name\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-4": "from __future__ import print_function\nimport os\nimport re\nimport xml.etree.ElementTree as ET\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n return prj_dir, top_func_name\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-5": "from __future__ import print_function\nimport os\nimport re\nimport xml.etree.ElementTree as ET\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n\n prj_dir = None\n top_func_name = None\n\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl')\n\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n \n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.format(prj_dir, hls_dir))\n return\n \n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n \n return prj_dir, top_func_name\n\ndef _find_solutions(sln_dir):\n solutions = []\n\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n # Get rid of namespaces (workaround to support two types of vivado_hls.app files)\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):\n solutions.append(sln_name)\n \n return solutions\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n \n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end = '')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
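`read_vivado_report` only needs the directory that holds the generated `build_prj.tcl`; it recovers the project directory and top function name from that script, then walks each solution's csim and synthesis reports. A usage sketch with a hypothetical project directory:

# 'my-hls-test' is illustrative; pass whichever directory contains build_prj.tcl.
read_vivado_report('my-hls-test', full_report=True)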
<|reserved_special_token_0|>
<|reserved_special_token_1|>
columns = ['account', 'name', 'Death', 'archetype', 'profession', 'elite',
'phases.All.actual_boss.dps', 'phases.All.actual.dps',
'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',
'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',
'phases.All.buffs.aegis', 'phases.All.buffs.alacrity',
'phases.All.buffs.assassins_presence',
'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',
'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',
'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',
'phases.All.buffs.glyph_of_empowerment',
'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',
'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',
'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',
'phases.All.buffs.quickness', 'phases.All.buffs.regen',
'phases.All.buffs.resist', 'phases.All.buffs.retaliation',
'phases.All.buffs.soothing_mist', 'phases.All.buffs.spirit_of_frost',
'phases.All.buffs.spotter', 'phases.All.buffs.stab',
'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',
'phases.All.buffs.sun_spirit', 'phases.All.buffs.swift',
'phases.All.buffs.vampiric_presence', 'phases.All.buffs.vigor',
'phases.All.buffs_out.aegis', 'phases.All.buffs_out.alacrity',
'phases.All.buffs_out.assassins_presence',
'phases.All.buffs_out.banner_defence',
'phases.All.buffs_out.banner_discipline',
'phases.All.buffs_out.banner_strength',
'phases.All.buffs_out.banner_tactics',
'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',
'phases.All.buffs_out.glyph_of_empowerment',
'phases.All.buffs_out.lead_attacks',
'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',
'phases.All.buffs_out.naturalistic_resonance',
'phases.All.buffs_out.pinpoint_distribution',
'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',
'phases.All.buffs_out.regen', 'phases.All.buffs_out.resist',
'phases.All.buffs_out.retaliation',
'phases.All.buffs_out.soothing_mist',
'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',
'phases.All.buffs_out.stab', 'phases.All.buffs_out.stone_spirit',
'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',
'phases.All.buffs_out.swift', 'phases.All.buffs_out.vampiric_presence',
'phases.All.buffs_out.vigor', 'phases.All.events.dead_time',
'phases.All.events.deaths', 'phases.All.events.disconnect_time',
'phases.All.events.disconnects', 'phases.All.events.down_time',
'phases.All.events.downs', 'phases.All.received.dps',
'phases.All.shielded.dps']
old_columns = ['account', 'name', 'Death', 'archetype', 'profession',
'elite', 'phases.All.actual_boss.dps', 'phases.All.actual.dps',
'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',
'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',
'phases.All.buffs.alacrity', 'phases.All.buffs.assassins_presence',
'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',
'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',
'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',
'phases.All.buffs.glyph_of_empowerment', 'phases.All.buffs.gotl',
'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',
'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',
'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',
'phases.All.buffs.quickness', 'phases.All.buffs.soothing_mist',
'phases.All.buffs.spirit_of_frost', 'phases.All.buffs.spotter',
'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',
'phases.All.buffs.sun_spirit', 'phases.All.buffs.vampiric_presence',
'phases.All.buffs_out.alacrity',
'phases.All.buffs_out.assassins_presence',
'phases.All.buffs_out.banner_defence',
'phases.All.buffs_out.banner_discipline',
'phases.All.buffs_out.banner_strength',
'phases.All.buffs_out.banner_tactics',
'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',
'phases.All.buffs_out.glyph_of_empowerment',
'phases.All.buffs_out.gotl', 'phases.All.buffs_out.lead_attacks',
'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',
'phases.All.buffs_out.naturalistic_resonance',
'phases.All.buffs_out.pinpoint_distribution',
'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',
'phases.All.buffs_out.retaliation',
'phases.All.buffs_out.soothing_mist',
'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',
'phases.All.buffs_out.stone_spirit',
'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',
'phases.All.buffs_out.vampiric_presence', 'phases.All.events.dead_time',
'phases.All.events.deaths', 'phases.All.events.disconnect_time',
'phases.All.events.disconnects', 'phases.All.events.down_time',
'phases.All.events.downs', 'phases.All.received.dps',
'phases.All.shielded.dps']
vg_mechanics = ['phases.All.mechanics.Bullets Eaten',
'phases.All.mechanics.Teleports']
gors_mechanics = ['phases.All.mechanics.Ghastly Imprisonments',
'phases.All.mechanics.Spectral Darkness',
'phases.All.mechanics.Unmitigated Spectral Impacts']
sab_mechanics = []
sloth_mechanics = ['phases.All.mechanics.Spores Blocked',
'phases.All.mechanics.Spores Received',
'phases.All.mechanics.Tantrum Knockdowns',
'phases.All.mechanics.Toxic Cloud Breathed',
'phases.All.mechanics.Volatile Poison Carrier']
matt_mechanics = ['phases.All.mechanics.Burning Stacks Received',
'phases.All.mechanics.Corrupted',
'phases.All.mechanics.Moved While Unbalanced',
'phases.All.mechanics.Sacrificed',
'phases.All.mechanics.Shards Absorbed',
'phases.All.mechanics.Surrender',
'phases.All.mechanics.Well of the Profane Carrier']
kc_mechanics = ['phases.All.mechanics.Correct Orb',
'phases.All.mechanics.Wrong Orb']
xera_mechanics = ['phases.All.mechanics.Derangement']
cairn_mechanics = ['phases.All.mechanics.Displacement',
'phases.All.mechanics.Meteor Swarm',
'phases.All.mechanics.Shared Agony',
'phases.All.mechanics.Spatial Manipulation']
mo_mechanics = ['phases.All.mechanics.Claim', 'phases.All.mechanics.Dispel',
'phases.All.mechanics.Enemy Tile', 'phases.All.mechanics.Protect',
"phases.All.mechanics.Soldier's Aura"]
sam_mechanics = ['phases.All.mechanics.Anguished Bolt',
'phases.All.mechanics.Big Friend', 'phases.All.mechanics.Bludgeon',
'phases.All.mechanics.Charge', 'phases.All.mechanics.Claw',
'phases.All.mechanics.Fixate',
'phases.All.mechanics.Inevitable Betrayl',
'phases.All.mechanics.Prisoner Sweep', 'phases.All.mechanics.Shockwave',
'phases.All.mechanics.Small Friend', 'phases.All.mechanics.Spear Impact']
deimos_mechanics = ['phases.All.mechanics.Annihilate',
'phases.All.mechanics.Demonic Shockwave',
'phases.All.mechanics.Mind Crush', 'phases.All.mechanics.Rapid Decay',
'phases.All.mechanics.Soul Feast', 'phases.All.mechanics.Tear Consumed',
'phases.All.mechanics.Teleports']
sh_mechanics = ['phases.All.mechanics.Inner Vortex',
'phases.All.mechanics.Necrosis Received',
'phases.All.mechanics.Outer Vortex', 'phases.All.mechanics.Quad Slash',
'phases.All.mechanics.Scythe Hits', 'phases.All.mechanics.Soul Rift']
dhuum_mechanics = ['phases.All.mechanics.Death Marked',
'phases.All.mechanics.Dhuum Gaze', 'phases.All.mechanics.Fissured',
'phases.All.mechanics.Messenger', 'phases.All.mechanics.Putrid Bomb',
'phases.All.mechanics.Shackle Hits', 'phases.All.mechanics.Snatched',
'phases.All.mechanics.Sucked']
|
flexible
|
{
"blob_id": "fa948838b5c2d688fe8c748166f23ffc8e677f93",
"index": 9265,
"step-1": "<mask token>\n",
"step-2": "columns = ['account', 'name', 'Death', 'archetype', 'profession', 'elite',\n 'phases.All.actual_boss.dps', 'phases.All.actual.dps',\n 'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',\n 'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',\n 'phases.All.buffs.aegis', 'phases.All.buffs.alacrity',\n 'phases.All.buffs.assassins_presence',\n 'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',\n 'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',\n 'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',\n 'phases.All.buffs.glyph_of_empowerment',\n 'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',\n 'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',\n 'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',\n 'phases.All.buffs.quickness', 'phases.All.buffs.regen',\n 'phases.All.buffs.resist', 'phases.All.buffs.retaliation',\n 'phases.All.buffs.soothing_mist', 'phases.All.buffs.spirit_of_frost',\n 'phases.All.buffs.spotter', 'phases.All.buffs.stab',\n 'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',\n 'phases.All.buffs.sun_spirit', 'phases.All.buffs.swift',\n 'phases.All.buffs.vampiric_presence', 'phases.All.buffs.vigor',\n 'phases.All.buffs_out.aegis', 'phases.All.buffs_out.alacrity',\n 'phases.All.buffs_out.assassins_presence',\n 'phases.All.buffs_out.banner_defence',\n 'phases.All.buffs_out.banner_discipline',\n 'phases.All.buffs_out.banner_strength',\n 'phases.All.buffs_out.banner_tactics',\n 'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',\n 'phases.All.buffs_out.glyph_of_empowerment',\n 'phases.All.buffs_out.lead_attacks',\n 'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',\n 'phases.All.buffs_out.naturalistic_resonance',\n 'phases.All.buffs_out.pinpoint_distribution',\n 'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',\n 'phases.All.buffs_out.regen', 'phases.All.buffs_out.resist',\n 'phases.All.buffs_out.retaliation',\n 'phases.All.buffs_out.soothing_mist',\n 'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',\n 'phases.All.buffs_out.stab', 'phases.All.buffs_out.stone_spirit',\n 'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',\n 'phases.All.buffs_out.swift', 'phases.All.buffs_out.vampiric_presence',\n 'phases.All.buffs_out.vigor', 'phases.All.events.dead_time',\n 'phases.All.events.deaths', 'phases.All.events.disconnect_time',\n 'phases.All.events.disconnects', 'phases.All.events.down_time',\n 'phases.All.events.downs', 'phases.All.received.dps',\n 'phases.All.shielded.dps']\nold_columns = ['account', 'name', 'Death', 'archetype', 'profession',\n 'elite', 'phases.All.actual_boss.dps', 'phases.All.actual.dps',\n 'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',\n 'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',\n 'phases.All.buffs.alacrity', 'phases.All.buffs.assassins_presence',\n 'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',\n 'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',\n 'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',\n 'phases.All.buffs.glyph_of_empowerment', 'phases.All.buffs.gotl',\n 'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',\n 'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',\n 'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',\n 
'phases.All.buffs.quickness', 'phases.All.buffs.soothing_mist',\n 'phases.All.buffs.spirit_of_frost', 'phases.All.buffs.spotter',\n 'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',\n 'phases.All.buffs.sun_spirit', 'phases.All.buffs.vampiric_presence',\n 'phases.All.buffs_out.alacrity',\n 'phases.All.buffs_out.assassins_presence',\n 'phases.All.buffs_out.banner_defence',\n 'phases.All.buffs_out.banner_discipline',\n 'phases.All.buffs_out.banner_strength',\n 'phases.All.buffs_out.banner_tactics',\n 'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',\n 'phases.All.buffs_out.glyph_of_empowerment',\n 'phases.All.buffs_out.gotl', 'phases.All.buffs_out.lead_attacks',\n 'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',\n 'phases.All.buffs_out.naturalistic_resonance',\n 'phases.All.buffs_out.pinpoint_distribution',\n 'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',\n 'phases.All.buffs_out.retaliation',\n 'phases.All.buffs_out.soothing_mist',\n 'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',\n 'phases.All.buffs_out.stone_spirit',\n 'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',\n 'phases.All.buffs_out.vampiric_presence', 'phases.All.events.dead_time',\n 'phases.All.events.deaths', 'phases.All.events.disconnect_time',\n 'phases.All.events.disconnects', 'phases.All.events.down_time',\n 'phases.All.events.downs', 'phases.All.received.dps',\n 'phases.All.shielded.dps']\nvg_mechanics = ['phases.All.mechanics.Bullets Eaten',\n 'phases.All.mechanics.Teleports']\ngors_mechanics = ['phases.All.mechanics.Ghastly Imprisonments',\n 'phases.All.mechanics.Spectral Darkness',\n 'phases.All.mechanics.Unmitigated Spectral Impacts']\nsab_mechanics = []\nsloth_mechanics = ['phases.All.mechanics.Spores Blocked',\n 'phases.All.mechanics.Spores Received',\n 'phases.All.mechanics.Tantrum Knockdowns',\n 'phases.All.mechanics.Toxic Cloud Breathed',\n 'phases.All.mechanics.Volatile Poison Carrier']\nmatt_mechanics = ['phases.All.mechanics.Burning Stacks Received',\n 'phases.All.mechanics.Corrupted',\n 'phases.All.mechanics.Moved While Unbalanced',\n 'phases.All.mechanics.Sacrificed',\n 'phases.All.mechanics.Shards Absorbed',\n 'phases.All.mechanics.Surrender',\n 'phases.All.mechanics.Well of the Profane Carrier']\nkc_mechanics = ['phases.All.mechanics.Correct Orb',\n 'phases.All.mechanics.Wrong Orb']\nxera_mechanics = ['phases.All.mechanics.Derangement']\ncairn_mechanics = ['phases.All.mechanics.Displacement',\n 'phases.All.mechanics.Meteor Swarm',\n 'phases.All.mechanics.Shared Agony',\n 'phases.All.mechanics.Spatial Manipulation']\nmo_mechanics = ['phases.All.mechanics.Claim', 'phases.All.mechanics.Dispel',\n 'phases.All.mechanics.Enemy Tile', 'phases.All.mechanics.Protect',\n \"phases.All.mechanics.Soldier's Aura\"]\nsam_mechanics = ['phases.All.mechanics.Anguished Bolt',\n 'phases.All.mechanics.Big Friend', 'phases.All.mechanics.Bludgeon',\n 'phases.All.mechanics.Charge', 'phases.All.mechanics.Claw',\n 'phases.All.mechanics.Fixate',\n 'phases.All.mechanics.Inevitable Betrayl',\n 'phases.All.mechanics.Prisoner Sweep', 'phases.All.mechanics.Shockwave',\n 'phases.All.mechanics.Small Friend', 'phases.All.mechanics.Spear Impact']\ndeimos_mechanics = ['phases.All.mechanics.Annihilate',\n 'phases.All.mechanics.Demonic Shockwave',\n 'phases.All.mechanics.Mind Crush', 'phases.All.mechanics.Rapid Decay',\n 'phases.All.mechanics.Soul Feast', 'phases.All.mechanics.Tear Consumed',\n 
'phases.All.mechanics.Teleports']\nsh_mechanics = ['phases.All.mechanics.Inner Vortex',\n 'phases.All.mechanics.Necrosis Received',\n 'phases.All.mechanics.Outer Vortex', 'phases.All.mechanics.Quad Slash',\n 'phases.All.mechanics.Scythe Hits', 'phases.All.mechanics.Soul Rift']\ndhuum_mechanics = ['phases.All.mechanics.Death Marked',\n 'phases.All.mechanics.Dhuum Gaze', 'phases.All.mechanics.Fissured',\n 'phases.All.mechanics.Messenger', 'phases.All.mechanics.Putrid Bomb',\n 'phases.All.mechanics.Shackle Hits', 'phases.All.mechanics.Snatched',\n 'phases.All.mechanics.Sucked']\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
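These lists are column selectors for flattened raid-log stats (the boss prefixes vg, gors, sab, sloth, matt, kc, xera, cairn, mo, sam, deimos, sh and dhuum match Guild Wars 2 raid encounters); a per-boss view is the shared `columns` base plus that boss's mechanics columns. A sketch, assuming the logs are available as a CSV with these column names (the file name is hypothetical):

import pandas as pd

df = pd.read_csv('raid_logs.csv')       # hypothetical flattened log export
vg_view = df[columns + vg_mechanics]    # shared stats + Vale Guardian mechanics
print(vg_view['phases.All.actual_boss.dps'].describe())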
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
@app.route('/portfolio', methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
portfolio0 = ['195930', '133690', '273130', '284430', '183700']
portfolio1 = ['195930', '133690', '239660', '284430', '183700']
portfolio2 = ['195930', '133690', '239660', '278620', '284430']
portfolio3 = ['195930', '278530', '133690', '239660', '284430']
portfolio4 = ['195930', '278530', '277630', '133690', '284430']
portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']
portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']
portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']
portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']
portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ''
if is_oversea == '0':
db = 'ETF_US'
else:
db = 'ETF_KR'
print(db)
with oracle_engine.connect() as conn:
try:
sql = 'select * from ' + db + ' where invest_type=:1'
results = conn.execute(sql, invest_type).fetchall()
name_list = []
risk_list = []
weight_list = []
returns_1y = []
returns_3y = []
returns_5y = []
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
sql = 'select * from RETURN'
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
if is_oversea == '0':
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
elif invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'
].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] ==
ticker]['return'].map(float).values)
date_list[i] = list(return_df[return_df['ticker'] ==
ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
if is_oversea == '0':
sql = 'select * from pf_us'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
else:
sql = 'select * from pf_kr'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
count_list = [0, 0, 0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price,
KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,
KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,
KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,
KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
@app.route('/portfolio', methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
portfolio0 = ['195930', '133690', '273130', '284430', '183700']
portfolio1 = ['195930', '133690', '239660', '284430', '183700']
portfolio2 = ['195930', '133690', '239660', '278620', '284430']
portfolio3 = ['195930', '278530', '133690', '239660', '284430']
portfolio4 = ['195930', '278530', '277630', '133690', '284430']
portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']
portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']
portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']
portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']
portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ''
if is_oversea == '0':
db = 'ETF_US'
else:
db = 'ETF_KR'
print(db)
with oracle_engine.connect() as conn:
try:
sql = 'select * from ' + db + ' where invest_type=:1'
results = conn.execute(sql, invest_type).fetchall()
name_list = []
risk_list = []
weight_list = []
returns_1y = []
returns_3y = []
returns_5y = []
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
sql = 'select * from RETURN'
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
if is_oversea == '0':
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
elif invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'
].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] ==
ticker]['return'].map(float).values)
date_list[i] = list(return_df[return_df['ticker'] ==
ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
if is_oversea == '0':
sql = 'select * from pf_us'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
else:
sql = 'select * from pf_kr'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
count_list = [0, 0, 0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price,
KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,
KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,
KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,
KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
import json
import joblib
import numpy as np
import datetime
import sqlalchemy as sa
import cx_Oracle
import pandas as pd
from flask import Flask, render_template, session, request, redirect, url_for
app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/survey', methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,
s3, s4, s5]
i_list = list(map(int, i_list))
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = '안정추구형'
elif type_num == 1:
invest_type = '안정형'
elif type_num == 2:
invest_type = '적극투자형'
elif type_num == 3:
invest_type = '공격투자형'
else:
invest_type = '위험중립형'
return render_template('result.html', KEY_INVEST_TYPE=invest_type,
IS_OVERSEA=is_oversea)
@app.route('/portfolio', methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
portfolio0 = ['195930', '133690', '273130', '284430', '183700']
portfolio1 = ['195930', '133690', '239660', '284430', '183700']
portfolio2 = ['195930', '133690', '239660', '278620', '284430']
portfolio3 = ['195930', '278530', '133690', '239660', '284430']
portfolio4 = ['195930', '278530', '277630', '133690', '284430']
portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']
portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']
portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']
portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']
portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ''
if is_oversea == '0':
db = 'ETF_US'
else:
db = 'ETF_KR'
print(db)
with oracle_engine.connect() as conn:
try:
sql = 'select * from ' + db + ' where invest_type=:1'
results = conn.execute(sql, invest_type).fetchall()
name_list = []
risk_list = []
weight_list = []
returns_1y = []
returns_3y = []
returns_5y = []
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
sql = 'select * from RETURN'
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
if is_oversea == '0':
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
elif invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'
].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] ==
ticker]['return'].map(float).values)
date_list[i] = list(return_df[return_df['ticker'] ==
ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
if is_oversea == '0':
sql = 'select * from pf_us'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[46:]
else:
sql = 'select * from pf_kr'
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
count_list = [0, 0, 0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price,
KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,
KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,
KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,
KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
import json
import joblib
import numpy as np
import datetime
import sqlalchemy as sa
import cx_Oracle
import pandas as pd
from flask import Flask, render_template, session, request, redirect, url_for
app = Flask(__name__)
oracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')
@app.route("/")
def index():
return render_template('index.html')
@app.route("/survey", methods=['POST', 'GET'])
def survey():
if request.method == 'GET':
return render_template('survey.html')
if request.method == 'POST':
is_oversea = request.form['oversea']
gender = request.form['gender']
age = request.form['age']
income = request.form['income']
knowledge = request.form['knowledge']
exp = request.form['exp']
risk = request.form['risk']
term = request.form['term']
s1 = request.form['s1']
s2 = request.form['s2']
s3 = request.form['s3']
s4 = request.form['s4']
s5 = request.form['s5']
i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2, s3, s4, s5]
i_list = list(map(int, i_list)) # str -> int
score = sum(i_list)
i_list.append(score)
data = np.array(i_list).reshape(1, -1)
clf = joblib.load('./models/rf_model.pkl')
type_num = clf.predict(data)
if type_num == 0:
invest_type = "안정추구형"
elif type_num == 1:
invest_type = "안정형"
elif type_num == 2:
invest_type = "적극투자형"
elif type_num == 3:
invest_type = "공격투자형"
else:
invest_type = "위험중립형"
return render_template('result.html', KEY_INVEST_TYPE=invest_type, IS_OVERSEA=is_oversea)
@app.route("/portfolio", methods=['POST', 'GET'])
def portfolio():
if request.method == 'POST':
        # Domestic (KR) portfolios
        portfolio0 = ['195930', '133690', '273130', '284430', '183700']  # stable
        portfolio1 = ['195930', '133690', '239660', '284430', '183700']  # stability-seeking
        portfolio2 = ['195930', '133690', '239660', '278620', '284430']  # risk-neutral
        portfolio3 = ['195930', '278530', '133690', '239660', '284430']  # active
        portfolio4 = ['195930', '278530', '277630', '133690', '284430']  # aggressive
        # US portfolios
        portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']  # stable
        portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']  # stability-seeking
        portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']  # risk-neutral
        portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']  # active
        portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']  # aggressive
price = request.form['price']
invest_type = request.form['type']
risk_no = request.form['risk_no']
is_oversea = request.form['oversea']
db = ""
        if is_oversea == '0':  # overseas (US) ETFs
            db = "ETF_US"
        else:  # domestic (KR) ETFs
            db = "ETF_KR"
print(db)
with oracle_engine.connect() as conn:
try:
sql = "select * from " + db + " where invest_type=:1"
results = conn.execute(sql, (invest_type)).fetchall()
                name_list = []    # product name
                risk_list = []    # risk grade
                weight_list = []  # weight
                returns_1y = []   # 1-year return
                returns_3y = []   # 3-year return
                returns_5y = []   # 5-year return
for etf in results:
name_list.append(etf[0])
risk_list.append(etf[2])
weight_list.append(etf[3])
returns_1y.append(etf[4])
returns_3y.append(etf[5])
returns_5y.append(etf[6])
                # fetch past return data for the products of this investor type
sql = "select * from RETURN"
return_df = pd.read_sql(sql, conn)
etf_list = []
return_list = {}
date_list = {}
                if is_oversea == '0':  # overseas (US)
if invest_type == '안정형':
portfolio_data = portfolio5
elif invest_type == '안정추구형':
portfolio_data = portfolio6
elif invest_type == '위험중립형':
portfolio_data = portfolio7
elif invest_type == '적극투자형':
portfolio_data = portfolio8
else:
portfolio_data = portfolio9
else:
if invest_type == '안정형':
portfolio_data = portfolio0
elif invest_type == '안정추구형':
portfolio_data = portfolio1
elif invest_type == '위험중립형':
portfolio_data = portfolio2
elif invest_type == '적극투자형':
portfolio_data = portfolio3
else:
portfolio_data = portfolio4
for i, ticker in enumerate(portfolio_data):
name = return_df[return_df['ticker'] == ticker]['name'].unique().tolist()[0]
if name not in etf_list:
etf_list.append(name)
return_list[i] = list(return_df[return_df['ticker'] == ticker]['return'].map(float).values)
date_list[i] = list(
return_df[return_df['ticker'] == ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())
                # fetch the portfolio return series (used for the backtesting chart)
                if is_oversea == '0':  # overseas (US)
                    sql = "select * from pf_us"
                    pf_df = pd.read_sql(sql, conn)
                    pf_df = pf_df[46:]
                else:  # domestic (KR)
sql = "select * from pf_kr"
pf_df = pd.read_sql(sql, conn)
pf_df = pf_df[140:]
pf_list = pf_df[invest_type].map(float).tolist()
bt_data = []
for i, pf in enumerate(pf_list):
                    bt_data.append({'x': i, 'y': pf})
except Exception as e:
print(e)
        # count risk grades (used to show proportions in the pie chart)
count_list = [0,0,0]
for risk_type in risk_list:
if risk_type == '위험':
count_list[0] += 1
elif risk_type == '중립':
count_list[1] += 1
else:
count_list[2] += 1
return render_template('portfolio.html', KEY_PRICE=price, KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,
KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list, KEY_COUNT_LIST=count_list,
KEY_RETURN_1Y=returns_1y, KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,
KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list, KEY_DATE_LIST=date_list,
KEY_BACKTESTING=bt_data)
if __name__ == '__main__':
app.run(debug=True)
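The `/survey` route builds a 13-element feature vector (twelve integer answers followed by their sum) before calling the pickled classifier, and `/portfolio` then keys everything off the predicted type string. A sketch of the same model call outside Flask, with hypothetical answer values in the order the form collects them:

import joblib
import numpy as np

# gender, age, income, knowledge, exp, risk, term, s1..s5 (illustrative values)
answers = [1, 3, 2, 2, 1, 3, 2, 4, 3, 2, 3, 4]
features = answers + [sum(answers)]               # 13th feature: total score
clf = joblib.load('./models/rf_model.pkl')
print(clf.predict(np.array(features).reshape(1, -1)))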
|
flexible
|
{
"blob_id": "74aa93bf3731d4e3ddb920bedc7daced50b4f2c3",
"index": 1565,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\n@app.route('/portfolio', methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n portfolio0 = ['195930', '133690', '273130', '284430', '183700']\n portfolio1 = ['195930', '133690', '239660', '284430', '183700']\n portfolio2 = ['195930', '133690', '239660', '278620', '284430']\n portfolio3 = ['195930', '278530', '133690', '239660', '284430']\n portfolio4 = ['195930', '278530', '277630', '133690', '284430']\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n db = ''\n if is_oversea == '0':\n db = 'ETF_US'\n else:\n db = 'ETF_KR'\n print(db)\n with oracle_engine.connect() as conn:\n try:\n sql = 'select * from ' + db + ' where invest_type=:1'\n results = conn.execute(sql, invest_type).fetchall()\n name_list = []\n risk_list = []\n weight_list = []\n returns_1y = []\n returns_3y = []\n returns_5y = []\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n sql = 'select * from RETURN'\n return_df = pd.read_sql(sql, conn)\n etf_list = []\n return_list = {}\n date_list = {}\n if is_oversea == '0':\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n elif invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'\n ].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n return_list[i] = 
list(return_df[return_df['ticker'] ==\n ticker]['return'].map(float).values)\n date_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n if is_oversea == '0':\n sql = 'select * from pf_us'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else:\n sql = 'select * from pf_kr'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n pf_list = pf_df[invest_type].map(float).tolist()\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf})\n except Exception as e:\n print(e)\n count_list = [0, 0, 0]\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n return render_template('portfolio.html', KEY_PRICE=price,\n KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,\n KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,\n KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,\n KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\noracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\n@app.route('/portfolio', methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n portfolio0 = ['195930', '133690', '273130', '284430', '183700']\n portfolio1 = ['195930', '133690', '239660', '284430', '183700']\n portfolio2 = ['195930', '133690', '239660', '278620', '284430']\n portfolio3 = ['195930', '278530', '133690', '239660', '284430']\n portfolio4 = ['195930', '278530', '277630', '133690', '284430']\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n db = ''\n if is_oversea == '0':\n db = 'ETF_US'\n else:\n db = 'ETF_KR'\n print(db)\n with oracle_engine.connect() as conn:\n try:\n sql = 'select * from ' + db + ' where invest_type=:1'\n results = conn.execute(sql, invest_type).fetchall()\n name_list = []\n risk_list = []\n weight_list = []\n returns_1y = []\n returns_3y = []\n returns_5y = []\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n sql = 'select * from RETURN'\n return_df = pd.read_sql(sql, conn)\n etf_list = []\n return_list = {}\n date_list = {}\n if is_oversea == '0':\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n elif invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'\n 
].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n return_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['return'].map(float).values)\n date_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n if is_oversea == '0':\n sql = 'select * from pf_us'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else:\n sql = 'select * from pf_kr'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n pf_list = pf_df[invest_type].map(float).tolist()\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf})\n except Exception as e:\n print(e)\n count_list = [0, 0, 0]\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n return render_template('portfolio.html', KEY_PRICE=price,\n KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,\n KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,\n KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,\n KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "import json\nimport joblib\nimport numpy as np\nimport datetime\nimport sqlalchemy as sa\nimport cx_Oracle\nimport pandas as pd\nfrom flask import Flask, render_template, session, request, redirect, url_for\napp = Flask(__name__)\noracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/survey', methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2,\n s3, s4, s5]\n i_list = list(map(int, i_list))\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n if type_num == 0:\n invest_type = '안정추구형'\n elif type_num == 1:\n invest_type = '안정형'\n elif type_num == 2:\n invest_type = '적극투자형'\n elif type_num == 3:\n invest_type = '공격투자형'\n else:\n invest_type = '위험중립형'\n return render_template('result.html', KEY_INVEST_TYPE=invest_type,\n IS_OVERSEA=is_oversea)\n\n\n@app.route('/portfolio', methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n portfolio0 = ['195930', '133690', '273130', '284430', '183700']\n portfolio1 = ['195930', '133690', '239660', '284430', '183700']\n portfolio2 = ['195930', '133690', '239660', '278620', '284430']\n portfolio3 = ['195930', '278530', '133690', '239660', '284430']\n portfolio4 = ['195930', '278530', '277630', '133690', '284430']\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ']\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL']\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK']\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK']\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK']\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n db = ''\n if is_oversea == '0':\n db = 'ETF_US'\n else:\n db = 'ETF_KR'\n print(db)\n with oracle_engine.connect() as conn:\n try:\n sql = 'select * from ' + db + ' where invest_type=:1'\n results = conn.execute(sql, invest_type).fetchall()\n name_list = []\n risk_list = []\n weight_list = []\n returns_1y = []\n returns_3y = []\n returns_5y = []\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n sql = 'select * from RETURN'\n return_df = pd.read_sql(sql, conn)\n etf_list = []\n return_list = {}\n date_list = {}\n if is_oversea == '0':\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n elif invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == 
'적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'\n ].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n return_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['return'].map(float).values)\n date_list[i] = list(return_df[return_df['ticker'] ==\n ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n if is_oversea == '0':\n sql = 'select * from pf_us'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else:\n sql = 'select * from pf_kr'\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n pf_list = pf_df[invest_type].map(float).tolist()\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf})\n except Exception as e:\n print(e)\n count_list = [0, 0, 0]\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n return render_template('portfolio.html', KEY_PRICE=price,\n KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list,\n KEY_COUNT_LIST=count_list, KEY_RETURN_1Y=returns_1y,\n KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list,\n KEY_DATE_LIST=date_list, KEY_BACKTESTING=bt_data)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "import json\nimport joblib\nimport numpy as np\nimport datetime\nimport sqlalchemy as sa\nimport cx_Oracle\nimport pandas as pd\n\nfrom flask import Flask, render_template, session, request, redirect, url_for\n\napp = Flask(__name__)\noracle_engine = sa.create_engine('oracle://ft:1234@localhost:1522/xe')\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route(\"/survey\", methods=['POST', 'GET'])\ndef survey():\n if request.method == 'GET':\n return render_template('survey.html')\n\n if request.method == 'POST':\n is_oversea = request.form['oversea']\n\n gender = request.form['gender']\n age = request.form['age']\n income = request.form['income']\n knowledge = request.form['knowledge']\n exp = request.form['exp']\n risk = request.form['risk']\n term = request.form['term']\n s1 = request.form['s1']\n s2 = request.form['s2']\n s3 = request.form['s3']\n s4 = request.form['s4']\n s5 = request.form['s5']\n\n i_list = [gender, age, income, knowledge, exp, risk, term, s1, s2, s3, s4, s5]\n i_list = list(map(int, i_list)) # str -> int\n score = sum(i_list)\n i_list.append(score)\n data = np.array(i_list).reshape(1, -1)\n\n clf = joblib.load('./models/rf_model.pkl')\n type_num = clf.predict(data)\n\n if type_num == 0:\n invest_type = \"안정추구형\"\n elif type_num == 1:\n invest_type = \"안정형\"\n elif type_num == 2:\n invest_type = \"적극투자형\"\n elif type_num == 3:\n invest_type = \"공격투자형\"\n else:\n invest_type = \"위험중립형\"\n\n return render_template('result.html', KEY_INVEST_TYPE=invest_type, IS_OVERSEA=is_oversea)\n\n@app.route(\"/portfolio\", methods=['POST', 'GET'])\ndef portfolio():\n if request.method == 'POST':\n # 국내\n portfolio0 = ['195930', '133690', '273130', '284430', '183700'] # 안정형\n portfolio1 = ['195930', '133690', '239660', '284430', '183700'] # 안정추구형\n portfolio2 = ['195930', '133690', '239660', '278620', '284430'] # 위험중립형\n portfolio3 = ['195930', '278530', '133690', '239660', '284430'] # 적극투자형\n portfolio4 = ['195930', '278530', '277630', '133690', '284430'] # 공격투자형\n\n # 미국\n portfolio5 = ['OILK', 'BBJP', 'ARKK', 'PALL', 'QQQ'] # 안정형\n portfolio6 = ['OILK', 'SPHB', 'BBJP', 'ARKK', 'PALL'] # 안정추구형\n portfolio7 = ['OILK', 'SPHB', 'JAGG', 'BBJP', 'ARKK'] # 위험중립형\n portfolio8 = ['OILK', 'BBCA', 'SPHB', 'JAGG', 'ARKK'] # 적극투자형\n portfolio9 = ['OILK', 'BBCA', 'BBEU', 'BBJP', 'ARKK'] # 공격투자형\n\n price = request.form['price']\n invest_type = request.form['type']\n risk_no = request.form['risk_no']\n is_oversea = request.form['oversea']\n\n db = \"\"\n\n if is_oversea == '0': # 해외 ETF\n db = \"ETF_US\"\n else: # 국내 ETF\n db = \"ETF_KR\"\n\n print(db)\n\n with oracle_engine.connect() as conn:\n try:\n sql = \"select * from \" + db + \" where invest_type=:1\"\n results = conn.execute(sql, (invest_type)).fetchall()\n\n name_list = [] # 상품명\n risk_list = [] # 위험등급\n weight_list = [] # 가중치\n returns_1y = [] # 1년 수익률\n returns_3y = [] # 3년 수익률\n returns_5y = [] # 5년 수익률\n\n for etf in results:\n name_list.append(etf[0])\n risk_list.append(etf[2])\n weight_list.append(etf[3])\n returns_1y.append(etf[4])\n returns_3y.append(etf[5])\n returns_5y.append(etf[6])\n\n # 투자성향 상품별 과거 수익률 데이터 가져오기\n sql = \"select * from RETURN\"\n return_df = pd.read_sql(sql, conn)\n\n etf_list = []\n return_list = {}\n date_list = {}\n\n if is_oversea == '0': # 해외\n if invest_type == '안정형':\n portfolio_data = portfolio5\n elif invest_type == '안정추구형':\n portfolio_data = portfolio6\n elif invest_type == '위험중립형':\n portfolio_data = portfolio7\n elif invest_type == '적극투자형':\n 
portfolio_data = portfolio8\n else:\n portfolio_data = portfolio9\n else:\n if invest_type == '안정형':\n portfolio_data = portfolio0\n elif invest_type == '안정추구형':\n portfolio_data = portfolio1\n elif invest_type == '위험중립형':\n portfolio_data = portfolio2\n elif invest_type == '적극투자형':\n portfolio_data = portfolio3\n else:\n portfolio_data = portfolio4\n\n for i, ticker in enumerate(portfolio_data):\n name = return_df[return_df['ticker'] == ticker]['name'].unique().tolist()[0]\n if name not in etf_list:\n etf_list.append(name)\n\n return_list[i] = list(return_df[return_df['ticker'] == ticker]['return'].map(float).values)\n date_list[i] = list(\n return_df[return_df['ticker'] == ticker]['rdate'].dt.strftime('%Y-%m-%d').unique())\n\n # 포트폴리오 수익률 데이터 가져오기\n if is_oversea == '0': # 해외\n sql = \"select * from pf_us\"\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[46:]\n else: # 국내\n sql = \"select * from pf_kr\"\n pf_df = pd.read_sql(sql, conn)\n pf_df = pf_df[140:]\n\n pf_list = pf_df[invest_type].map(float).tolist()\n\n bt_data = []\n for i, pf in enumerate(pf_list):\n bt_data.append({'x': i, 'y': pf});\n\n except Exception as e:\n print(e)\n\n # 투자 등급 카운팅 (파이차트에 비중 나타내기 위해 사용)\n count_list = [0,0,0]\n\n for risk_type in risk_list:\n if risk_type == '위험':\n count_list[0] += 1\n elif risk_type == '중립':\n count_list[1] += 1\n else:\n count_list[2] += 1\n\n return render_template('portfolio.html', KEY_PRICE=price, KEY_INVEST_TYPE=invest_type, KEY_NAME_LIST=name_list,\n KEY_RISK_LIST=risk_list, KEY_WEIGHT_LIST=weight_list, KEY_COUNT_LIST=count_list,\n KEY_RETURN_1Y=returns_1y, KEY_RETURN_3Y=returns_3y, KEY_RETURN_5Y=returns_5y,\n KEY_ETF_LIST=etf_list, KEY_RETURN_LIST=return_list, KEY_DATE_LIST=date_list,\n KEY_BACKTESTING=bt_data)\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from requests import get
from bs4 import BeautifulSoup, SoupStrainer
import httplib2
import re
from win32printing import Printer
def getLinks(html):
    # collect every anchor on the page whose href ends in .pdf
    links = []
    document = BeautifulSoup(html, "html.parser")
    for element in document.findAll('a', href=re.compile(".pdf$")):
        links.append(element.get('href'))
    return links
site = 'https://greenteapress.com/wp/think-python/'
http = httplib2.Http()
status, response = http.request(site)
pdf_links = getLinks(response)
files = []
# fetch each PDF (assumes the hrefs on the page are absolute URLs)
for link in pdf_links:
    pdf_file = get(link)
    files.append(pdf_file)
with Printer(linegap=1) as printer:
    for pdf_file in files:
        printer.text(pdf_file.text)
|
normal
|
{
"blob_id": "dbb007af79b2da2b5474281759c2bcce2a836fb5",
"index": 1254,
"step-1": "<mask token>\n\n\ndef getLinks(url):\n links = []\n document = BeautifulSoup(response, 'html.parser')\n for element in document.findAll('a', href=re.compile('.pdf$')):\n links.append(element.get('href'))\n return links\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getLinks(url):\n links = []\n document = BeautifulSoup(response, 'html.parser')\n for element in document.findAll('a', href=re.compile('.pdf$')):\n links.append(element.get('href'))\n return links\n\n\n<mask token>\nfor link in pdf_links:\n pdf_file = requests.get(url)\n files.append(pdf_file)\nwith Printer(linegap=1) as printer:\n for pdf_file in files:\n printer.text(pdf_file)\n",
"step-3": "<mask token>\n\n\ndef getLinks(url):\n links = []\n document = BeautifulSoup(response, 'html.parser')\n for element in document.findAll('a', href=re.compile('.pdf$')):\n links.append(element.get('href'))\n return links\n\n\nsite = 'https://greenteapress.com/wp/think-python/'\nhttp = httplib2.Http()\nstatus, response = http.request(site)\npdf_links = getLinks(response)\nfiles = []\nfor link in pdf_links:\n pdf_file = requests.get(url)\n files.append(pdf_file)\nwith Printer(linegap=1) as printer:\n for pdf_file in files:\n printer.text(pdf_file)\n",
"step-4": "from requests import get\nfrom bs4 import BeautifulSoup, SoupStrainer\nimport httplib2\nimport re\nfrom win32printing import Printer\n\n\ndef getLinks(url):\n links = []\n document = BeautifulSoup(response, 'html.parser')\n for element in document.findAll('a', href=re.compile('.pdf$')):\n links.append(element.get('href'))\n return links\n\n\nsite = 'https://greenteapress.com/wp/think-python/'\nhttp = httplib2.Http()\nstatus, response = http.request(site)\npdf_links = getLinks(response)\nfiles = []\nfor link in pdf_links:\n pdf_file = requests.get(url)\n files.append(pdf_file)\nwith Printer(linegap=1) as printer:\n for pdf_file in files:\n printer.text(pdf_file)\n",
"step-5": "from requests import get\nfrom bs4 import BeautifulSoup, SoupStrainer\nimport httplib2\nimport re\nfrom win32printing import Printer\n\ndef getLinks(url):\n links = []\n document = BeautifulSoup(response, \"html.parser\")\n\n for element in document.findAll('a', href=re.compile(\".pdf$\")):\n links.append(element.get('href'))\n\n return links\n\nsite = 'https://greenteapress.com/wp/think-python/'\n\nhttp = httplib2.Http()\nstatus, response = http.request(site)\npdf_links = getLinks(response) \n\nfiles = []\n\nfor link in pdf_links:\n pdf_file = requests.get(url)\n files.append(pdf_file)\n\nwith Printer(linegap=1) as printer:\n for pdf_file in files:\n printer.text(pdf_file)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from collections import defaultdict
from docopt import docopt
__doc__ = """{f}
Usage:
{f} <used_file>
{f} -h | --help
Options:
-h --help Show this screen and exit.
""".format(f=__file__)
args = docopt(__doc__)
used_file = args['<used_file>']
exceed_list = []
user_limit_dict = defaultdict(float)
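# maps each user name (CSV column 0) to that user's limit (column 1)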
user_limit_f = open('/opt/uge/Accounting_Statistics/etc/user_limit_py.csv', 'r')
reader = csv.reader(user_limit_f)
header = next(reader)
for row in reader:
user_limit_dict[row[0]] = float(row[1])
print(user_limit_dict)
used_f = open(used_file, 'r')
reader = csv.DictReader(used_f)
for row in reader:
    print(row)
|
normal
|
{
"blob_id": "40b6d62f1e360c0df19b7e98fcb67dbd578e709f",
"index": 736,
"step-1": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport csv\nfrom collections import defaultdict\nfrom docopt import docopt\n\n__doc__ = \"\"\"{f}\n\nUsage:\n {f} <used_file>\n {f} -h | --help\n\nOptions:\n -h --help Show this screen and exit.\n\"\"\".format(f=__file__)\n\nargs = docopt(__doc__)\nused_file = args['<used_file>']\n\nexceed_list = []\n\nuser_limit_dict = defaultdict(float)\n\nuser_limit_f = open('/opt/uge/Accounting_Statistics/etc/user_limit_py.csv', 'r')\n\nreader = csv.reader(user_limit_f)\nheader = next(reader)\nfor row in reader:\n user_limit_dict[row[0]] = float(row[1])\n\nprint user_limit_dict\n\n\nused_f = open(used_file, 'r')\n\nreader = csv.DictReader(used_f)\nfor row in reader:\n print row\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/python
# Project Euler problem 21
"""Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
If d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10,000."""
import math
# This is inefficient.
def get_divs(n):
divs = [1]
check = 2
rootn = math.sqrt(n)
while check < rootn:
if n % check == 0:
divs.append(check)
            divs.append(n // check)  # floor division keeps the divisors integral
check += 1
if rootn == check:
divs.append(check)
divs.sort()
return divs
def amicable(a):
b = sum(get_divs(a))
if a == b: return 0
sum_b_divs = sum(get_divs(b))
if a == sum_b_divs:
return b
return 0
# each member of a pair contributes its partner, so this totals both members of every pair found
print(sum([amicable(i) for i in range(1, 10000)]))
|
normal
|
{
"blob_id": "2ee5991e2b6de6ee48c8207f2b78574fc8a02fc0",
"index": 2432,
"step-1": "#! /usr/bin/python\n\n# Project Euler problem 21\n\n\"\"\"Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\nIf d(a) = b and d(b) = a, where a != b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n\nFor example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10,000.\"\"\"\n\nimport math\n\n# This is inefficient.\ndef get_divs(n): \n divs = [1] \n check = 2 \n rootn = math.sqrt(n)\n \n while check < rootn: \n if n % check == 0: \n divs.append(check) \n divs.append(n / check) \n check += 1\n \n if rootn == check: \n divs.append(check) \n divs.sort()\n \n return divs \n\ndef amicable(a):\n b = sum(get_divs(a))\n \n if a == b: return 0\n \n sum_b_divs = sum(get_divs(b))\n \n if a == sum_b_divs:\n return b\n \n return 0\n\nprint sum([amicable(i) for i in range(1, 10000)])\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for row in range(0, 7):
for col in range(0, 7):
if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (
col > 0 and col < 6) or (row == 1 or row == 5 or row == 4
) and col == 6 or row == 3 and (col != 2 and col != 1):
result = result + '*'
else:
result = result + ' '
result = result + '\n'
print(result)
<|reserved_special_token_1|>
result = ''
for row in range(0, 7):
for col in range(0, 7):
if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (
col > 0 and col < 6) or (row == 1 or row == 5 or row == 4
) and col == 6 or row == 3 and (col != 2 and col != 1):
result = result + '*'
else:
result = result + ' '
result = result + '\n'
print(result)
<|reserved_special_token_1|>
# Write a Python program to print alphabet pattern 'G'.
result = ''
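# each clause of the condition below draws one stroke of the 'G': left bar, top/bottom bars, right-side segments, inner middle bar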
for row in range(0,7):
for col in range(0,7):
if ((col ==0) and (row !=0 and row !=6) or ((row ==0 or row == 6) and (col>0 and col<6))or ((row ==1 or row == 5 or row == 4)and (col ==6))or ((row ==3)and ((col!=2)and col!=1))):
result = result+'*'
else:
result = result+' '
result=result+'\n'
print(result)
|
flexible
|
{
"blob_id": "e598091fc6c05b1d7f9f35f2ae58494fed53f9af",
"index": 5392,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor row in range(0, 7):\n for col in range(0, 7):\n if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (\n col > 0 and col < 6) or (row == 1 or row == 5 or row == 4\n ) and col == 6 or row == 3 and (col != 2 and col != 1):\n result = result + '*'\n else:\n result = result + ' '\n result = result + '\\n'\nprint(result)\n",
"step-3": "result = ''\nfor row in range(0, 7):\n for col in range(0, 7):\n if col == 0 and (row != 0 and row != 6) or (row == 0 or row == 6) and (\n col > 0 and col < 6) or (row == 1 or row == 5 or row == 4\n ) and col == 6 or row == 3 and (col != 2 and col != 1):\n result = result + '*'\n else:\n result = result + ' '\n result = result + '\\n'\nprint(result)\n",
"step-4": "# Write a Python program to print alphabet pattern 'G'.\n\nresult = ''\nfor row in range(0,7):\n for col in range(0,7):\n if ((col ==0) and (row !=0 and row !=6) or ((row ==0 or row == 6) and (col>0 and col<6))or ((row ==1 or row == 5 or row == 4)and (col ==6))or ((row ==3)and ((col!=2)and col!=1))):\n result = result+'*'\n else:\n result = result+' '\n result=result+'\\n'\nprint(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
alphabet = ' ' + string.ascii_lowercase
<|reserved_special_token_1|>
import string
alphabet = ' ' + string.ascii_lowercase
<|reserved_special_token_1|>
# Let's look at the lowercase letters.
import string
alphabet = " " + string.ascii_lowercase
|
flexible
|
{
"blob_id": "da3be0d3b815e11d292a7c7e8f5ce32b35580f98",
"index": 1016,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nalphabet = ' ' + string.ascii_lowercase\n",
"step-3": "import string\nalphabet = ' ' + string.ascii_lowercase\n",
"step-4": "# Let's look at the lowercase letters.\nimport string\nalphabet = \" \" + string.ascii_lowercase\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',
'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',
'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',
'SCHEDULERS', 'TRANSFORMS']
<|reserved_special_token_1|>
from catalyst_rl.contrib.registry import Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES, Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform, TRANSFORMS
from catalyst_rl.core.registry import Callback, CALLBACKS
from catalyst_rl.utils.tools.registry import Registry
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',
'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',
'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',
'SCHEDULERS', 'TRANSFORMS']
<|reserved_special_token_1|>
from catalyst_rl.contrib.registry import (
Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,
Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,
TRANSFORMS
)
from catalyst_rl.core.registry import Callback, CALLBACKS
from catalyst_rl.utils.tools.registry import Registry
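# Deferred registration: the callbacks module is imported and registered only when
# the registry is first used; the late_add indirection presumably avoids a circular import.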
def _callbacks_loader(r: Registry):
from catalyst_rl.dl import callbacks as m
r.add_from_module(m)
CALLBACKS.late_add(_callbacks_loader)
__all__ = [
"Callback",
"Criterion",
"Optimizer",
"Scheduler",
"Module",
"Model",
"Sampler",
"Transform",
"CALLBACKS",
"CRITERIONS",
"GRAD_CLIPPERS",
"MODELS",
"MODULES",
"OPTIMIZERS",
"SAMPLERS",
"SCHEDULERS",
"TRANSFORMS",
]
|
flexible
|
{
"blob_id": "09d13fe6b090850782feb601412cf135d497136f",
"index": 6206,
"step-1": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',\n 'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',\n 'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',\n 'SCHEDULERS', 'TRANSFORMS']\n",
"step-4": "from catalyst_rl.contrib.registry import Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES, Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform, TRANSFORMS\nfrom catalyst_rl.core.registry import Callback, CALLBACKS\nfrom catalyst_rl.utils.tools.registry import Registry\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n__all__ = ['Callback', 'Criterion', 'Optimizer', 'Scheduler', 'Module',\n 'Model', 'Sampler', 'Transform', 'CALLBACKS', 'CRITERIONS',\n 'GRAD_CLIPPERS', 'MODELS', 'MODULES', 'OPTIMIZERS', 'SAMPLERS',\n 'SCHEDULERS', 'TRANSFORMS']\n",
"step-5": "from catalyst_rl.contrib.registry import (\n Criterion, CRITERIONS, GRAD_CLIPPERS, Model, MODELS, Module, MODULES,\n Optimizer, OPTIMIZERS, Sampler, SAMPLERS, Scheduler, SCHEDULERS, Transform,\n TRANSFORMS\n)\nfrom catalyst_rl.core.registry import Callback, CALLBACKS\nfrom catalyst_rl.utils.tools.registry import Registry\n\n\ndef _callbacks_loader(r: Registry):\n from catalyst_rl.dl import callbacks as m\n r.add_from_module(m)\n\n\nCALLBACKS.late_add(_callbacks_loader)\n\n__all__ = [\n \"Callback\",\n \"Criterion\",\n \"Optimizer\",\n \"Scheduler\",\n \"Module\",\n \"Model\",\n \"Sampler\",\n \"Transform\",\n \"CALLBACKS\",\n \"CRITERIONS\",\n \"GRAD_CLIPPERS\",\n \"MODELS\",\n \"MODULES\",\n \"OPTIMIZERS\",\n \"SAMPLERS\",\n \"SCHEDULERS\",\n \"TRANSFORMS\",\n]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Violation(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Violation(object):
def __init__(self, line, column, code, message):
self.line = line
self.column = column
self.code = code
self.message = message
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Violation(object):
def __init__(self, line, column, code, message):
self.line = line
self.column = column
self.code = code
self.message = message
def __str__(self):
return self.message
<|reserved_special_token_0|>
<|reserved_special_token_1|>
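# A single reported violation: a source position (line, column), a short code, and a human-readable message.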
class Violation(object):
def __init__(self, line, column, code, message):
self.line = line
self.column = column
self.code = code
self.message = message
def __str__(self):
return self.message
def __repr__(self):
return 'Violation(line={}, column={}, code="{}", message="{}")'.format(
self.line, self.column, self.code, self.message)
|
flexible
|
{
"blob_id": "c513ad6ef12ae7be5d17d8d44787691cbc065207",
"index": 9989,
"step-1": "class Violation(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class Violation(object):\n\n def __init__(self, line, column, code, message):\n self.line = line\n self.column = column\n self.code = code\n self.message = message\n <mask token>\n <mask token>\n",
"step-3": "class Violation(object):\n\n def __init__(self, line, column, code, message):\n self.line = line\n self.column = column\n self.code = code\n self.message = message\n\n def __str__(self):\n return self.message\n <mask token>\n",
"step-4": "class Violation(object):\n\n def __init__(self, line, column, code, message):\n self.line = line\n self.column = column\n self.code = code\n self.message = message\n\n def __str__(self):\n return self.message\n\n def __repr__(self):\n return 'Violation(line={}, column={}, code=\"{}\", message=\"{}\")'.format(\n self.line, self.column, self.code, self.message)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from redis_interval.client import RedisInterval
class TestRedisIntervalIADD(object):
""" Tests the IADD command """
@classmethod
def setup_class(cls):
cls.redis = RedisInterval(host="localhost")
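        # assumes a Redis server on localhost that understands the interval commands (IADD etc.)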
def test_add_simple_text(self):
""" Add simple text inside an interval """
value = self.redis.iadd("test", 0, 10, "simple text")
assert value == 'OK'
|
normal
|
{
"blob_id": "0e7732ffcada864fb83b59625c5b9abb01150aaa",
"index": 1702,
"step-1": "<mask token>\n\n\nclass TestRedisIntervalIADD(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestRedisIntervalIADD(object):\n <mask token>\n\n @classmethod\n def setup_class(cls):\n cls.redis = RedisInterval(host='localhost')\n\n def test_add_simple_text(self):\n \"\"\" Add simple text inside an interval \"\"\"\n value = self.redis.iadd('test', 0, 10, 'simple text')\n assert value == 'OK'\n",
"step-3": "<mask token>\n\n\nclass TestRedisIntervalIADD(object):\n \"\"\" Tests the IADD command \"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.redis = RedisInterval(host='localhost')\n\n def test_add_simple_text(self):\n \"\"\" Add simple text inside an interval \"\"\"\n value = self.redis.iadd('test', 0, 10, 'simple text')\n assert value == 'OK'\n",
"step-4": "from redis_interval.client import RedisInterval\n\n\nclass TestRedisIntervalIADD(object):\n \"\"\" Tests the IADD command \"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.redis = RedisInterval(host='localhost')\n\n def test_add_simple_text(self):\n \"\"\" Add simple text inside an interval \"\"\"\n value = self.redis.iadd('test', 0, 10, 'simple text')\n assert value == 'OK'\n",
"step-5": "from redis_interval.client import RedisInterval\n\n\nclass TestRedisIntervalIADD(object):\n \"\"\" Tests the IADD command \"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.redis = RedisInterval(host=\"localhost\")\n\n def test_add_simple_text(self):\n \"\"\" Add simple text inside an interval \"\"\"\n value = self.redis.iadd(\"test\", 0, 10, \"simple text\")\n assert value == 'OK'\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
<|reserved_special_token_0|>
print('Cursor Type : ', type(c))
c.execute(
'CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)'
)
c.execute(
"INSERT INTO users \tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)"
, (nowDateTime,))
c.execute(
'INSERT INTO users(id, username, email, phone, website, regdate) \tVALUES(?,?, ?, ?, ?, ?)'
, (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))
<|reserved_special_token_0|>
c.executemany(
'INSERT INTO users(id, username, email, phone, website, regdate)\tVALUES (?,?,?,?,?,?)'
, userList)
dbConn().close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
<|reserved_special_token_0|>
c = dbConn().cursor()
nowDateTime = nowDate()
print('Cursor Type : ', type(c))
c.execute(
'CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)'
)
c.execute(
"INSERT INTO users \tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)"
, (nowDateTime,))
c.execute(
'INSERT INTO users(id, username, email, phone, website, regdate) \tVALUES(?,?, ?, ?, ?, ?)'
, (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))
userList = (3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime
), (4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime), (
5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)
c.executemany(
'INSERT INTO users(id, username, email, phone, website, regdate)\tVALUES (?,?,?,?,?,?)'
, userList)
dbConn().close()
<|reserved_special_token_1|>
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from pkg._DB_INFO import DbConn as dbConn
from pkg._DB_INFO import sysDate as nowDate
c = dbConn().cursor()
nowDateTime = nowDate()
print('Cursor Type : ', type(c))
c.execute(
'CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)'
)
c.execute(
"INSERT INTO users \tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)"
, (nowDateTime,))
c.execute(
'INSERT INTO users(id, username, email, phone, website, regdate) \tVALUES(?,?, ?, ?, ?, ?)'
, (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))
userList = (3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime
), (4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime), (
5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)
c.executemany(
'INSERT INTO users(id, username, email, phone, website, regdate)\tVALUES (?,?,?,?,?,?)'
, userList)
dbConn().close()
<|reserved_special_token_1|>
# Database integration (SQLite)
# Creating a table and inserting rows
# The db file lives inside the pkg folder, so extend sys.path to import it ... is there no other way? ...
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
# Import the db info, aliasing the DbConn method as dbConn
from pkg._DB_INFO import DbConn as dbConn
from pkg._DB_INFO import sysDate as nowDate
# Attach a Cursor to dbConn
# print('---> ', dir(dbConn())) # methods available on sqlite3.connect()
c = dbConn().cursor()
nowDateTime = nowDate()
print('Cursor Type : ', type(c))
# Create the table (data types: Text, Numeric, Integer, Real, Blob)
# CREATE TABLE IF NOT EXISTS --> reuse the table if it exists, create it otherwise
# PRIMARY KEY -> primary key, duplicates not allowed
c.execute('CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, \
email text, phone text, website text, regdate text)')
# id is the PRIMARY KEY, so duplicate values are rejected
# Comment out the INSERT queries after running them once
# Insert data
c.execute("INSERT INTO users \
	VALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)",
	(nowDateTime, ) ) # without the comma inside (nowDateTime, ) the string would be handled as a sequence
# Insert data a different way
c.execute('INSERT INTO users(id, username, email, phone, website, regdate) \
	VALUES(?,?, ?, ?, ?, ?)',
	(2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime) )
# Bulk INSERT -> tuple or list (only the surrounding brackets differ)
userList = (
	(3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime),
	(4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime),
	(5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)
)
# Insert everything at once as tuples --> handy later when loading crawled data
c.executemany("INSERT INTO users(id, username, email, phone, website, regdate)\
	VALUES (?,?,?,?,?,?)", userList)
# Deleting the table data
# c.execute('DELETE FROM users')
# How to print how many rows were removed while deleting:
# print('users db delete : ', c.execute("DELETE FROM users").rowcount)
# Commit: applied automatically when isolation_level = None (autocommit)
# dbConn().commit() # without autocommit you have to commit explicitly
# Rollback: undoes the queries issued since the last commit
# dbConn().rollback()
# Close the connection
dbConn().close()
# c.execute('DROP TABLE users') # drop the table
|
flexible
|
{
"blob_id": "b066ab81eccee538eb3f85b49a3e46c00a947428",
"index": 6154,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n<mask token>\nprint('Cursor Type : ', type(c))\nc.execute(\n 'CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)'\n )\nc.execute(\n \"INSERT INTO users \\tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)\"\n , (nowDateTime,))\nc.execute(\n 'INSERT INTO users(id, username, email, phone, website, regdate) \\tVALUES(?,?, ?, ?, ?, ?)'\n , (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))\n<mask token>\nc.executemany(\n 'INSERT INTO users(id, username, email, phone, website, regdate)\\tVALUES (?,?,?,?,?,?)'\n , userList)\ndbConn().close()\n",
"step-3": "<mask token>\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n<mask token>\nc = dbConn().cursor()\nnowDateTime = nowDate()\nprint('Cursor Type : ', type(c))\nc.execute(\n 'CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)'\n )\nc.execute(\n \"INSERT INTO users \\tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)\"\n , (nowDateTime,))\nc.execute(\n 'INSERT INTO users(id, username, email, phone, website, regdate) \\tVALUES(?,?, ?, ?, ?, ?)'\n , (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))\nuserList = (3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime\n ), (4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime), (\n 5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)\nc.executemany(\n 'INSERT INTO users(id, username, email, phone, website, regdate)\\tVALUES (?,?,?,?,?,?)'\n , userList)\ndbConn().close()\n",
"step-4": "import os, sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom pkg._DB_INFO import DbConn as dbConn\nfrom pkg._DB_INFO import sysDate as nowDate\nc = dbConn().cursor()\nnowDateTime = nowDate()\nprint('Cursor Type : ', type(c))\nc.execute(\n 'CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)'\n )\nc.execute(\n \"INSERT INTO users \\tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)\"\n , (nowDateTime,))\nc.execute(\n 'INSERT INTO users(id, username, email, phone, website, regdate) \\tVALUES(?,?, ?, ?, ?, ?)'\n , (2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime))\nuserList = (3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime\n ), (4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime), (\n 5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)\nc.executemany(\n 'INSERT INTO users(id, username, email, phone, website, regdate)\\tVALUES (?,?,?,?,?,?)'\n , userList)\ndbConn().close()\n",
"step-5": "# 데이터베이스 연동(SQLite)\n# 테이블 생성 및 삽입\n\n# pkg 폴더안에 db 파일이 있어서 해당 파일 import 하기 위해 ... 다른 방법 없을까 ...\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n\n# db 정보 import 후 DbConn 메소드를 dbConn으로 사용명 변경\nfrom pkg._DB_INFO import DbConn as dbConn\nfrom pkg._DB_INFO import sysDate as nowDate\n\n# doConn에 Cursor(커서) 연결 \n# print('---> ', dir(dbConn())) # sqlite3.connect()에서 사용가능 메소드\nc = dbConn().cursor()\nnowDateTime = nowDate()\nprint('Cursor Type : ', type(c))\n\n# 테이블 생성(Data Type : Text. Numeric, Integer, Real, Blob)\n# CREATE TABLE IF NOT EXISTS --> 있으면 그대로 사용하고 없으면 테이블 생성\n# PRIMARY KEY -> 기본 키, 중복 불가\nc.execute('CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, \\\nemail text, phone text, website text, regdate text)')\n\n# ID는 PRIMARY KEY라서 중복 불가\n# INSERT 쿼리 한번 실행 후 주석 처리\n\n# 데이터 삽입\nc.execute(\"INSERT INTO users \\\n\tVALUES(1, 'kang', 'abcdefg@aaa.com', '010-0000-0000', 'kang.com', ?)\",\n\t(nowDateTime, ) ) # ? 뒤에 (, ) 안에 ,가 없으면 문자가 시퀀스 처리됨\n\n# 다른 당법으로 데이터 삽입\nc.execute('INSERT INTO users(id, username, email, phone, website, regdate) \\\n\tVALUES(?,?, ?, ?, ?, ?)',\n\t(2, 'Park', 'Park@aaa.aaa', '010-0000-0001', 'Park.com', nowDateTime) )\n\n# Many INSERT (대용량 삽입) -> 튜플, 리스트 (둘은 괄호만 바꾸면 된다)\nuserList = (\n\t(3, 'Lee', 'Lee@Lee.com', '010-1111-1111', 'Lee.com', nowDateTime),\n\t(4, 'Lee', 'Cho@Cho.com', '010-2222-2222', 'Cho.com', nowDateTime),\n\t(5, 'Yoo', 'Yoo@Yoo.com', '010-4444-4444', 'Yoo.com', nowDateTime)\n)\n# 튜플 형태로 한번에 집어 넣기 --> 나중 크롤링 한 정보를 입력할 때 도움 됨\nc.executemany(\"INSERT INTO users(id, username, email, phone, website, regdate)\\\n\tVALUES (?,?,?,?,?,?)\", userList)\n\n# 테이블 데이터 삭제\n# c.execute('DELETE FROM users')\n# 지우면서 print 함수로 몇개의 row를 지웠는지 확인 하는법\n# print('users db delete : ', c.execute(\"DELETE FROM users\").rowcount)\n\n# 커밋 : isolation_level = None 일 경우 자동 반영 (오토 커밋)\n# dbConn().commit() # 오토 커밋을 안했을 경우 직접 커밋을 해줘야 된다\n# 롤백 : 롤백이 실행된 시점 기준으로 그 전 쿼리들을 실행 안하고 전으로 돌림\n# dbConn().rollback()\n\n# 접속 해제\ndbConn().close()\n\n# c.execute('DROP TABLE users') # 테이블 삭제\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Context:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Context:
def __init__(self, function_name, function_version):
self.function_name = function_name
self.function_version = function_version
self.invoked_function_arn = (
'arn:aws:lambda:eu-north-1:000000000000:function:{}'.format(
self.function_name))
self.aws_request_id = uuid.uuid1()
self.log_group_name = '/aws/lambda/{}'.format(self.function_name)
today = date.today()
self.log_stream_name = ('{}/[{}]4459c970fa6d4c77aca62c95850fce54'.
format(today.strftime('%Y/%m/%d'), self.function_version))
self.memory_limit_in_mb = Context.memory(self)
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Context:
def __init__(self, function_name, function_version):
self.function_name = function_name
self.function_version = function_version
self.invoked_function_arn = (
'arn:aws:lambda:eu-north-1:000000000000:function:{}'.format(
self.function_name))
self.aws_request_id = uuid.uuid1()
self.log_group_name = '/aws/lambda/{}'.format(self.function_name)
today = date.today()
self.log_stream_name = ('{}/[{}]4459c970fa6d4c77aca62c95850fce54'.
format(today.strftime('%Y/%m/%d'), self.function_version))
self.memory_limit_in_mb = Context.memory(self)
pass
def memory(self):
mem = int(os.popen(
'cat /sys/fs/cgroup/memory/memory.limit_in_bytes').read())
self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)
return self.memory_limit_in_mb
pass
<|reserved_special_token_1|>
import uuid
from datetime import date
import os
import humanize
class Context:
def __init__(self, function_name, function_version):
self.function_name = function_name
self.function_version = function_version
self.invoked_function_arn = (
'arn:aws:lambda:eu-north-1:000000000000:function:{}'.format(
self.function_name))
self.aws_request_id = uuid.uuid1()
self.log_group_name = '/aws/lambda/{}'.format(self.function_name)
today = date.today()
self.log_stream_name = ('{}/[{}]4459c970fa6d4c77aca62c95850fce54'.
format(today.strftime('%Y/%m/%d'), self.function_version))
self.memory_limit_in_mb = Context.memory(self)
pass
def memory(self):
mem = int(os.popen(
'cat /sys/fs/cgroup/memory/memory.limit_in_bytes').read())
self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)
return self.memory_limit_in_mb
pass
<|reserved_special_token_1|>
import uuid
from datetime import date
import os
import humanize
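# Stand-in for the AWS Lambda context object, handy when invoking handlers locally;
# the ARN, log group, log stream and request id it exposes are fabricated.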
class Context:
def __init__(self, function_name, function_version):
self.function_name = function_name
self.function_version = function_version
self.invoked_function_arn = "arn:aws:lambda:eu-north-1:000000000000:function:{}".format(self.function_name)
self.aws_request_id = uuid.uuid1()
self.log_group_name = "/aws/lambda/{}".format(self.function_name)
today = date.today()
self.log_stream_name = "{}/[{}]4459c970fa6d4c77aca62c95850fce54".format(today.strftime("%Y/%m/%d"), self.function_version)
self.memory_limit_in_mb = Context.memory(self)
pass
def memory(self):
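        # read the container's cgroup memory limit and return it in human-readable form (e.g. '1.9G')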
mem = int(os.popen("cat /sys/fs/cgroup/memory/memory.limit_in_bytes").read())
self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)
return (self.memory_limit_in_mb)
pass
|
flexible
|
{
"blob_id": "1c685514f53a320226402a4e4d8f3b3187fad615",
"index": 7814,
"step-1": "<mask token>\n\n\nclass Context:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Context:\n\n def __init__(self, function_name, function_version):\n self.function_name = function_name\n self.function_version = function_version\n self.invoked_function_arn = (\n 'arn:aws:lambda:eu-north-1:000000000000:function:{}'.format(\n self.function_name))\n self.aws_request_id = uuid.uuid1()\n self.log_group_name = '/aws/lambda/{}'.format(self.function_name)\n today = date.today()\n self.log_stream_name = ('{}/[{}]4459c970fa6d4c77aca62c95850fce54'.\n format(today.strftime('%Y/%m/%d'), self.function_version))\n self.memory_limit_in_mb = Context.memory(self)\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Context:\n\n def __init__(self, function_name, function_version):\n self.function_name = function_name\n self.function_version = function_version\n self.invoked_function_arn = (\n 'arn:aws:lambda:eu-north-1:000000000000:function:{}'.format(\n self.function_name))\n self.aws_request_id = uuid.uuid1()\n self.log_group_name = '/aws/lambda/{}'.format(self.function_name)\n today = date.today()\n self.log_stream_name = ('{}/[{}]4459c970fa6d4c77aca62c95850fce54'.\n format(today.strftime('%Y/%m/%d'), self.function_version))\n self.memory_limit_in_mb = Context.memory(self)\n pass\n\n def memory(self):\n mem = int(os.popen(\n 'cat /sys/fs/cgroup/memory/memory.limit_in_bytes').read())\n self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)\n return self.memory_limit_in_mb\n pass\n",
"step-4": "import uuid\nfrom datetime import date\nimport os\nimport humanize\n\n\nclass Context:\n\n def __init__(self, function_name, function_version):\n self.function_name = function_name\n self.function_version = function_version\n self.invoked_function_arn = (\n 'arn:aws:lambda:eu-north-1:000000000000:function:{}'.format(\n self.function_name))\n self.aws_request_id = uuid.uuid1()\n self.log_group_name = '/aws/lambda/{}'.format(self.function_name)\n today = date.today()\n self.log_stream_name = ('{}/[{}]4459c970fa6d4c77aca62c95850fce54'.\n format(today.strftime('%Y/%m/%d'), self.function_version))\n self.memory_limit_in_mb = Context.memory(self)\n pass\n\n def memory(self):\n mem = int(os.popen(\n 'cat /sys/fs/cgroup/memory/memory.limit_in_bytes').read())\n self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)\n return self.memory_limit_in_mb\n pass\n",
"step-5": "import uuid\nfrom datetime import date\nimport os\nimport humanize\n\n\nclass Context:\n def __init__(self, function_name, function_version):\n self.function_name = function_name\n self.function_version = function_version\n self.invoked_function_arn = \"arn:aws:lambda:eu-north-1:000000000000:function:{}\".format(self.function_name)\n self.aws_request_id = uuid.uuid1()\n self.log_group_name = \"/aws/lambda/{}\".format(self.function_name)\n today = date.today()\n self.log_stream_name = \"{}/[{}]4459c970fa6d4c77aca62c95850fce54\".format(today.strftime(\"%Y/%m/%d\"), self.function_version)\n self.memory_limit_in_mb = Context.memory(self)\n pass\n\n def memory(self):\n mem = int(os.popen(\"cat /sys/fs/cgroup/memory/memory.limit_in_bytes\").read())\n self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)\n return (self.memory_limit_in_mb)\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |